def create_accidentals(id):
    text_from = INTERVAL_FROM[id][1:]
    text_to = INTERVAL_TO[id][1:]
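    # size the clip to fit the longer of the two accidental strings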
    width = max(len(text_from), len(text_to)) * FONT_BOX['accidental'][0]
    size = (width, FONT_BOX['note'][1])
    if text_from:
        accidental_from = mpy.TextClip(text_from,
            **STYLE['accidental_inactive'],
            size=size,
        ) \
            .set_start(1) \
            .set_position(('right', 'bottom'))
    else:
        accidental_from = None
    if text_to:
        accidental_to = mpy.TextClip(text_to,
            **STYLE['accidental_inactive'],
            size=size,
        ) \
            .set_start(1) \
            .set_position(('right', 'top'))
    else:
        accidental_to = None
    return mpy.CompositeVideoClip(
        [
            accidental_from or empty_clip(),
            accidental_to or empty_clip(),
        ],
        size=(max(1, width), EXAMPLE_HEIGHT),
    )
Example #2
    def random_word_screen(self):
        if self.word_type == "sad":
            # randint is inclusive on both ends, so subtract 1 to stay in range
            word = sad_word_list[randint(0, len(sad_word_list) - 1)]
        elif self.word_type == "happy":
            word = happy_word_list[randint(0, len(happy_word_list) - 1)]
        elif self.word_type == "dark":
            word = dark_word_list[randint(0, len(dark_word_list) - 1)]
        spaced_word = "  ".join(word)
        fontsize = 30 if self.resizeForTikTok == "y" else 50
        clip = mpe.TextClip(spaced_word,
                            fontsize=fontsize,
                            color="white",
                            size=self.clip.size,
                            bg_color="black",
                            method="caption",
                            align="center").set_duration(self.text_duration)

        self.total_duration += self.text_duration
        self.clip_list.append(clip)
Example #3
def create_example(id):
    image = mpy.ImageClip('score/cropped-score-page{}.png'.format(id + 1)) \
        .set_start(0) \
        .set_position((0, 'center'))
    note_from = mpy.TextClip(INTERVAL_FROM[id][0], **STYLE['note']) \
        .set_start(1) \
        .set_position(('right', 'bottom'))
    note_to = mpy.TextClip(INTERVAL_TO[id][0], **STYLE['note']) \
        .set_start(1) \
        .set_position(('right', 'top'))

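    # spread the in-between notes evenly in the vertical space left by the two endpoint notes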
    notes_vertical_gap = int(
        (EXAMPLE_HEIGHT - (len(BETWEEN_NOTES[id]) + 2) * FONT_BOX['note'][1]) /
        (len(BETWEEN_NOTES[id]) + 1))
    between_notes = [
        mpy.TextClip(note, **STYLE['note_between']).set_start(
            2 + (len(BETWEEN_NOTES[id]) - i) * 0.1).set_position((
                'right',
                FONT_BOX['note'][1] * (i + 1) + notes_vertical_gap * (i + 1),
            )) for i, note in enumerate(reversed(BETWEEN_NOTES[id]))
    ]
    return mpy.CompositeVideoClip(
        [image, note_from, note_to] + between_notes,
        size=(EXAMPLE_WIDTH, EXAMPLE_HEIGHT),
    )
Example #4
def simple(size, main, sub=None, author=None, duration=5):
    mainfs = size[0] * 1.5 / longLine(main)
    mc = mped.TextClip(main, fontsize=mainfs, color="white")
    mc = mc.set_position(((size[0] - mc.size[0]) / 2,
                          (size[1] - mc.size[1]) / 4)).set_duration(duration)
    group = [mc]

    if sub is not None:
        ms = mped.TextClip(sub,
                           fontsize=min(size[0] / longLine(sub), mainfs - 2),
                           color="white")
        ms = ms.set_position(
            ((size[0] - ms.size[0]) / 2, size[1] / 2)).set_duration(duration)
        group.append(ms)

    if author is not None:
        aut = mped.TextClip(author,
                            fontsize=min(mainfs - 4,
                                         size[0] / longLine(author)),
                            color="white")
        aut = aut.set_position(mvm.minus(size,
                                         aut.size)).set_duration(duration)
        group.append(aut)

    return mped.CompositeVideoClip(group, size=size)
Example #5
def create_heading():
    heading1 = mpy.TextClip(
        'Step 1: Count',
        **STYLE['heading'],
    ) \
        .set_duration(DURATION['step1']['total'])
    heading2 = mpy.TextClip(
        'Step 2: Major Scale',
        **STYLE['heading'],
    ) \
        .set_duration(DURATION['step2']['total'])
    return mpy.concatenate_videoclips([
        heading1,
        heading2,
    ])
Example #6
    def frame_out(font, video_width, dic_of_names, text, clip_1_start, clip_1_end, crop_bot, crop_top):
        if any([v is None for v in [font, video_width, dic_of_names, text, crop_bot, crop_top]]):
            raise PreventUpdate

        clip_1 = mpy.VideoFileClip(dic_of_names[list(dic_of_names)[0]])
        clip_1 = clip_1.fx(mpy.vfx.resize, width=video_width)
        clip_1 = clip_1.subclip(t_start=clip_1_start, t_end=clip_1_end)
        clip_1 = clip_1.fx(mpy.vfx.crop, y1=crop_top, y2=clip_1.size[1]-crop_bot)
        txt_clip = mpy.TextClip(text,
                           size=clip_1.size,
                           color='white',
                           bg_color='black',
                           font=font
                           ).set_duration(clip_1.duration)
        # for image export in memory using PIL (for base64 convert), need to apply mask manually
        f = clip_1.fx(mpy.vfx.resize, width=540).get_frame(t=0)
        mask = 255 * txt_clip.fx(mpy.vfx.resize, width=540).to_mask().get_frame(t=0)
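        # stack the RGB frame with the text mask as an alpha channel (RGBA)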
        ff = np.dstack([f, mask]).astype('uint8')

        im = Image.fromarray(ff)
        rawBytes = io.BytesIO()
        im.save(rawBytes, "PNG")
        rawBytes.seek(0)

        return html.Img(src=f"data:image/PNG;base64, {b64encode(rawBytes.read()).decode('utf-8')}")
Example #7
def annotate(clip, txt, txt_color="white", fontsize=24, font="Arial-Bold"):
    # Overlay a text label on the clip, centered 50 px from the top
    txtclip = editor.TextClip(
        txt, fontsize=fontsize, font=font, color=txt_color
    ).on_color(color=[0, 0, 0])
    cvc = editor.CompositeVideoClip([clip, txtclip.set_pos(("center", 50))])
    return cvc.set_duration(clip.duration)
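A minimal usage sketch for this helper, assuming editor is moviepy.editor and my_video.mp4 is a hypothetical local file:

from moviepy import editor

clip = editor.VideoFileClip("my_video.mp4").subclip(0, 5)
labeled = annotate(clip, "Opening scene")
labeled.write_videofile("annotated.mp4", fps=24)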
Example #8
def generate_subtitles_clip(subtitles,
                            fontsize=60,
                            color="white",
                            stroke_width=2,
                            stroke_color="black"):
    '''
    params:
    * subtitles: the parsed subtitle script
    '''
    text_clips = []

    for content in subtitles:
        if isinstance(content, SContent):
            text_content = mpy.TextClip(
                content.text,
                color=color,
                stroke_color=stroke_color,
                stroke_width=stroke_width,
                font="ArialUnicode",  # TODO: change the font
                fontsize=fontsize)
            text_on_color = text_content.on_color(PREVIEW_SIZE,
                                                  pos=('center', 'bottom'),
                                                  col_opacity=0)
            text_clip = text_on_color.set_duration(content.dur)
            text_clips.append(text_clip)
            # EH: add some spaces between subtitles

    return mpy.concatenate_videoclips(text_clips)
Example #9
def scheduled_time_scene_transition(schedule, resource_folder_name="res"):
    '''
    params:
    - schedule: a list of tuples of (file name, dur, params)
    '''
    clips = []
    print(schedule)  #DEBUG
    for res, dur, params in schedule:
        # EH: use a better way to detect the type of a file
        file_name = os.path.join(resource_folder_name, res)
        if not os.path.exists(file_name):
            print("File not found! {}".format(file_name))
            raise FileNotFoundError()
        file_type = res.split(".")[-1]
        if file_type in ["mov", "mp4", "avi", "flv"]:
            origin_video_clip = mpy.VideoFileClip(os.path.join(
                resource_folder_name, res),
                                                  audio=False)
            if params["part"]:
                #print(params["part"])
                parts = params["part"]
                origin_video_clip = origin_video_clip.subclip(
                    parts[0], parts[1])
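            # "crop" holds a fractional (x1, y1, x2, y2) rectangle scaled by the frame size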
            if params["crop"]:
                w = origin_video_clip.w
                h = origin_video_clip.h
                rect = params["crop"]
                origin_video_clip = vfx.crop(origin_video_clip, w * rect[0],
                                             h * rect[1], w * rect[2],
                                             h * rect[3])
            clips.append(
                set_video_dur(resize_and_fit(origin_video_clip, PREVIEW_SIZE),
                              dur))
        elif file_type in ["jpg", "png", "jpeg"]:
            origin_img_clip = mpy.ImageClip(
                os.path.join(resource_folder_name, res))
            if params["crop"]:
                w = origin_img_clip.w
                h = origin_img_clip.h
                rect = params["crop"]
                #print("Crop", w, h, rect, rect[0]*w)
                origin_img_clip = vfx.crop(origin_img_clip, w * rect[0],
                                           h * rect[1], w * rect[2],
                                           h * rect[3])
            clips.append(
                set_img_dur(resize_and_fit(origin_img_clip, PREVIEW_SIZE),
                            dur))
        elif file_type in ["txt"]:
            print(res)
            print(os.path.join(resource_folder_name, res))
            origin_txt_clip = mpy.TextClip(
                open(os.path.join(resource_folder_name, res)).read(),
                color="white",
                font="ArialUnicode",
                fontsize=100).on_color(PREVIEW_SIZE).set_position("center")
            clips.append(
                set_scene_dur(resize_and_fit(origin_txt_clip, PREVIEW_SIZE),
                              dur))

    return mpy.concatenate_videoclips(clips)
Example #10
    def random_word_screen(self):
        # randint is inclusive on both ends, so subtract 1 to stay in range
        word = word_list[randint(0, len(word_list) - 1)]
        spaced_word = '  '.join(word)
        clip = mpe.TextClip(spaced_word,
                            fontsize=70,
                            color='white',
                            size=self.clip.size,
                            bg_color='black',
                            method='caption',
                            align='center').set_duration(2)
        self.clip_list.append(clip)
        self.total_duration += 2
Example #11
    def process_pre_video(n_clicks, dic_of_names, clip_1_start, clip_1_end, video_width, text, font, crop_bot, crop_top):
        if n_clicks is None:
            raise PreventUpdate

        if dic_of_names is None:
            return None

        if text is None:
            text = ''
        clip_1 = mpy.VideoFileClip(dic_of_names[list(dic_of_names)[0]])
        clip_1 = clip_1.fx(mpy.vfx.resize, width=video_width)
        clip_1 = clip_1.subclip(t_start=clip_1_start, t_end=clip_1_end)
        clip_1 = clip_1.fx(mpy.vfx.crop, y1=crop_top, y2=clip_1.size[1]-crop_bot)
        txt_clip = mpy.TextClip(text,
                           size=clip_1.size,
                           color='white',
                           bg_color='black',
                           font=font
                           ).set_duration(clip_1.duration)
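        # use the rendered text as a mask: the video shows through only where the text is drawn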
        clip_1 = clip_1.set_mask(txt_clip.to_mask())

        ffname = Path("downloads") / f'{str(uuid.uuid4())}.mp4'
        Path.mkdir(ffname.parent, parents=True, exist_ok=True)
        cvc = mpy.CompositeVideoClip([clip_1], bg_color=(255, 255, 255))
        # preview video set to 540 width and 5 fps
        fn_pre = '.'.join(str(ffname).split('.')[:-1]) + 'preview_.webm'
        cvc.fx(mpy.vfx.resize, width=540).write_videofile(fn_pre, audio=False, fps=5)
        # write full deal
        cvc.write_videofile(str(ffname), audio=False, fps=clip_1.fps)

        # read the preview back and close the file handle promptly
        with open(fn_pre, 'rb') as vid:
            base64_string = b64encode(vid.read()).decode('utf-8')
        return [html.Video(src=f'data:video/webm;base64,{base64_string}', controls=True)], f'/{ffname}', False
Example #12
def annotate(clip, txt, txt_color='red', fontsize=30, font='Xolonium-Bold'):
    txtclip = editor.TextClip(txt,
                              fontsize=fontsize,
                              font=font,
                              color=txt_color)
    cvc = editor.CompositeVideoClip([clip, txtclip.set_pos(('center', 'top'))])
    return cvc.set_duration(clip.duration)
Example #13
def edit_video(loadtitle, savetitle, cuts):
    # load file
    video = mpy.VideoFileClip(loadtitle)

    # cut file
    clips = []
    for cut in cuts:
        clip = video.subclip(cut[0], cut[1])
        clips.append(clip)

    final_clip = mpy.concatenate_videoclips(clips)

    # add text
    txt = mpy.TextClip('Please Subscribe!',
                       font='Courier',
                       fontsize=120,
                       color='white',
                       bg_color='gray35')
    txt = txt.set_position(('center', 0.6), relative=True)
    txt = txt.set_start((0, 3))  # (min, s)
    txt = txt.set_duration(4)
    txt = txt.crossfadein(0.5)
    txt = txt.crossfadeout(0.5)

    final_clip = mpy.CompositeVideoClip([final_clip, txt])

    # save file (vcodec, compression and videoquality are module-level encoding settings)
    final_clip.write_videofile(savetitle,
                               threads=4,
                               fps=24,
                               codec=vcodec,
                               preset=compression,
                               ffmpeg_params=["-crf", videoquality])

    video.close()
Example #14
def title1(t1, sSize):
    tc1 = mped.TextClip(t1, color="white", font="Arial",
                        fontsize=100).set_duration(10)
    center = ((sSize[0] - tc1.w) / 2, (sSize[1] - tc1.h) / 2)

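    # animate the position: drift right at 10 px/s and up at 5 px/s from the centered start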
    tc1 = tc1.set_pos(lambda t: (center[0] + t * 10, center[1] - t * 5))
    return tc1
Example #15
    def log_paths_as_videos(self,
                            paths,
                            step,
                            max_videos_to_save=2,
                            fps=10,
                            video_title='video'):

        # reshape the rollouts
        videos = [p['image_obs'] for p in paths]

        # clamp how many videos to save, then find the longest rollout
        max_videos_to_save = min(max_videos_to_save, len(videos))
        max_length = videos[0].shape[0]
        for i in range(max_videos_to_save):
            max_length = max(max_length, videos[i].shape[0])

        # pad rollouts to all be same length
        for i in range(max_videos_to_save):
            if videos[i].shape[0] < max_length:
                padding = np.tile([videos[i][-1]],
                                  (max_length - videos[i].shape[0], 1, 1, 1))
                videos[i] = np.concatenate([videos[i], padding], 0)

            clip = mpy.ImageSequenceClip(list(videos[i]), fps=fps)
            # set_position takes a single (x, y) tuple; two positional args would be read as (pos, relative)
            txt_clip = (mpy.TextClip(video_title, fontsize=30,
                                     color='white').set_position(
                                         ('center', 'top')).set_duration(10))

            video = mpy.CompositeVideoClip([clip, txt_clip])
            new_video_title = video_title + '{}_{}'.format(step, i) + '.mp4'
            filename = os.path.join(self._log_dir, new_video_title)
            video.write_videofile(filename, fps=fps)
Example #16
def blueInfo(message,
             wsize,
             start=0,
             cenpos=None,
             duration=9,
             bcol=None,
             align='center'):
    sx, sy = wsize

    if cenpos is None:
        cenpos = (sx / 2, sy / 2)
    px, py = cenpos
    dx = min(px, sx - px)
    res = mped.TextClip(message,
                        font='Courier-bold',
                        fontsize=dx * 2.5 / longLine(message),
                        color="blue",
                        align=align).set_duration(duration)
    rsx, rsy = res.size

    if bcol is None:
        return res.set_position((px - rsx / 2, py - rsy / 2)).set_start(start)

    # add Background Square
    colClip = mped.ColorClip(res.size, bcol, duration=duration)

    return mped.CompositeVideoClip([colClip, res], size=res.size).set_position(
        (px - rsx / 2, py - rsy / 2)).set_start(start)
Example #17
def create_text_clip(string, b_time=0, e_time=0):
    text_clip = mpy.TextClip(string,
                             font=r"Noto-Sans-CJK-SC-Bold",
                             fontsize=16,
                             color="#fff")
    text_clip = text_clip.set_start(b_time).set_end(e_time)
    return text_clip
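A hedged usage sketch, assuming mpy is moviepy.editor and base.mp4 is a hypothetical input; the start and end times place the subtitle on the composite's timeline:

import moviepy.editor as mpy

base = mpy.VideoFileClip("base.mp4")
subtitle = create_text_clip("Hello", b_time=2, e_time=5).set_position(("center", "bottom"))
mpy.CompositeVideoClip([base, subtitle]).write_videofile("out.mp4", fps=24)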
Example #18
def convert(videos, sounds, dest, tduration=30, mduration=30):
    acc = 0
    clips = []

    for v, s in zip(videos, sounds):
        c = read_clip(v)
        if c.duration >= mduration:
            continue
        c = process_clip(c, s)
        acc += c.duration
        clips.append(c)
        if acc > tduration:
            break

    end_clip = mp.TextClip("FIN",
                           fontsize=100,
                           color="white",
                           font="garamond",
                           method="caption").set_duration(3)
    clips.append(end_clip)

    output = mp.concatenate_videoclips(clips, method="compose")
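    # loop the background music at 20% volume so it spans the whole concatenated video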
    music = audio_loop.audio_loop(volumex.volumex(
        mp.AudioFileClip("bgm.mp3").set_start(0), 0.2),
                                  duration=output.duration)

    new_audio = mp.CompositeAudioClip([music, output.audio])
    output = output.set_audio(new_audio)
    output.write_videofile(dest + "/" + "output.mp4")
Example #19
def make_gif(images, fname, duration=2, true_image=False, salience=False, salIMGS=None):
    def make_frame(t):
        try:
            x = images[int(len(images) / duration * t)]
        except IndexError:
            x = images[-1]

        if true_image:
            return x.astype(np.uint8)
        else:
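            # map frame values from [-1, 1] to [0, 255] for uint8 output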
            return ((x + 1) / 2 * 255).astype(np.uint8)

    def make_mask(t):
        try:
            x = salIMGS[int(len(salIMGS) / duration * t)]
        except IndexError:
            x = salIMGS[-1]
        return x

    txtClip = mpy.TextClip('.', color='white', font="Amiri-Bold",
                       kerning=5, fontsize=10)
    clip = mpy.VideoClip(make_frame, duration=duration)
    clip = mpy.CompositeVideoClip([clip, txtClip])
    clip.duration = duration
    if salience:
        mask = mpy.VideoClip(make_mask, ismask=True, duration=duration)
        clipB = clip.set_mask(mask)
        clipB = clipB.set_opacity(0)
        mask = mask.set_opacity(0.1)
        mask.write_gif(fname, fps=len(images) / duration, verbose=False, logger=None)
        # clipB.write_gif(fname, fps = len(images) / duration,verbose=False)
    else:
        clip.write_gif(fname, fps=len(images) / duration, verbose=False, logger=None)
Example #20
    def __show_speed(self):
        """
        In verbose mode, show speeds digitally below the airspeed indicator.
        """

        # Only show speeds in verbose mode.
        if self._DigSpeed:

            # Iterate threw track points and grap speed and length.
            speedClips = []
            for trkPt in self._Speeds:
                speed = "%2.1f" % trkPt['speed']
                length = trkPt['duration']

                # Create TextClip for each track point.
                speedClips.append(
                    mpy.TextClip(txt=speed,
                                 color="white",
                                 bg_color="transparent",
                                 fontsize=30,
                                 print_cmd=False,
                                 tempfilename="text" + speed + ".png",
                                 remove_temp=True).set_duration(length))

            # Merge track point text clips.
            self._SpeedClip = mpy.concatenate_videoclips(speedClips)
Example #21
    def put_cover_text(self, clips):
        print('Adding subtitles...', end='')
        if self.crs_name[0].upper() == 'L':  # WeDo course
            crs_h_name = 0.05
            crs_h_age = 0.3
            crs_h_lego = 0.4
            crs_h_intro = 0.6
            clr = '#6AB34A'
        elif self.crs_name[0].upper() == 'N':  # 9686 course
            crs_h_name = 0.05
            crs_h_age = 0.3
            crs_h_lego = 0.35
            crs_h_intro = 0.6
            clr = '#06419A'
        else:
            crs_h_name = 0.05
            crs_h_age = 0.6
            crs_h_lego = 0.65
            crs_h_intro = 0.8
            clr = '#6AB34A'

        crs_info = self.read_excel()
        crs_name, age, knowledge, script, dif_level, instrument = crs_info

        text = mpy.TextClip(txt=crs_name, fontsize=70, font='j:/fonts/yousheTitleHei.ttf',color=clr) \
                .set_fps(25).set_position((self.w*0.1,self.h*crs_h_name)).set_duration(self.drtn).set_start(0)
        clips.append(text)

        text = mpy.TextClip(txt='Suitable age: '+age, fontsize=20, font='j:/fonts/yousheTitleHei.ttf',color=clr) \
                .set_fps(25).set_position((self.w*0.1,self.h*crs_h_age)).set_duration(self.drtn).set_start(0)
        clips.append(text)

        text = mpy.TextClip(txt='Teaching aids: '+instrument, fontsize=20, font='j:/fonts/yousheTitleHei.ttf',color=clr) \
                .set_fps(25).set_position((self.w*0.1,self.h*crs_h_lego)).set_duration(self.drtn).set_start(0)
        clips.append(text)

        text = mpy.TextClip(txt=knowledge,align='West',fontsize=25, font='j:/fonts/yousheTitleHei.ttf',color=clr) \
                .set_fps(25).set_position((self.w*0.1,self.h*crs_h_intro)).set_duration(self.drtn).set_start(0)
        clips.append(text)

        # cover_clip=mpy.CompositeVideoClip(clips)

        # fn=os.path.join(self.src_dir,self.crs_name,self.crs_name+'_building_animation.mp4')
        # cover_clip.write_videofile(fn)

        print('Done')
        return clips
Example #22
def prepare_intro_clips(reddit_post):
    # iv = []
    voice_rng = np.random.randint(0, len(google_voices))
    rpost = reddit_post
    intro_text = rpost.subreddit.title
    intro_text = intro_text.rstrip('...')
    intro_text = 'r/' + intro_text + ', asks: ' + rpost.title
    intro_text = textwrap.wrap(intro_text, width=80)
    intro_text = '\n'.join(intro_text)
    # intro_text_clip = mpy.TextClip(intro_text, color='white', fontsize=18).set_position(('center', height / 1.2)).set_start(0)
    make_temp_voice(intro_text, voice_rng)
    intro_audio_clip = mpy.AudioFileClip(
        audio_temp, buffersize=1000000).set_start(0)  # load temp audio file
    intro_time = intro_audio_clip.duration

    outro_image_offset = height / 10
    intro_image = mpy.ImageClip(
        icon_assets_path + 'reddit_logo_verylarge_middle.png').set_position(
            ('center', outro_image_offset)).resize(.4)

    intro_text_clip = mpy.TextClip(
        intro_text, color='white', fontsize=18).set_position(
            ('center', outro_image_offset + intro_image.h +
             20)).set_start(0).set_duration(intro_audio_clip.duration)
    try:
        author_text_clip = mpy.TextClip(
            'posted by: ' + rpost.author.name,
            color=dark_grey_text_color,
            fontsize=14).set_position(
                ('center',
                 outro_image_offset + intro_image.h + intro_text_clip.h +
                 30)).set_start(0).set_duration(intro_audio_clip.duration)
    except AttributeError:
        author_text_clip = mpy.TextClip(
            'posted by: ' + '[Deleted] (RIP)',
            color=dark_grey_text_color,
            fontsize=14).set_position(
                ('center',
                 outro_image_offset + intro_image.h + intro_text_clip.h +
                 30)).set_start(0).set_duration(intro_audio_clip.duration)
    intro_image = intro_image.set_duration(intro_time)
    # print(intro_time)

    # for i in range(len(iv)):
    #     iv[i].set_end(it)

    return intro_text_clip, intro_audio_clip, intro_time, intro_image, author_text_clip
Example #23
    def subtitle_right_btm(self, txt, fsz, w, h, drtn):
        x = w - len(txt) * fsz - 30
        y = h - fsz - 30
        text = mpy.TextClip(txt=txt,
                            fontsize=fsz,
                            font='Microsoft-YaHei-&-Microsoft-YaHei-UI',
                            color='#95ff67').set_pos((x, y)).set_duration(drtn)
        return text
Example #24
def generate_text_clip(text, ftsize, dur):
    txt_clip = mp.TextClip(text,
                           font='Amiri-Bold',
                           fontsize=ftsize,
                           color='white',
                           stroke_color='black')
    txt_clip = txt_clip.set_pos('center').set_duration(dur)
    return txt_clip
Example #25
def annotate(clip, txt, txt_color='white', fontsize=30, font='MDotum'):
    # Overlay a text label on the clip, centered 50 px from the top
    txtclip = editor.TextClip(txt,
                              fontsize=fontsize,
                              font=font,
                              color=txt_color).on_color(color=[0, 0, 0])
    cvc = editor.CompositeVideoClip([clip, txtclip.set_pos(('center', 50))])
    return cvc.set_duration(clip.duration)
Example #26
def caption_clip(clip, text, position=('center', 5), wrap=16, fontsize=10, font="DejaVu-Sans-Bold"):
    
    assert font in mpy.TextClip.list('font')
    
    text = '\n'.join(textwrap.wrap(text, wrap))                
    text_clip = mpy.TextClip(text, fontsize=fontsize, color='white', font=font)
    text_clip = text_clip.set_position(position).set_duration(clip.duration)

    return mpy.CompositeVideoClip([clip, text_clip])
Example #27
def make_element(quote, author, size):
    signature = (
        moviepy.TextClip(
            "- %s" % author,
            fontsize=30,
            color='gray',
        )
        #font="Amiri-Slanted")
        .margin(right=30, bottom=30, opacity=0).set_pos(("right", "bottom")))
    quote = '\n'.join(textwrap.wrap(quote))
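    # hold the quote roughly 65 ms per character as a simple reading-speed heuristic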
    quote_clip = (moviepy.TextClip(
        quote, fontsize=23, font="Amiri-Bold", align="center").set_duration(
            len(quote) *
            .065).set_pos("center").crossfadein(.2).crossfadeout(.2))
    element = (moviepy.CompositeVideoClip(
        [signature, quote_clip], size=size,
        bg_color=(233, 229, 90)).set_duration(quote_clip.duration))
    return element
Example #28
def stitch_clips(clips):

    video_clips = []

    for i, c in enumerate(clips):
        curr_clip = mpy.VideoFileClip('clips/clip%02d.mp4' % i)

        w, h = curr_clip.size

        txt_clip1 = (mpy.TextClip('\"' + c['title'] + '\"',
                                  font='Amiri-regular',
                                  fontsize=70,
                                  color='white').set_duration(5).set_position(
                                      (10, 10)))

        txt_clip2 = (mpy.TextClip(c['broadcaster_name'],
                                  font='Amiri-regular',
                                  fontsize=30,
                                  color='white').set_duration(5).set_position(
                                      (80, 100)))

        txt_clip = mpy.CompositeVideoClip([txt_clip1, txt_clip2])

        txt_col = txt_clip.on_color(size=(txt_clip.w + 20, txt_clip.h + 90),
                                    color=(0, 0, 0),
                                    pos=(10, 10),
                                    col_opacity=0.6)

        logo = (
            mpy.ImageClip("twitch_logo.png").set_duration(5).resize(
                width=70, height=70)  # if you need to resize...
            .set_position((10, 85)))

        result = mpy.CompositeVideoClip([curr_clip, txt_col, txt_clip2, logo])

        video_clips.append(result)

    # Bug in moviepy=1.0.2 throws error here, so using moviepy=1.0.0
    final_clip = mpy.concatenate_videoclips(video_clips, method='compose')

    final_clip.write_videofile('final.mp4', fps=60)

    # Remove clip files
    delete_clips()
Example #29
def create_description():
    empty1 = empty_clip().set_duration(DURATION['step1']['heading'])
    empty2 = empty_clip().set_duration(DURATION['step2']['heading'])
    description1 = mpy.TextClip(
        'Count the number of notes (inclusive) ignoring accidentals',
        **STYLE['description'],
    ) \
        .set_duration(DURATION['step1']['total'] - DURATION['step1']['heading'])
    description2 = mpy.TextClip(
        'Write down the major scale of the lower note',
        **STYLE['description'],
    ) \
        .set_duration(DURATION['step2']['total'] - DURATION['step2']['heading'])
    return mpy.concatenate_videoclips([
        empty1,
        description1,
        empty2,
        description2,
    ])
Example #30
    def annotate(clip, txt, txt_color=txt_color, fontsize=fontsize, font=font):
        """ Writes a text at the bottom of the clip. """
        txtclip = editor.TextClip(txt,
                                  fontsize=fontsize,
                                  font=font,
                                  color=txt_color)
        # txtclip = txtclip.on_color((clip.w, txtclip.h + 6), color=(0, 0, 255), pos=(6, 'center'))
        cvc = editor.CompositeVideoClip(
            [clip, txtclip.set_pos(('center', 'bottom'))])
        return cvc.set_duration(clip.duration)