Code Example #1
File: tbc.py Project: lstn/jjba-continued
def fade_freeze(im_freeze, fade, tbc_overlay, tbc_duration, fadein_duration):
    fade_fx = mp.CompositeVideoClip([
        fade, tbc_overlay
    ]).add_mask().set_duration(tbc_duration).crossfadein(fadein_duration)
    faded_clip = mp.CompositeVideoClip([im_freeze, fade_fx])

    return faded_clip
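For context, fade_freeze composites a crossfading "to be continued"-style overlay onto a frozen frame. A minimal invocation sketch, assuming mp is moviepy.editor; the file names and times are illustrative:

import moviepy.editor as mp

base = mp.VideoFileClip("episode.mp4")
im_freeze = base.to_ImageClip(t=4.0).set_duration(3)  # frame frozen at t = 4 s
fade = base.subclip(4, 7)                             # footage the overlay fades in over
tbc_overlay = mp.ImageClip("to_be_continued.png").set_duration(3)

faded = fade_freeze(im_freeze, fade, tbc_overlay, tbc_duration=3, fadein_duration=1)
faded.write_videofile("out.mp4", fps=24)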
Code Example #2
File: script.py Project: 890popbox/sad-boy-edit
 def add_clip(self, trim):
     r = randint(0, floor(self.clip.duration - trim))
     # r is a random start time chosen above; build the colour list, then pick one at random
     colour_nums = [colours[a] for a in self.colournames]
     randcolour = colour_nums[randint(0, len(colour_nums) - 1)]  # randint is inclusive at both ends
     subclip = self.clip.subclip(r, r + (r % trim))
     merged = mpe.CompositeVideoClip(
         [subclip, self.overlay.subclip(2, 2 + r % trim)])
     if r % 2 == 0:  #adds a fade_in transition if r is even.
         merged = mpv.fx.all.fadein(merged, 3)
     image = mpe.ImageClip('assets/' + randcolour + '.png').resize(
         self.clip.size).set_opacity(0.35).set_duration(trim)
     merged = mpe.CompositeVideoClip([merged, image])
     self.clip_list.append(merged)
     self.total_duration += r % trim
Code Example #3
def _MakeActivationVideoOneLayer(m, clip_dict, layer_no):
    labels = ["conv1", "conv2", "conv3", "fc", "output"]
    scales = [1.5, 2.0, 2.0, 0.5, 1.5]

    #get game frames
    clip1 = clip_dict['frames']

    #get activations from one layer
    layer_name = m.layers[layer_no]['name']
    clip2 = clip_dict[layer_name]
    clip2_scale = scales[layer_no]
    clip2 = clip2.resize(clip2_scale)

    #calculate size of background canvas
    total_size_x = clip1.size[0] + clip2.size[0]
    total_size_y = max(clip1.size[1], clip2.size[1])

    #create background canvas
    bg_clip = mpy.ColorClip(size=(total_size_x, total_size_y),
                            color=(255, 255, 255))

    duration = clip2.duration

    #align clips on canvas
    clip1 = clip1.set_position(pos=(0, "center"))
    clip2 = clip2.set_position(pos=(total_size_x - clip2.size[0], "center"))

    clip_list = [bg_clip, clip1, clip2]

    #composite together
    cc = mpy.CompositeVideoClip(clip_list,
                                (total_size_x, total_size_y)).subclip(
                                    0, duration)
    #cc.ipython_display()
    return cc
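_MakeActivationVideoOneLayer assumes clip_dict maps 'frames' plus one key per layer name to clips, and that m.layers is a sequence of dicts with a 'name' key. An illustrative setup (file names are hypothetical):

import moviepy.editor as mpy

clip_dict = {
    'frames': mpy.VideoFileClip('gameplay.mp4'),          # the game footage
    'conv1': mpy.VideoFileClip('conv1_activations.mp4'),  # one layer's activations
}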
Code Example #4
File: app.py Project: corstar/compilation-bot
def videofier(music_path):
    w, h = 1280, 720
    crop_size = 2
    files = glob.glob('*.mp4')
    random.shuffle(files)
    clip_list = []

    for f in files:
        clip = mp.VideoFileClip(f)
        bc_args = {'height':h}
        clip_args = {'width':w}
        center = {'x_center':w / 2}

        if clip.w / clip.h < 16 / 9:
            bc_args, clip_args = clip_args, bc_args
            center = {'y_center':h / 2}

        blurred_clip = clip.resize(**bc_args).crop(**center, **clip_args).fl_image(blur)
        clip = clip.resize(**clip_args).crop(x1=crop_size, width=w - crop_size * 2,
                y1=crop_size, height=h - crop_size * 2).margin(crop_size, color=(0, 0, 0))

        clip_list.append(mp.CompositeVideoClip([blurred_clip, clip.set_pos('center')]))

    final_clip = mp.concatenate_videoclips(clip_list).fadein(2).fadeout(2)
    final_clip.write_videofile('silent.mp4', fps=24, audio=None)

    FFmpeg(inputs={'silent.mp4': None, music_path: None}, outputs={'final.mp4': '-shortest'}).run()
    os.remove('silent.mp4')
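The blur callback handed to fl_image is not shown in the source. A plausible implementation, assuming scipy is available; fl_image passes each frame in as an H x W x 3 numpy array:

from scipy.ndimage import gaussian_filter

def blur(image):
    # blur spatially (first two axes) but not across colour channels
    return gaussian_filter(image, sigma=(8, 8, 0))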
Code Example #5
def render(files, dirs, mixer=95, portat=0, music=False):
    videofile = mp.VideoFileClip(dirs + files)
    if portat == "portat":
        video = videofile.resize((360, 480))
    else:
        video = videofile.resize(width=480)
    if video.duration <= 120:
        if music != False:
            musics = mp.AudioFileClip(f"videoassets/{music}.mp3")
            video = video.set_audio(mp.CompositeAudioClip([musics.volumex(
                1 - mixer/100), video.audio.volumex(mixer/100)]).set_duration(video.duration))
        intro = mp.VideoFileClip("videoassets/quickskitsoliveintro.mp4").resize(
            video.size
        )
        logoimage = mp.ImageClip("videoassets/logo.png")
        logo = (
            logoimage.set_duration(video.duration)
            .resize(height=40)
            .margin(right=50, bottom=50, opacity=0)
            .set_pos(("right", "bottom"))
        )
        final = mp.CompositeVideoClip([video, logo])
        final = mp.concatenate_videoclips([final, intro.fadein(1).fadeout(1)])

        newformat = changeformat("rendered_" + files, ".mp4")
        final.write_videofile(dirs + newformat, fps=20,
                              codec="mpeg4", audio_codec='aac', threads=4)
        intro.close()
        logoimage.close()
        videofile.close()
        if music != False:
            musics.close()
        return newformat
    else:
        return False
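The changeformat helper is not shown in the source; judging by its call site it swaps the file extension. A plausible sketch, followed by an illustrative call:

import os

def changeformat(filename, new_ext):
    return os.path.splitext(filename)[0] + new_ext

# render('skit.mov', 'uploads/', mixer=80, music='lofi')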
Code Example #6
 def make_frame(self, t):
     i, r = divmod(t, 1)
     i = int(i)
     tree = self.interpolate(i, i + 1, r)
     fg = self.tree_to_clip(tree)
     bg = mpy.ColorClip((2560, 1440), (.5, .5, .5), duration=1)
     return mpy.CompositeVideoClip([bg, fg]).make_frame(0)
Code Example #7
File: frame_5.py Project: strangest-quark/iConsent
 def generate_video_part(self, txnId):
     if not self.config.LOCAL:
         os.chdir("/var/task/")
     W, H = self.config.VIDEO_SIZE
     bgImage = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX + "bg_5.png")
     type_logo = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX + self.image_map.get(self.input_map.get("type"))). \
         set_position((W/2-50, H/5)).resize(width=self.config.ICON_SIZE)
     self.text_to_speech(self.fill_text(self.fill_text(Frame5.lang_map.get('audio5'), 1), 2), Frame5.lang_map.get('lan'), txnId)
     audioclip = AudioFileClip(self.config.SB_AUDIO_PATH_PREFIX + "audio-" + txnId + "-5.mp3")
     Frame5.map['text5'] = self.fill_text(self.fill_text(Frame5.lang_map.get('text5'), 1), 2)
     straight_text(Frame5.map['text5'], Frame5.lang_map.get('font'), Frame5.lang_map.get('fontsize5'), txnId, 5, self.config)
     text = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX_WRITE+'-text-5-' + txnId+'.png')
     video = mpy.CompositeVideoClip(
         [
             bgImage,
             type_logo,
             text.set_position(('center', type_logo.size[1] + 40)),
         ],
         size=self.config.VIDEO_SIZE). \
         on_color(
         color=self.config.WHITE,
         col_opacity=1).set_duration(audioclip.duration)
     new_audioclip = CompositeAudioClip([audioclip])
     video.audio = new_audioclip
     os.remove(self.config.SB_AUDIO_PATH_PREFIX + 'audio-' + txnId + '-5.mp3')
     os.remove(self.config.SB_LOGO_PATH_PREFIX_WRITE+'-text-5-' + txnId+'.png')
     return video, 5
Code Example #8
def create_accidentals(id):
    text_from = INTERVAL_FROM[id][1:]
    text_to = INTERVAL_TO[id][1:]
    width = max(len(text_from), len(text_to)) * FONT_BOX['accidental'][0]
    size = (width, FONT_BOX['note'][1])
    if text_from:
        accidental_from = mpy.TextClip(text_from,
            **STYLE['accidental_inactive'],
            size=size,
        ) \
            .set_start(1) \
            .set_position(('right', 'bottom'))
    else:
        accidental_from = None
    if text_to:
        accidental_to = mpy.TextClip(text_to,
            **STYLE['accidental_inactive'],
            size=size,
        ) \
            .set_start(1) \
            .set_position(('right', 'top'))
    else:
        accidental_to = None
    return mpy.CompositeVideoClip(
        [
            accidental_from or empty_clip(),
            accidental_to or empty_clip(),
        ],
        size=(max(1, width), EXAMPLE_HEIGHT),
    )
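empty_clip and the INTERVAL_*/FONT_BOX/STYLE constants are defined elsewhere in the project. A plausible stand-in for empty_clip is a fully transparent 1x1 clip:

def empty_clip():
    # add_mask() gives the clip a mask, which set_opacity(0) then zeroes out
    return mpy.ColorClip((1, 1), color=(0, 0, 0)).add_mask().set_opacity(0)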
Code Example #9
def watermark_vid():

    # myClip.resize( (460,720) ) # New resolution: (460,720)

    video = mp.VideoFileClip("files/to_watermark_vid.mp4")
    # print(video.duration)
    logo_path = 'files/' + choice(LOGOS)

    logo_width, logo_height = Image.open(logo_path).size
    width, height = video.size
    offset = int(width / (width_scale * 2.7))
    resized_logo_size = (int(width / width_scale),
                         int(logo_height * (width / width_scale) / logo_width))

    logo = (
        mp.ImageClip(logo_path).set_duration(video.duration).resize(
            width=resized_logo_size[0],
            height=resized_logo_size[1])  # if you need to resize...
        .margin(left=offset, bottom=offset,
                opacity=0)  # (optional) logo-border padding
        .set_pos(("left", "bottom")))

    final = mp.CompositeVideoClip([video, logo])
    final.subclip(0,
                  video.duration).write_videofile("files/watermarked_vid.mp4")
    return video.duration, video.size[0], video.size[1]
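watermark_vid relies on module-level names that are not shown: choice, Image, LOGOS, and width_scale. An illustrative setup (the file names and scale are assumptions):

from random import choice
from PIL import Image

LOGOS = ['logo_white.png', 'logo_black.png']  # hypothetical logo files
width_scale = 6                               # logo width = video width / 6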
Code Example #10
def text_overlay(top_text, bottom_text, vid_file_path, output_vid_file_path):
    import moviepy.editor as mp  # imported here so the slow load and startup print don't happen when the GUI opens
    vid_dims = vid_edit_utils.get_vid_dims(vid_file_path)

    #     print('vid_dims:  ', vid_dims)

    # make img with text that will be overlayed on video -- same dims as video
    make_transparent_text_img(top_text, bottom_text, vid_dims, 'text.png')
    #     text_img = Image.open('text.png')

    video = mp.VideoFileClip(vid_file_path)
    og_vid_duration = video.duration

    text_img_overlay = (
        mp.ImageClip('text.png').set_duration(video.duration)
        #           .resize(height=0) # if you need to resize...
        .margin(right=0, top=0, opacity=0)  # (optional) logo-border padding
        .set_pos(("center", "center")))

    final = mp.CompositeVideoClip([video, text_img_overlay])
    final.write_videofile(output_vid_file_path)

    video.reader.close()
    video.audio.reader.close_proc()


# meme_caption.add_caption('green.png', 't.png', 'THIS is SomE TexT', 'bopoooooooottom')
# text_overlay('tooooooooop teeeeeeext', 'booooooottommmm tesssssssefsfdf', 'i.mp4', 'o.mp4')
# vid_h =
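make_transparent_text_img is defined elsewhere; a minimal Pillow-based sketch that draws white text on a transparent canvas of the video's dimensions:

from PIL import Image, ImageDraw

def make_transparent_text_img(top_text, bottom_text, dims, out_path):
    img = Image.new('RGBA', dims, (0, 0, 0, 0))  # fully transparent canvas
    draw = ImageDraw.Draw(img)
    draw.text((10, 10), top_text, fill='white')
    draw.text((10, dims[1] - 30), bottom_text, fill='white')
    img.save(out_path)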
Code Example #11
File: titles.py Project: storyfeet/moviecutter
def simple(size, main, sub=None, author=None, duration=5):
    mainfs = size[0] * 1.5 / longLine(main)
    mc = mped.TextClip(main, fontsize=mainfs, color="white")
    mc = mc.set_position(((size[0] - mc.size[0]) / 2,
                          (size[1] - mc.size[1]) / 4)).set_duration(duration)
    group = [mc]

    if sub is not None:
        ms = mped.TextClip(sub,
                           fontsize=min(size[0] / longLine(sub), mainfs - 2),
                           color="white")
        ms = ms.set_position(
            ((size[0] - ms.size[0]) / 2, size[1] / 2)).set_duration(duration)
        group.append(ms)

    if author is not None:
        aut = mped.TextClip(author,
                            fontsize=min(mainfs - 4,
                                         size[0] / longLine(author)),
                            color="white")
        aut = aut.set_position(mvm.minus(size,
                                         aut.size)).set_duration(duration)
        group.append(aut)

    return mped.CompositeVideoClip(group, size=size)
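longLine is imported from elsewhere in the project; since it divides the frame width to get a font size, it presumably returns the character count of the longest line. A plausible sketch:

def longLine(text):
    return max(len(line) for line in text.split('\n'))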
Code Example #12
def make_gif(images, fname, duration=2, true_image=False, salience=False, salIMGS=None):
    def make_frame(t):
        try:
            x = images[int(len(images) / duration * t)]
        except IndexError:
            x = images[-1]

        if true_image:
            return x.astype(np.uint8)
        else:
            return ((x + 1) / 2 * 255).astype(np.uint8)

    def make_mask(t):
        try:
            x = salIMGS[int(len(salIMGS) / duration * t)]
        except IndexError:
            x = salIMGS[-1]
        return x

    txtClip = mpy.TextClip('.', color='white', font="Amiri-Bold",
                           kerning=5, fontsize=10)
    clip = mpy.VideoClip(make_frame, duration=duration)
    clip = mpy.CompositeVideoClip([clip, txtClip])
    clip.duration = duration
    if salience:
        mask = mpy.VideoClip(make_mask, ismask=True, duration=duration)
        clipB = clip.set_mask(mask)
        clipB = clipB.set_opacity(0)
        mask = mask.set_opacity(0.1)
        mask.write_gif(fname, fps=len(images) / duration, verbose=False, logger=None)
        # clipB.write_gif(fname, fps = len(images) / duration,verbose=False)
    else:
        clip.write_gif(fname, fps=len(images) / duration, verbose=False, logger=None)
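An illustrative call, assuming the frames are H x W x 3 arrays scaled to [-1, 1] (the default branch rescales them to 0-255):

import numpy as np

frames = [np.random.uniform(-1, 1, (64, 64, 3)) for _ in range(20)]
make_gif(frames, 'rollout.gif', duration=2)  # written at len(frames)/duration = 10 fps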
Code Example #13
def annotate(clip, txt, txt_color="white", fontsize=24, font="Arial-Bold"):
    # Writes a text at the bottom of the clip  'Xolonium-Bold'
    txtclip = editor.TextClip(
        txt, fontsize=fontsize, font=font, color=txt_color
    ).on_color(color=[0, 0, 0])
    cvc = editor.CompositeVideoClip([clip, txtclip.set_pos(("center", 50))])
    return cvc.set_duration(clip.duration)
Code Example #14
File: frame_1.py Project: strangest-quark/iConsent
    def generate_video_part(self, txnId):
        if not self.config.LOCAL:
            os.chdir("/var/task/")
        W, H = self.config.VIDEO_SIZE
        bgImage = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX + "bg_1.png")
        fiu_logo = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX + self.image_map.get(self.input_map.get("fiu"))). \
            set_position((W/2-150, H/5)).resize(height=self.config.ICON_SIZE)
        arrow_gif = VideoFileClip(self.config.SB_LOGO_PATH_PREFIX + self.image_map.get("left_arrow")). \
            set_position((W/2-30, H/5)).resize(height=self.config.ICON_SIZE)
        account_logo = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX + self.image_map.get(self.input_map.get("account")[0])). \
            set_position((W/2+100, H/5)).resize(height=self.config.ICON_SIZE)
        self.text_to_speech(self.fill_text(self.fill_text(Frame1.lang_map.get('audio1'), 1), 2), Frame1.lang_map.get('lan'), txnId)
        audioclip = AudioFileClip(self.config.SB_AUDIO_PATH_PREFIX + "audio-" + txnId + "-1.mp3")
        Frame1.map['text1'] = self.fill_text(self.fill_text(Frame1.lang_map.get('text1'), 1), 2)
        straight_text(Frame1.map['text1'], Frame1.lang_map.get('font'), Frame1.lang_map.get('fontsize1'), txnId, 1, self.config)
        text = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX_WRITE+'-text-1-' + txnId+'.png')
        video = mpy.CompositeVideoClip(
            [
                bgImage,
                fiu_logo,
                arrow_gif,
                account_logo,
                text.set_position(('center', fiu_logo.size[1] + 40)),
            ],
            size=self.config.VIDEO_SIZE). \
            on_color(
            color=self.config.WHITE,
            col_opacity=1).set_duration(audioclip.duration)

        new_audioclip = CompositeAudioClip([audioclip])
        video.audio = new_audioclip
        os.remove(self.config.SB_AUDIO_PATH_PREFIX + 'audio-' + txnId + '-1.mp3')
        os.remove(self.config.SB_LOGO_PATH_PREFIX_WRITE+'-text-1-' + txnId+'.png')
        return video, 1
Code Example #15
    def exp_building_movie(self, exptype='all', total_sec_for_part=25):
        print('Processing...')

        if exptype == 'all':
            main_movie = self.build_movie(w=850, h=480)
            cover_text = self.put_cover_text(main_movie)
            add_end_video = self.put_endvideo(cover_text)
            mix = self.put_bgm(add_end_video)
            fn = os.path.join(self.src_dir, self.crs_name,
                              self.crs_name + '_building_animation.mp4')
        elif exptype == 'part':
            main_movie = self.build_movie(total_secs=total_sec_for_part,
                                          w=1280,
                                          h=720)
            cover_text = self.put_cover_text(main_movie)
            mix = mpy.CompositeVideoClip(cover_text)
            fn = os.path.join(self.src_dir, self.crs_name,
                              self.crs_name + '_building_animation_only.mp4')
        else:
            print('Invalid exptype argument')
            sys.exit(0)

        if self.save_yn == 'yes':
            mix.write_videofile(fn)

        self.killProcess()
        print('All Done')
        return mix
Code Example #16
File: movies_utils.py Project: keshava/mmvt
def movie_in_movie(movie1_fname,
                   movie2_fname,
                   output_fname,
                   pos=('right', 'bottom'),
                   movie2_ratio=(1 / 3, 1 / 3),
                   margin=6,
                   margin_color=(255, 255, 255),
                   audio=False,
                   fps=24,
                   codec='libx264'):
    from moviepy import editor
    movie1 = editor.VideoFileClip(movie1_fname, audio=audio)
    w, h = movie1.size

    # movie2 is downsized, given a white margin,
    # and placed in the bottom right corner (or wherever `pos` says)
    movie2 = (
        editor.VideoFileClip(movie2_fname, audio=False).resize(
            (w * movie2_ratio[0],
             h * movie2_ratio[1])).  # one third of the total screen
        margin(margin, color=margin_color).  # white margin
        margin(bottom=20, right=20, top=20, opacity=0).  # transparent
        set_pos(pos))

    final = editor.CompositeVideoClip([movie1, movie2])
    final.write_videofile(output_fname, fps=fps, codec=codec)
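An illustrative call (file names are hypothetical); the second movie is scaled to a third of the first one's width and height and pinned to the bottom right:

movie_in_movie('lecture.mp4', 'piano.mp4', 'combined.mp4',
               pos=('right', 'bottom'), movie2_ratio=(1 / 3, 1 / 3))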
Code Example #17
    def combine(self, other: 'Moviepy', other_first: bool = False,  # type: ignore
                crossfade_duration: float = 0) -> None:
        """Combines this video stream with another stream"""
        self.reader_refs += other.reader_refs
        clips = [other.clip, self.clip] if other_first else [self.clip, other.clip]

        if self.has_video and other.has_video:
            if crossfade_duration == 0:
                self.clip = med.concatenate_videoclips(clips)
            else:
                # Have clips[1] start while clips[0] is not finished yet
                clips[1] = clips[1].set_start(max(0, clips[0].duration - crossfade_duration))
                clips[1] = clips[1].fx(transfx.crossfadein, crossfade_duration)
                self.clip = med.CompositeVideoClip([clips[0], clips[1]])
                # TODO: consider calling set_duration?
                self.clip.duration = clips[0].duration + clips[1].duration - crossfade_duration
        else:
            if crossfade_duration == 0:
                assert self.has_video is False and other.has_video is False
                self.clip = med.concatenate_audioclips(clips)
            else:
                # Audio crossfade in: start earlier, fade in with normal audio_fadein effect.
                clips[1] = clips[1].set_start(max(0, clips[0].duration - crossfade_duration))
                clips[1] = clips[1].fx(afx.audio_fadein, crossfade_duration)
                self.clip = med.CompositeAudioClip([clips[0], clips[1]])
                self.clip.duration = clips[0].duration + clips[1].duration - crossfade_duration
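The video branch uses the standard MoviePy overlap-and-crossfade pattern. The same idea as a standalone sketch, with illustrative clip names:

import moviepy.editor as med
import moviepy.video.compositing.transitions as transfx

a = med.VideoFileClip('a.mp4')
b = med.VideoFileClip('b.mp4')
d = 1.0  # crossfade duration in seconds
b2 = b.set_start(max(0, a.duration - d)).fx(transfx.crossfadein, d)
cross = med.CompositeVideoClip([a, b2])
cross.duration = a.duration + b.duration - d  # total length after the overlap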
Code Example #18
File: animateHUD.py Project: yanjinrong1/gpsHUD
    def generateClips(self):
        """Generate clips"""
        ## get pointer clip
        self.clip_pointer = mpy.VideoClip(self.make_frame,
                                          duration=self.duration)
        ## get pointer mask
        self.clip_pointer_mask = mpy.VideoClip(self.make_frame_mask,
                                               duration=self.duration)
        self.clip_pointer_mask = self.clip_pointer_mask.to_mask(canal=1)

        ## generate glow mask
        self.filtr = lambda im: skf.gaussian(im, sigma=self.p_glow_blur)
        self.clip_pointer_glow_mask = self.clip_pointer_mask.fl_image(
            self.filtr)

        ## assign masks
        self.clip_pointer = self.clip_pointer.set_mask(self.clip_pointer_mask)
        self.clip_pointer_glow = self.clip_pointer.set_mask(
            self.clip_pointer_glow_mask)

        ## generate composite
        self.clip = mpy.CompositeVideoClip([
            self.template,
            self.clip_pointer_glow.set_opacity(self.p_glow_opacity),
            self.clip_pointer.set_opacity(self.p_opacity)
        ])
Code Example #19
def edit_video(loadtitle, savetitle, cuts):
    # load file
    video = mpy.VideoFileClip(loadtitle)

    # cut file
    clips = []
    for cut in cuts:
        clip = video.subclip(cut[0], cut[1])
        clips.append(clip)

    final_clip = mpy.concatenate_videoclips(clips)

    # add text
    txt = mpy.TextClip('Please Subscribe!',
                       font='Courier',
                       fontsize=120,
                       color='white',
                       bg_color='gray35')
    txt = txt.set_position(('center', 0.6), relative=True)
    txt = txt.set_start((0, 3))  # (min, s)
    txt = txt.set_duration(4)
    txt = txt.crossfadein(0.5)
    txt = txt.crossfadeout(0.5)

    final_clip = mpy.CompositeVideoClip([final_clip, txt])

    # save file
    final_clip.write_videofile(savetitle,
                               threads=4,
                               fps=24,
                               codec=vcodec,
                               preset=compression,
                               ffmpeg_params=["-crf", videoquality])

    video.close()
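edit_video reads three module-level encoding settings that are not shown in the source. Illustrative values, plus a sample call:

vcodec = 'libx264'
compression = 'slow'  # ffmpeg preset
videoquality = '24'   # CRF value; lower means higher quality

edit_video('input.mp4', 'output.mp4', cuts=[(5, 12), (30, 41)])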
Code Example #20
def create_example(id):
    image = mpy.ImageClip('score/cropped-score-page{}.png'.format(id + 1)) \
        .set_start(0) \
        .set_position((0, 'center'))
    note_from = mpy.TextClip(INTERVAL_FROM[id][0], **STYLE['note']) \
        .set_start(1) \
        .set_position(('right', 'bottom'))
    note_to = mpy.TextClip(INTERVAL_TO[id][0], **STYLE['note']) \
        .set_start(1) \
        .set_position(('right', 'top'))

    notes_vertical_gap = int(
        (EXAMPLE_HEIGHT - (len(BETWEEN_NOTES[id]) + 2) * FONT_BOX['note'][1]) /
        (len(BETWEEN_NOTES[id]) + 1))
    between_notes = [
        mpy.TextClip(note, **STYLE['note_between']).set_start(
            2 + (len(BETWEEN_NOTES[id]) - i) * 0.1).set_position((
                'right',
                FONT_BOX['note'][1] * (i + 1) + notes_vertical_gap * (i + 1),
            )) for i, note in enumerate(reversed(BETWEEN_NOTES[id]))
    ]
    return mpy.CompositeVideoClip(
        [image, note_from, note_to] + between_notes,
        size=(EXAMPLE_WIDTH, EXAMPLE_HEIGHT),
    )
Code Example #21
File: logger.py Project: adityabingi/CS285-DeepRL
    def log_paths_as_videos(self,
                            paths,
                            step,
                            max_videos_to_save=2,
                            fps=10,
                            video_title='video'):

        # reshape the rollouts
        videos = [p['image_obs'] for p in paths]

        # cap how many videos to save, then find the max rollout length
        max_videos_to_save = np.min([max_videos_to_save, len(videos)])
        max_length = videos[0].shape[0]
        for i in range(max_videos_to_save):
            if videos[i].shape[0] > max_length:
                max_length = videos[i].shape[0]

        # pad rollouts to all be same length
        for i in range(max_videos_to_save):
            if videos[i].shape[0] < max_length:
                padding = np.tile([videos[i][-1]],
                                  (max_length - videos[i].shape[0], 1, 1, 1))
                videos[i] = np.concatenate([videos[i], padding], 0)

            clip = mpy.ImageSequenceClip(list(videos[i]), fps=fps)
            txt_clip = (mpy.TextClip(video_title, fontsize=30,
                                     color='white')
                        .set_position(('center', 'top'))  # pos is a single (x, y) argument
                        .set_duration(10))

            video = mpy.CompositeVideoClip([clip, txt_clip])
            new_video_title = video_title + '{}_{}'.format(step, i) + '.mp4'
            filename = os.path.join(self._log_dir, new_video_title)
            video.write_videofile(filename, fps=fps)
Code Example #22
    def apply(self, clip, startTime, asset):
        image_clip = moviepy.video.VideoClip.ImageClip(asset.url,
                                                       duration=self.duration)

        leftmost = -image_clip.size[0]
        rightmost = clip.size[0]
        topmost = -image_clip.size[1]
        bottommost = clip.size[1]

        image_clip = moviepy.video.fx.all.resize(
            image_clip, newsize=lambda t: self.zoom_function(t))

        def unnormalize(pos):
            if isinstance(pos[0], str):
                return pos
            else:
                return leftmost + (rightmost - leftmost) * pos[0], topmost + (
                    bottommost - topmost) * pos[1]

        image_clip = image_clip.set_start(startTime)
        ret = editor.CompositeVideoClip([
            clip,
            image_clip.set_pos(
                lambda t: unnormalize(self.translate_function(t)))
        ])
        overtime = startTime + image_clip.duration
        return ret.set_duration(
            clip.duration if clip.duration > overtime else overtime)
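self.zoom_function and self.translate_function are supplied elsewhere. Plausible examples that slowly zoom in while drifting right; positions are normalized to [0, 1], matching unnormalize above:

zoom_function = lambda t: 1.0 + 0.05 * t              # resize factor over time
translate_function = lambda t: (0.1 + 0.02 * t, 0.5)  # normalized (x, y)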
Code Example #23
File: video.py Project: estysdesu/LTVideoFlask
    def Overlay(self):
        videoOrig = moviepy.VideoFileClip(self.VideoOrigName)
        videoOrigLen = videoOrig.duration

        codeAmts = 3 # how many video codes to create
        codeDur = 30 # unit: [s]
        codeColors = ["red", "green", "blue", "yellow", "purple", "orange"]
        np.random.shuffle(codeColors)
        codeColors = codeColors[0:codeAmts]
        codeNums = np.random.randint(0, 10000, codeAmts)
        codeNums = [str(n).zfill(4) for n in codeNums] # adds leading zeros for numbers less than 4 digits
        codeZip = list(zip(codeColors, codeNums))
        
        videoTexts = []
        for indx, pair in enumerate(codeZip):
            if indx == 0:
                startTime = 15 # unit: [s]
            elif indx == 1:
                startTime = np.floor(videoOrigLen / 2).astype(int) - 15
            elif indx == 2:
                startTime = np.floor(videoOrigLen - 45).astype(int)
            # collect one text overlay per code so all three appear
            videoTexts.append(self.overlayText(pair[0], pair[1], startTime, codeDur))

        videoWatermark = self.overlayWatermark(videoOrigLen, self.WatermarkPath)
        videoOverlayComp = moviepy.CompositeVideoClip([videoOrig] + videoTexts + [videoWatermark])

        return videoOverlayComp
Code Example #24
    def process_pre_video(n_clicks, dic_of_names, clip_1_start, clip_1_end, video_width, text, font, crop_bot, crop_top):
        if n_clicks is None:
            raise PreventUpdate

        if dic_of_names is None:
            return None

        if text is None:
            text = ''
        clip_1 = mpy.VideoFileClip(dic_of_names[list(dic_of_names)[0]])
        clip_1 = clip_1.fx(mpy.vfx.resize, width=video_width)
        clip_1 = clip_1.subclip(t_start=clip_1_start, t_end=clip_1_end)
        clip_1 = clip_1.fx(mpy.vfx.crop, y1=crop_top, y2=clip_1.size[1]-crop_bot)
        txt_clip = mpy.TextClip(text,
                           size=clip_1.size,
                           color='white',
                           bg_color='black',
                           font=font
                           ).set_duration(clip_1.duration)
        clip_1 = clip_1.set_mask(txt_clip.to_mask())

        ffname = Path("downloads") / f'{str(uuid.uuid4())}.mp4'
        Path.mkdir(ffname.parent, parents=True, exist_ok=True)
        cvc = mpy.CompositeVideoClip([clip_1], bg_color=(255, 255, 255))
        # preview video set to 540 width and 5 fps
        fn_pre = '.'.join(str(ffname).split('.')[:-1]) + 'preview_.webm'
        cvc.fx(mpy.vfx.resize, width=540).write_videofile(fn_pre, audio=False, fps=5)
        # write full deal
        cvc.write_videofile(str(ffname), audio=False, fps=clip_1.fps)

        vid = open(fn_pre, 'rb')
        base64_data = b64encode(vid.read())
        base64_string = base64_data.decode('utf-8')
        return [html.Video(src=f'data:video/webm;base64,{base64_string}', controls=True)], f'/{ffname}', False
Code Example #25
File: frame_4.py Project: strangest-quark/iConsent
    def generate_video_part(self, txnId):
        if not self.config.LOCAL:
            os.chdir("/var/task/")
        W, H = self.config.VIDEO_SIZE
        calendar(self.config, 'fifrom', txnId)
        calendar(self.config, 'fito', txnId)
        bgImage = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX + "bg_7.png")
        calendar_from_logo = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX_WRITE + 'fifrom-' + txnId + '.png'). \
            set_position((W / 2 - 170, H / 4)).resize(width=self.config.ICON_SIZE)
        calendar_to_logo = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX_WRITE + 'fito-' + txnId + '.png'). \
            set_position((W / 2 + 80, H / 4)).resize(width=self.config.ICON_SIZE)
        self.text_to_speech(self.fill_text(self.fill_text(Frame4.lang_map.get('audio4'), 1), 2), Frame4.lang_map.get('lan'), txnId)
        audioclip = AudioFileClip(self.config.SB_AUDIO_PATH_PREFIX + "audio-" + txnId + "-4.mp3")
        Frame4.map['text4'] = self.fill_text(self.fill_text(Frame4.lang_map.get('text4'), 1), 2)
        straight_text(Frame4.map['text4'], Frame4.lang_map.get('font'), Frame4.lang_map.get('fontsize4'), txnId, 4, self.config)
        text = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX_WRITE + '-text-4-' + txnId + '.png')
        video = mpy.CompositeVideoClip(
            [
                bgImage,
                calendar_from_logo,
                calendar_to_logo,
                text.set_position(('center', calendar_to_logo.size[1] + 20)),
            ],
            size=self.config.VIDEO_SIZE). \
            on_color(
            color=self.config.WHITE,
            col_opacity=1).set_duration(audioclip.duration)

        new_audioclip = CompositeAudioClip([audioclip])
        video.audio = new_audioclip
        os.remove(self.config.SB_AUDIO_PATH_PREFIX + 'audio-' + txnId + '-4.mp3')
        os.remove(self.config.SB_LOGO_PATH_PREFIX_WRITE + '-text-4-' + txnId + '.png')
        os.remove(self.config.SB_LOGO_PATH_PREFIX_WRITE + 'fifrom-' + txnId + '.png')
        os.remove(self.config.SB_LOGO_PATH_PREFIX_WRITE + 'fito-' + txnId + '.png')
        return video, 4
Code Example #26
File: titles.py Project: storyfeet/moviecutter
def blueInfo(message,
             wsize,
             start=0,
             cenpos=None,
             duration=9,
             bcol=None,
             align='center'):
    sx, sy = wsize

    if cenpos is None:
        cenpos = (sx / 2, sy / 2)
    px, py = cenpos
    dx = min(px, sx - px)
    res = mped.TextClip(message,
                        font='Courier-bold',
                        fontsize=dx * 2.5 / longLine(message),
                        color="blue",
                        align=align).set_duration(duration)
    rsx, rsy = res.size

    if bcol is None:
        return res.set_position((px - rsx / 2, py - rsy / 2)).set_start(start)

    # add Background Square
    colClip = mped.ColorClip(res.size, bcol, duration=duration)

    return mped.CompositeVideoClip([colClip, res], size=res.size).set_position(
        (px - rsx / 2, py - rsy / 2)).set_start(start)
Code Example #27
def annotate(clip, txt, txt_color='red', fontsize=30, font='Xolonium-Bold'):
    txtclip = editor.TextClip(txt,
                              fontsize=fontsize,
                              font=font,
                              color=txt_color)
    cvc = editor.CompositeVideoClip([clip, txtclip.set_pos(('center', 'top'))])
    return cvc.set_duration(clip.duration)
Code Example #28
def _process_track(instruments, instrument_names, source_dir,
                   instrument_config, notes, pulse_length, width, height,
                   max_velocity, queue, file_name, volumes, num_sim_tracks):
    """
    Composes one midi track into a stop motion video clip.
    Writes a file of this with the given file name.
    """
    try:
        instrument_clips = {
            name: _load_instrument_clips(name, instruments[name], source_dir,
                                         instrument_config)
            for name in instrument_names
        }
        parsed_clips = []
        scale_factor = int(math.floor(math.log(num_sim_tracks, 2) + 1))
        if os.path.isfile(file_name):
            queue.put((MSG_PROCESSED_SEGMENT, 0))
            queue.put((MSG_DONE, 1))
            return
        for note in notes:
            note_number = note.note_number
            clips, min_vol = instrument_clips[note.instrument_name]
            vol = 0.5
            if volumes is not None:
                vol = volumes.get(note.instrument_name, 0.5)

            c, offset, max_vol = clips[note_number]
            clip = c.copy()
            num_sim_notes = note.get_num_sim_notes()

            x, y, w, h = _partition(width, height, num_sim_notes,
                                    note.video_position)

            volume = (float(note.velocity) / float(max_velocity)) * (min_vol /
                                                                     max_vol)

            clip = clip.subclip(offset)
            clip = clip.set_start((note.start) * pulse_length)
            clip = clip.volumex(volume * vol)
            d = clip.duration
            clip = clip.set_duration(min(note.duration * pulse_length, d))
            clip = clip.set_position((x // scale_factor, y // scale_factor))
            clip = fx.resize(clip,
                             newsize=(w // scale_factor, h // scale_factor))
            parsed_clips.append(clip)
        track_clip = edit.CompositeVideoClip(size=(width // scale_factor,
                                                   height // scale_factor),
                                             clips=parsed_clips)
        track_clip.write_videofile(file_name,
                                   fps=30,
                                   verbose=False,
                                   progress_bar=False)

        queue.put((MSG_PROCESSED_SEGMENT, 0))
        queue.put((MSG_DONE, 1))

    except Exception as e:
        queue.put((MSG_FATAL_ERROR, e))
        traceback.print_exc(file=sys.stdout)
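_load_instrument_clips and _partition are project helpers that are not shown. _partition evidently splits the frame into a grid of cells for simultaneous notes and returns one cell's (x, y, w, h); a minimal sketch under that assumption:

import math

def _partition(width, height, num_sim_notes, video_position):
    cols = int(math.ceil(math.sqrt(num_sim_notes)))
    rows = int(math.ceil(num_sim_notes / float(cols)))
    w, h = width // cols, height // rows
    x = (video_position % cols) * w
    y = (video_position // cols) * h
    return x, y, w, h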
Code Example #29
File: frame_2.py Project: strangest-quark/iConsent
    def generate_video_part(self, txnId):
        if not self.config.LOCAL:
            os.chdir("/var/task/")
        W, H = self.config.VIDEO_SIZE
        bgImage = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX + "bg_2.png")
        fipList = self.input_map.get("fip")

        fip_x_position = 0
        fip_y_position = 0
        fip_img_path = ''

        num_images_pow = pow(2, len(fipList))

        if len(fipList) == 1:
            fip_x_position = W / 4 - self.config.BANK_ICON_SIZE / 3
            fip_img_path = self.config.SB_LOGO_PATH_PREFIX + self.image_map.get(
                fipList[0])
            fip_y_position = H / 4
            single = True
        else:
            fip_x_position = W / 6 - self.config.BANK_ICON_SIZE / 3
            fip_y_position = int(H / num_images_pow)
            self.concatenate_images(fipList, txnId)
            fip_img_path = self.config.SB_LOGO_PATH_PREFIX_WRITE + 'combined-' + txnId + '-banks.png'
            single = False

        height_final_image = self.config.BANK_ICON_SIZE * int(
            math.ceil(len(fipList) / 2))

        fip_logo = mpy.ImageClip(fip_img_path).set_position(
            (fip_x_position, fip_y_position)).resize(height=height_final_image)
        self.text_to_speech(self.fill_text(Frame2.lang_map.get('audio2')),
                            Frame2.lang_map.get('lan'), txnId)
        audioclip = AudioFileClip(self.config.SB_AUDIO_PATH_PREFIX + "audio-" +
                                  txnId + "-2.mp3")
        Frame2.map['text2'] = self.fill_text(Frame2.lang_map.get('text2'))
        straight_text(Frame2.map['text2'], Frame2.lang_map.get('font'),
                      Frame2.lang_map.get('fontsize2'), txnId, 2, self.config)
        text = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX_WRITE +
                             '-text-2-' + txnId + '.png')
        video = mpy.CompositeVideoClip(
            [
                bgImage,
                fip_logo,
                text.set_position((W / 5 + 50, H / 5)),
            ],
            size=self.config.VIDEO_SIZE). \
            on_color(
            color=self.config.WHITE,
            col_opacity=1).set_duration(audioclip.duration)
        new_audioclip = CompositeAudioClip([audioclip])
        video.audio = new_audioclip
        os.remove(self.config.SB_AUDIO_PATH_PREFIX + 'audio-' + txnId +
                  '-2.mp3')
        os.remove(self.config.SB_LOGO_PATH_PREFIX_WRITE + '-text-2-' + txnId +
                  '.png')
        if not single:
            os.remove(fip_img_path)
        return video, 2
Code Example #30
def annotate(clip, txt, txt_color='white', fontsize=30, font='MDotum'):
    # Writes a text at the bottom of the clip  'Xolonium-Bold'
    txtclip = editor.TextClip(txt,
                              fontsize=fontsize,
                              font=font,
                              color=txt_color).on_color(color=[0, 0, 0])
    cvc = editor.CompositeVideoClip([clip, txtclip.set_pos(('center', 50))])
    return cvc.set_duration(clip.duration)