Example #1
def main():
    clips = []
    with open("names.txt") as f:
        names = f.readlines()
        print(names)
        for i in names:
            i = i.rstrip('\n')  # drop the trailing newline
            clips.append(make(i))
    print(clips)
    concatenate_videoclips(clips).set_fps(30).write_videofile("飞跃起点理.mp4")
    exit()  # NOTE: everything below this call is dead code and never runs
    clip1 = ImageClip("./images/2.jpg")
    txt = TextClip("吼哇!123ASDasd",  # TextClip expects str on Python 3, not bytes
                   font="SimSun",
                   color='white',
                   fontsize=48)
    txt_col = txt.on_color(size=(clip1.w, txt.h + 10),
                           color=(0, 0, 0),
                           pos=(6, 'center'),
                           col_opacity=0.6).set_pos(lambda t: ((200), (800)))
    w, h = moviesize = clip1.size
    txt_mov = txt_col.set_pos(lambda t: (max(w / 30, int(w - 1 * w * t)),
                                         max(5 * h / 6, int(100 * t))))

    CompositeVideoClip([
        clip1, txt_mov
    ]).set_duration(1).set_fps(30).write_videofile("my_concatenation.mp4")
    CompositeVideoClip([clip1, txt_mov
                        ]).set_duration(1).set_fps(30).save_frame("test.png",
                                                                  t="00:00:01")
Example #2
def compose(resources, params):
    """Creates a video clip out of the videos and the images of the game as well as the audio from the description"""

    # Set up a variable to save the duration of the clip
    current_duration = 0

    # Set the limit parameters
    process_images = True
    process_videos = True
    process_audio = True

    # Set if the images should be processed
    if 'image_limit' in params:
        if params['image_limit'] == 0:
            process_images = False

    # Set if the videos should be processed
    if 'video_limit' in params:
        if params['video_limit'] == 0:
            process_videos = False

    # Set if audio should be processed
    if 'generate_audio' in params:
        process_audio = params['generate_audio']

    # Add the videos to the composed clip
    if process_videos:
        for video in range(len(resources['videos'])):
            # Set the start of each video
            resources['videos'][video] = VideoFileClip(resources['videos'][video]).set_start(current_duration)

            # Set the new duration of the clip
            current_duration += resources['videos'][video].duration

    # Add the images to the composed clip
    if process_images:
        for image in range(len(resources['images'])):
            # Get the images into a work variable
            tmp = resources['images'][image]

            # Create an image clip and set the start properly
            resources['images'][image] = ImageClip(resources['images'][image], duration=5).set_start(current_duration)

            # Set the name of the image clip
            resources['images'][image].filename = tmp

            # Set the new duration for the clip
            current_duration += resources['images'][image].duration

    # Add the audio to the video clip
    if process_audio:
        # Create the final clip with audio
        return CompositeVideoClip(resources['videos'] + resources['images']).set_audio(
            set_up_audio_clip(resources['audio']))

    # Create the final clip without audio
    return CompositeVideoClip(resources['videos'] + resources['images'])
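A hedged usage sketch for compose(); the dict shapes are inferred from the code above, and the file names are placeholders:

resources = {
    'videos': ['intro.mp4', 'gameplay.mp4'],  # paths, replaced in place by VideoFileClips
    'images': ['screenshot.png'],             # paths, shown for 5 seconds each
    'audio': 'description.mp3',               # handed to set_up_audio_clip()
}
params = {'generate_audio': True}  # an 'image_limit'/'video_limit' of 0 would skip that media type
final = compose(resources, params)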
Example #3
def mergeVideos():
    clip1 = VideoFileClip("text.mp4").subclip(0, 15)
    print(clip1.duration)
    clip2 = VideoFileClip("mhls.mp4").subclip(0, 15).resize(0.5)
    # set_pos expects a single (x, y) tuple; size the canvas to fit both
    # clips side by side, and keep a reference so the composite is written
    video = CompositeVideoClip(
        [clip1.set_pos(("left", "center")),
         clip2.set_pos(("right", "center"))],
        size=(clip1.w + clip2.w, clip1.h))

    video.write_videofile("merge_video.mp4")
Example #4
    def make_crab(self, t, u_id):
        """Non blocking crab rave video generation from DankMemer bot
        
        https://github.com/DankMemer/meme-server/blob/master/endpoints/crab.py
        """
        fp = str(cog_data_path(self) / f"Verdana.ttf")
        clip = VideoFileClip(str(cog_data_path(self)) + "/template.mp4")
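        # Assumption from the usage below: t is a two-item sequence, t[0] the top text line, t[1] the bottom one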
        text = TextClip(t[0], fontsize=48, color="white", font=fp)
        text2 = (
            TextClip("____________________", fontsize=48, color="white", font=fp)
            .set_position(("center", 210))
            .set_duration(15.4)
        )
        text = text.set_position(("center", 200)).set_duration(15.4)
        text3 = (
            TextClip(t[1], fontsize=48, color="white", font=fp)
            .set_position(("center", 270))
            .set_duration(15.4)
        )

        video = CompositeVideoClip(
            [clip, text.crossfadein(1), text2.crossfadein(1), text3.crossfadein(1)]
        ).set_duration(15.4)
        video.write_videofile(
            str(cog_data_path(self)) + f"/{u_id}crabrave.mp4",
            threads=1,
            preset="superfast",
            verbose=False,
            logger=None,
            temp_audiofile=str(cog_data_path(self) / "crabraveaudio.mp3")
        )
        clip.close()
        video.close()
        return True
Example #5
 def Gen_Video(self, beat_times, mp3path, uuid):
     FONT_URL = '../font/heimi.TTF'
     with open(uuid + '.txt', 'r', encoding='utf-8') as f:
         text_str = f.read()
     word_list = text_str.split('\n')
     clips = []
     for index, beat_time in enumerate(beat_times[:-1]):
         if index >= len(word_list):
             break
         print(f'{index + 1}/{len(beat_times)}——{word_list[index]}')
         text_clip = TextClip(
             word_list[index],
             fontsize=320 // 8,
             color='white',
             size=(320, 640),
             method='caption',
             font=FONT_URL) \
             .set_start(beat_time) \
             .set_end(beat_times[index + 1])
         text_clip = text_clip.set_pos('center')
         clips.append(text_clip)
     final_clip = CompositeVideoClip(clips)
     audio_clip = AudioFileClip(mp3path)
     final_video = final_clip.set_audio(audio_clip)
     final_video.write_videofile(str(uuid) + '.mp4',
                                 fps=30,
                                 codec='mpeg4',
                                 preset='ultrafast',
                                 audio_codec="libmp3lame",
                                 threads=4)
Example #6
    def generate(self, avatars, text, usernames, kwargs):
        name = uuid.uuid4().hex + '.gif'

        @after_this_request
        def remove(response):  # pylint: disable=W0612
            try:
                os.remove(name)
            except (FileNotFoundError, OSError, PermissionError):
                pass

            return response

        clip = VideoFileClip("assets/kowalski/kowalski.gif")
        text = TextClip(text,
                        fontsize=36,
                        method='caption',
                        size=(245, None),
                        align='West',
                        color='black',
                        stroke_color='black',
                        stroke_width=1,
                        font='Verdana').set_duration(clip.duration)
        text = text.set_position((340, 65)).set_duration(clip.duration)
        text = rotate(text, angle=10, resample='bilinear')

        video = CompositeVideoClip([clip, text]).set_duration(clip.duration)

        video.write_gif(name)
        clip.close()
        video.close()
        return send_file(name, mimetype='image/gif')
Example #7
def generate_video(saved_model_path, video_category=None):
    """Uses the trained model to predict the frames and produce a video out of them"""
    # load model
    model = load_model(saved_model_path)

    which_one = video_category
    train_files, test_files = get_train_test_files(which=which_one)
    test_gen = get_data_gen(files=test_files,
                            timesteps=timesteps,
                            batch_size=batch_size,
                            im_size=(im_width, im_height))

    y_true = []
    y_pred = []

    for _ in range(200):
        x, y = next(test_gen)
        y_true.extend(y)

        predictions = model.predict_on_batch(x)
        y_pred.extend(predictions)

    clip1 = ImageSequenceClip([denormalize(i) for i in y_true], fps=5)
    clip2 = ImageSequenceClip([denormalize(i) for i in y_pred], fps=5)
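    # Lay ground truth (left) and predictions (right) side by side on a double-width canvas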
    clip2 = clip2.set_position((clip1.w, 0))
    video = CompositeVideoClip((clip1, clip2), size=(clip1.w * 2, clip1.h))
    video.write_videofile(
        "{}.mp4".format(which_one if which_one else "render"), fps=5)
Example #8
def main(url, output):
    driver = webdriver.Chrome()
    remote_url = url
    driver.get(remote_url)
    
    png = chrome_takeFullScreenshot(driver)
    with open("website_image.png", 'wb') as f:
        f.write(png)

    driver.close()

    clip = ImageClip('website_image.png')
    
    video_width = int(clip.size[0] + 800)
    video_height = int(video_width/1.5)

    bg_clip = ColorClip(size=(video_width, video_height), color=[228, 220, 220])

    scroll_speed = 180
    total_duration = (clip.h - 800)/scroll_speed

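    # Crop filter: the frame at time t is an 800 px tall window of the screenshot,
    # slid down by scroll_speed pixels per second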
    fl = lambda gf,t : gf(t)[int(scroll_speed*t):int(scroll_speed*t)+800,:]
    clip = clip.fl(fl, apply_to=['mask'])

    video = CompositeVideoClip([bg_clip, clip.set_pos("center")])
    video.duration = total_duration
    if not output.endswith('.mp4'):
        output += '.mp4'
    video.write_videofile(output, fps=26)
    os.remove('website_image.png')
Example #9
    def composite_clips(self, clips: dict):
        try:
            watermark = ImageClip(self.overlay).set_position((0.7, 0.1),
                                                             relative=True)
        except FileNotFoundError:
            logging.warning(
                "No watermark found -> video will be created without watermark"
            )
            watermark = None

        # Requires metadata about the clip
        txts = self.generate_clip_text(self.metadata)

        composite_clips = {}
        for clip_id, clip in clips.items():
            composition = []
            duration = clip.duration
            composition.append(clip)

            if watermark:
                composition.append(watermark.set_duration(duration))
            composition.append(txts[clip_id].set_duration(duration))
            composite_clips[clip_id] = CompositeVideoClip(composition,
                                                          size=self.target_res)
        return composite_clips
Example #10
def make_zoom(scale_func,
              path=im_path,
              cx=32,
              cy=32,
              scale=10,
              duration=5,
              fps=10,
              oversample=2.0):
    ic = ImageClip(path).resize(oversample)
    bg = ColorClip((ic.w, ic.h), (0xFF, 0xFF, 0xFF)).set_duration(duration)

    ic.duration = duration
    cx *= oversample
    cy *= oversample
    total_frames = int(duration * fps)

    def zoom_between_frames(startf, endf):
        scales = [
            scale_func(startf + f * (endf - startf) / total_frames)
            for f in range(total_frames)
        ]
        return make_zoom_movie(ic, scales, fps, (cx, cy))

    # we sometimes seem to get duplicate frames at the start
    # and at the end
    ret = CompositeVideoClip([
        bg,
        zoom_between_frames(total_frames, 2.0 * total_frames),
        zoom_between_frames(0, total_frames)
    ])
    ret.size = ic.size
    # ret.duration = duration
    return ret.resize(1.0 / oversample)
Example #11
def aggregate_videos():
    current_dir = os.getcwd()
    video_paths = extract_video_paths(path='video_files')

    clips_aggregated = list()
    names_aggregated = list()
    clips = list()
    names = list()
    clips_dur = 0
    for video in video_paths:
        file_path = os.path.join(current_dir, 'video_files', video)
        clip = VideoFileClip(file_path)
        clip = CompositeVideoClip([clip], size=DIMENSION)
        if clips_dur < MAX_DUR:
            clips.append(clip)
            names.append(video)
            clips_dur += clip.duration
        else:
            clips_aggregated.append(clips)
            names_aggregated.append(names)
            # start the next batch with the current clip so it is not dropped
            clips = [clip]
            names = [video]
            clips_dur = clip.duration
    if clips:  # flush the last, partially filled batch
        clips_aggregated.append(clips)
        names_aggregated.append(names)
    return clips_aggregated, names_aggregated
Example #12
def generate_caption(pos, proj_id, vid_id, capt, colour, font, font_size):
    pos = [int(p) for p in pos]
    print(pos)
    vid = VideoFileClip(
        os.path.join(app.config['BASE_DIR'], app.config['VIDS_LOCATION'],
                     str(proj_id),
                     str(vid_id) + ".mp4"))
    vid = vid.subclip(0, vid.duration).resize((852, 480))

    print(int(sizes[font_size] * 0.44357))
    caption = TextClip(txt=capt,
                       fontsize=int(sizes[font_size] * 0.44357),
                       color='#' + str(colour),
                       font=font).set_position(pos).set_duration(vid.duration)

    vid = CompositeVideoClip([vid, caption])

    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%H-%M-%S')

    vid.save_frame(os.getcwd() + '/app/static/img/' + str(proj_id) +
                   '/{}_frame_caption_{}.png'.format(str(vid_id), st))

    return '/static/img/{}/{}_frame_caption_{}.png'.format(
        str(proj_id), str(vid_id), st)
Example #13
def write_video_file(file_path, pred_label_score, gt_info, save_dir):
    video_clip = VideoFileClip(file_path)
    text_clip = TextClip(txt=pred_label_score,
                         font='utils/SimHei.ttf',
                         color='white',
                         fontsize=32,
                         bg_color='black',
                         align='West').set_pos(
                             ("left", "top")).set_duration(video_clip.duration)
    compose_list = [video_clip, text_clip]
    if gt_info != "":
        gt_text_clip = TextClip(txt=gt_info,
                                font='utils/SimHei.ttf',
                                color='white',
                                fontsize=32,
                                bg_color='black',
                                align='East').set_pos(
                                    ("right", "bottom")).set_duration(
                                        video_clip.duration)
        compose_list.append(gt_text_clip)
    result = CompositeVideoClip(compose_list)
    video_name = os.path.basename(file_path)
    result.write_videofile(save_dir + "/" + video_name,
                           fps=25,
                           codec='libx264',
                           audio_codec='aac',
                           temp_audiofile='temp-audio.m4a',
                           remove_temp=True)
Example #14
        def process_clip():
            clip = VideoFileClip(file_path, target_resolution=[720, 1280])
            # keep only the first 10 seconds
            if clip.duration > 10:
                clip = clip.subclip(0, 10)

            safe_duration = max(0, clip.duration - 0.1)

            # Freeze-frame stuff
            freeze_frame_sound = AudioFileClip(
                "assets/wellberightback/sound.mp3")
            freeze_frame = ImageClip(clip.get_frame(safe_duration))\
                .fx(vfx.painting, black=0.001)\
                .fx(vfx.colorx, factor=0.8).set_duration(freeze_frame_sound.duration)
            text = ImageClip("assets/wellberightback/text.png")\
                .set_pos( lambda t: (50, 50) )
            freeze_compos = CompositeVideoClip([freeze_frame, text])\
                .set_duration(freeze_frame_sound.duration).set_audio(freeze_frame_sound)

            # Final clip
            final_clip = concatenate_videoclips([clip, freeze_compos])

            return final_clip, [
                clip, freeze_frame_sound, freeze_frame, text, freeze_compos
            ]
Example #15
def label_clip(video_path, label, start_second, end_second):
    clip = VideoFileClip(video_path)
    text_clip = TextClip(label, fontsize=40, color='white', bg_color='red')
    text_clip = text_clip.set_pos(('center', 'bottom'))
    text_clip = text_clip.set_start(start_second).set_duration(end_second -
                                                               start_second)
    return CompositeVideoClip([clip, text_clip])
Example #16
def collage(output_video, *input_videos):
    input_clips = []
    for path in input_videos:
        video_clip = VideoFileClip(path)
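        # The file name is assumed to encode the amplification factor after '@',
        # e.g. "clip@10.mp4" yields amp == "10"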
        _, _, amp = os.path.basename(path).partition("@")
        amp, _, _ = amp.partition('.')
        text_clip = (TextClip(
            txt='Amplified {}'.format(amp) if amp else 'Input',
            color='white',
            method='label',
            fontsize=32,
            font='Helvetica-Bold').set_duration(
                video_clip.duration).set_position(('center', 0.05),
                                                  relative=True))
        clip = CompositeVideoClip((video_clip, text_clip), use_bgclip=True)
        input_clips.append(clip)
    if len(input_clips) < 4:
        num_columns = 1
    elif len(input_clips) < 5:
        num_columns = 2
    else:
        num_columns = 3
    final_clip = clips_array([
        input_clips[i:i + num_columns]
        for i in range(0, len(input_clips), num_columns)
    ])
    final_clip.write_videofile(output_video, audio=False)
    return output_video
Example #17
def create_mtg_gif(name, id, border):
    if border == 'm':  # Modern (post-8th Ed)
        card_upper_corner = (19, 38)
        gif_width = 202 - card_upper_corner[0]
        gif_height = 172 - card_upper_corner[1]
    elif border == 'c':  # Current (post-Magic 2015)
        card_upper_corner = (17, 34)
        gif_width = 204 - card_upper_corner[0]
        gif_height = 173 - card_upper_corner[1]
    else:  # Old (pre-8th Ed)
        card_upper_corner = (25, 30)
        gif_width = 196 - card_upper_corner[0]
        gif_height = 168 - card_upper_corner[1]

    mtg_card = Image.open(BytesIO(requests.get(get_mtg_image(id)).content))
    mtg_card = ImageClip(np.asarray(mtg_card)).resize((222, 310))

    get_giphy_gif(name)
    giphy_gif = (VideoFileClip(
        'giphy_gif.mp4',
        target_resolution=(gif_height, gif_width)).set_pos(card_upper_corner))

    if giphy_gif.duration < 2:
        giphy_gif = giphy_gif.fx(loop, n=1 + int(2 // giphy_gif.duration))

    mtg_gif = CompositeVideoClip([mtg_card, giphy_gif])
    mtg_gif = mtg_gif.set_start(0).set_duration(giphy_gif.duration)
    # mtg_gif.write_gif("mtg_gif.gif")
    mtg_gif.write_videofile("mtg_gif.mp4",
                            codec='libx264',
                            bitrate=str(np.power(10, 7)),
                            verbose=False,
                            progress_bar=False,
                            audio=False,
                            ffmpeg_params=['-pix_fmt', 'yuv420p'])
Example #18
def create_video_of_list_of_clips(clips, output):
    print('Rendering video to location  %s' % (output))
    final_clips = []

    for clip in clips:
        path = constants.DOWNLOAD_LOCATION + clip.channel.slug + '/' + clip.slug + '.mp4'

        print(path)

        video = VideoFileClip(path)
        title = TextClip(txt=clip.channel.name + ': ' + clip.title,
                         font='Amiri-regular',
                         color='white',
                         fontsize=55).set_duration(8)
        title_mov = title.set_pos((0.05, 0.8), relative=True)

        # Create video object with text
        final_clip = CompositeVideoClip([video, title_mov]).resize((1280, 720))
        final_clips.append(final_clip)

        # Remove from memory
        del title
        del video
        del final_clip

    # Add clips together
    finished = concatenate_videoclips(final_clips, method='compose')

    # Render video
    finished.write_videofile(output, fps=30)
Example #19
def make_test_vid(note_vid, segments, fname):

    t = 0

    clips = []
    padding = 2
    i = 0

    for note, (start, end) in segments:
        clip = note_vid.subclip(start, end)
        clip = clip.set_start(t)

        clips.append(clip)

        txt = (TextClip("%d %s" % (i, note),
                        color='white',
                        font='Ubuntu-Bold',
                        fontsize=22).margin(1).margin(
                            top=30, left=30, opacity=0.0).set_pos(
                                ('left',
                                 'top')).set_duration(end - start +
                                                      padding).set_start(t))
        clips.append(txt)

        t += (end - start) + padding
        i += 1

        print(t, i)

    full_video = CompositeVideoClip(clips)
    print('full length %f' % full_video.duration)
    full_video.write_videofile(fname, threads=20)
Example #20
def make_clip_piano(clips, loop=False, width=100, height=100):
    """
    http://zulko.github.io/moviepy/getting_started/compositing.html
    """
    assert(len(clips) == 12)
    white_key_inds = [0,2,4,5,7,9,11]
    black_key_inds = [1,3,6,8,10]
    white_key_px = np.arange(0.0, 1.0, 1.0/7)
    black_key_px = white_key_px + (1.0/14)
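    # a real keyboard has no black key between E and F, so drop that candidate slot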
    black_key_px = np.hstack([black_key_px[:2], black_key_px[3:]])

    # arrange clips as if on a keyboard
    cur_clips = [load_bg_clip(7*width)]
    for c in range(12):  # xrange is Python 2-only
        if c in white_key_inds:
            px = white_key_px[white_key_inds.index(c)]
            py = 0.5
        else:
            px = black_key_px[black_key_inds.index(c)]
            py = 0.0
        cur_clip = clips[c]
        sz = min(cur_clip.w, cur_clip.h)
        cur_clip = cur_clip.crop(x_center=cur_clip.w/2, y_center=cur_clip.h/2, width=sz, height=sz) # crop to be square (centered)
        cur_clip = cur_clip.resize(width=width) # fit within grid cell
        cur_clip = cur_clip.set_pos((px, py), relative=True) # place in grid
        if loop:
            cur_clip = cur_clip.loop()
        cur_clips.append(cur_clip)
    return CompositeVideoClip(cur_clips, size=(7*width, 2*height))
Example #21
def save_out(tracks, outfile=None, filetype='mp4'):

    out = []

    vids = [t for t in tracks if t['type'] == 'vid']
    texts = [t for t in tracks if t['type'] == 'text']

    for v in vids:
        c = VideoFileClip(v['content']).subclip(v['in'],
                                                v['in'] + v['duration'])
        c = c.set_start(v['start'])
        out.append(c)

    size = out[0].size

    for t in texts:
        c = create_sub(t['content'], size, rect_offset=195, min_height=55)
        c = c.set_start(t['start'])
        c = c.set_duration(t['duration'])
        out.append(c)

    final_clip = CompositeVideoClip(out)
    if outfile is None:
        outfile = 'msg_' + str(int(time.time())) + '.mp4'
    if filetype == 'gif':
        outfile = outfile.replace('.mp4', '.gif')
        final_clip.speedx(1.7).write_gif(outfile, fps=7, loop=1)
    else:
        final_clip.write_videofile(outfile, fps=24, codec='libx264')
    return outfile
Example #22
def centerMerge():
    clip1 = VideoFileClip("text.mp4", audio=False).resize([540, 1024])
    print(clip1.duration)
    clip3 = VideoFileClip("cut_video.mp4", has_mask=True, audio=True)
    video = CompositeVideoClip([clip1, clip3.set_position('center')])
    video.write_videofile("centermergr.mp4")  # 先不加音频
    video.close()
Example #23
    def generate(self, avatars, text, usernames, kwargs):
        name = uuid.uuid4().hex + '.mp4'
        if len(text) >= 400:
            text = text[:400] + '...'

        @after_this_request
        def remove(response):  # pylint: disable=W0612
            try:
                os.remove(name)
            except (FileNotFoundError, OSError, PermissionError):
                pass

            return response

        clip = VideoFileClip("assets/letmein/letmein.mp4")



        textclip = TextClip(txt=text, bg_color='White', fontsize=32, font='Verdana', method='caption', align='west', size=(clip.size[0], None)).set_duration(clip.duration)

        color = ColorClip((clip.size[0], textclip.size[1]), color=(255, 255, 255), ismask=False).set_duration(clip.duration)

        video = CompositeVideoClip([clip.set_position(("center", textclip.size[1])), color, textclip],
                                   size=(clip.size[0], textclip.size[1] + clip.size[1]))

        video.write_videofile(name, threads=4, preset='superfast', verbose=False)
        clip.close()
        video.close()
        return send_file(name, mimetype='video/mp4')
Example #24
def merge_videos(filepath_1, filepath_2, filepath_out):
    """
    Overlay second video in the bottom right corner of the first video.
    """
    # If the video generation failed, merge fails
    if not os.path.isfile(filepath_1) or not os.path.isfile(filepath_2):
        print("Error: The filepath(s) are invalid.")
        return False

    # Merge original lesson video with Wav2Lip result video
    clip1 = VideoFileClip(fr'{filepath_1}')  # Use ./ instead of /
    clip2 = VideoFileClip(fr'{filepath_2}')

    clip2 = resize_clip_wrt(clip1, clip2)
    composite_clip = CompositeVideoClip([
        clip1,
        clip2.set_position(("right", "bottom")).set_start(0).crossfadein(1)
    ])

    # Use a temp audio file if audio is not working
    # It seems overriding an existing file will result in second video not running correctly
    try:
        # final_clip.write_videofile(r'./results/result_voice.mp4')
        composite_clip.write_videofile(fr'{filepath_out}',
                                       codec='libx264',
                                       audio_codec='aac',
                                       temp_audiofile='temp-audio.m4a',
                                       remove_temp=True)
        return True
    except Exception as e:
        print(e)
        return False
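A minimal usage sketch, assuming both input files exist and the project helper resize_clip_wrt() is importable; the file names are placeholders:

if merge_videos('lesson.mp4', 'wav2lip_result.mp4', 'merged.mp4'):
    print('Overlay video written to merged.mp4')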
Example #25
def gifEngine(starttime,
              endtime,
              videofileloc,
              srtfileloc,
              outfileloc,
              logger='gifEngine.log'):
    logging.basicConfig(filename=logger, level=logging.DEBUG)
    logger = logging.getLogger(__name__)
    prolog.basic_config()
    # creating the initial GIF
    try:
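        # SubtitlesClip calls this factory once per subtitle cue to render its text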
        generator = lambda txt: TextClip(
            txt, font='Impact', fontsize=28, color='white')
        video = VideoFileClip(videofileloc)
        sub = SubtitlesClip(srtfileloc, generator).set_position(
            ("center", "bottom"), relative=True)
        composite = CompositeVideoClip([video, sub])
        composite = composite.subclip(starttime, endtime)
        composite.write_gif(outfileloc,
                            program='ffmpeg',
                            opt='palettegen',
                            logger=logger,
                            verbose=True)  # using new palettegen opt
        return 0
    except (IOError, OSError) as err:
        return err
Example #26
def video_collage(vid_names, ordering, probs, correct_class_prob, labels):
    sel_clips = []
    for i in range(6):
        vname = vid_names[ordering[i]]
        # Load all the metadata for this video
        with open(vname.replace(
                'images/', 'scenes/').replace('.avi', '.json'), 'r') as fin:
            metadata = json.load(fin)
        gt = np.argmax(labels[ordering[i]])
        pred = np.argmax(probs[ordering[i]])
        pred_map = location_predictions_to_map(probs[ordering[i]], metadata)
        print(vname, gt, pred, correct_class_prob[ordering[i]])
        main_video = VideoFileClip(vname).margin(3)
        sel_clips.append(CompositeVideoClip([
            clips_array([[
                main_video,
                ImageSequenceClip(pred_map, fps=main_video.fps),
            ]]),
            TextClip(
                "GT {} Pred {}".format(gt, pred),
                font=MOVIEPY_FONT)
            .set_pos((10, 10))
            .set_duration(main_video.duration),
        ]))
    return clips_array(chunkify(sel_clips, 3))
Example #27
def createPartition(index, videoData):
    os.chdir("videos/" + str(index))
    # Get the start and end second from the YAML config and trim each video into a clip that can be concatenated.
    start = videoData['startFrame']
    end = videoData['endFrame']
    print(start, end)
    mp4 = findmp4()
    print(mp4)
    os.chdir(cwd)
    fileLoc = 'videos' + '\\' + str(index) + '\\' + mp4
    video = VideoFileClip(fileLoc).subclip(start - 4, end + 2).fx(
        vfx.fadeout, duration=1).fx(vfx.fadein, duration=5)
    # Make the text. Many more options are available.
    txt_clip = (TextClip(videoData['date'],
                         fontsize=35,
                         color='white',
                         font='Hans Kendrick').set_position(
                             ("center", 80)).set_duration(5).fx(
                                 vfx.fadeout, duration=1.5).fx(vfx.fadein,
                                                               duration=3))

    result = CompositeVideoClip([video, txt_clip])  # Overlay text on video

    result.write_videofile(
        "partitions\\" + str(index) +
        ".mp4")  # Write the partition into a new partition folder
    os.chdir(cwd)
    video.close()
    txt_clip.close()
    result.close()
Example #28
 def prepend_intertitle(
     self,
     size: Optional[Size] = None,
     color: str = DEFAULT_INTERTITLE_COLOR,
     font: str = DEFAULT_INTERTITLE_FONT,
     fontsize: int = DEFAULT_INTERTITLE_FONTSIZE,
     position: str = DEFAULT_INTERTITLE_POSITION,
     duration: int = DEFAULT_INTERTITLE_DURATION,
 ):
     if not self.meta.text:
          logger.warning('%s: Missing intertitle text', self.meta.path)
         return
     logger.info('%s: Intertitle "%s"', self.meta.path, self.meta.text)
     if not size:
         size = Size(width=self.video_file_clip.w,
                     height=self.video_file_clip.h)
     text_clip = TextClip(
         self.meta.text.replace('|', '\n'),
         size=(size.width * INTERTITLE_TEXT_WIDTH_FACTOR, None),
         color=color,
         font=font,
         fontsize=fontsize,
         method='caption',
         align='center',
     )
     composite_clip = CompositeVideoClip([text_clip.set_pos(position)],
                                         (size.width, size.height))
     intertitle_clip = composite_clip.subclip(0, duration)
     self.video_file_clip = concatenate_videoclips(
         [intertitle_clip, self.video_file_clip], method='compose')
Example #29
 def create_thumbnail(self, clip: Clip):
     logging.info("Creating yt thumbnail")
     thumbnail_base = self.get_thumbnail_base(clip)
     emoji = self.get_emoji()
     overlay = ImageClip(os.path.join(self.asset_path, "overlay_thumbnail.png")).set_opacity(0.8)
     number = self.get_number_textclip()
     try:
         logo = (
             ImageClip(os.path.join(self.asset_path, utils.get_valid_game_name(self.game), "game_logo.png"))
             .fx(resize, 1.3)
             .set_position((0.04, 0.6), relative=True)
         )
     except FileNotFoundError:
         logging.warning("No game_logo in associated asset folder -> thumbnail will be created without logo")
         logo = None
     thumbnail = [
         thumbnail_base.set_duration(None),
         emoji.set_duration(None),
         overlay.set_duration(None),
         number.set_duration(None),
     ]
     if logo:
         thumbnail.append(logo.set_duration(None))
     thumbnail_result = CompositeVideoClip(thumbnail, size=[1280, 720])
     thumbnail_result.save_frame(os.path.join(self.compilation_dir, "thumbnail.png"), t=0, withmask=True)
Example #30
 def add_caption(caption, clip):
     text = (TextClip(caption,
                      font='Amiri-regular',
                      color='white',
                      fontsize=80).margin(40).set_duration(
                          clip.duration).on_color(
                              color=(0, 0, 0), col_opacity=0.6))
     return CompositeVideoClip([clip, text])
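A minimal usage sketch for add_caption(), with placeholder file names:

from moviepy.editor import VideoFileClip

clip = VideoFileClip('input.mp4')
add_caption('Hello, world', clip).write_videofile('captioned.mp4')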