示例#1
0
    def make_crab(self, t, u_id):
        """Non-blocking crab rave video generation from DankMemer bot.

        https://github.com/DankMemer/meme-server/blob/master/endpoints/crab.py

        :param t: two caption strings — t[0] is the top line, t[1] the bottom.
        :param u_id: unique id embedded in the output filename.
        :return: True on success.
        """
        # Resolve the data path once and use pathlib joins throughout
        # (the original mixed `/` joins with string concatenation, and used
        # an f-string with no placeholders for the font path).
        data_path = cog_data_path(self)
        fp = str(data_path / "Verdana.ttf")
        clip = VideoFileClip(str(data_path / "template.mp4"))
        text = TextClip(t[0], fontsize=48, color="white", font=fp)
        text = text.set_position(("center", 200)).set_duration(15.4)
        text2 = (
            TextClip("____________________", fontsize=48, color="white", font=fp)
            .set_position(("center", 210))
            .set_duration(15.4)
        )
        text3 = (
            TextClip(t[1], fontsize=48, color="white", font=fp)
            .set_position(("center", 270))
            .set_duration(15.4)
        )

        video = CompositeVideoClip(
            [clip, text.crossfadein(1), text2.crossfadein(1), text3.crossfadein(1)]
        ).set_duration(15.4)
        try:
            video.write_videofile(
                str(data_path / f"{u_id}crabrave.mp4"),
                threads=1,
                preset="superfast",
                verbose=False,
                logger=None,
                temp_audiofile=str(data_path / "crabraveaudio.mp3"),
            )
        finally:
            # BUG FIX: release readers even if encoding fails
            # (the original leaked both clips on error).
            clip.close()
            video.close()
        return True
示例#2
0
def centerMerge():
    """Overlay cut_video.mp4 (carrying its own alpha mask) centered on a
    resized text.mp4 and write the composite to centermergr.mp4."""
    clip1 = VideoFileClip("text.mp4", audio=False).resize([540, 1024])
    print(clip1.duration)
    clip3 = VideoFileClip("cut_video.mp4", has_mask=True, audio=True)
    video = CompositeVideoClip([clip1, clip3.set_position('center')])
    video.write_videofile("centermergr.mp4")  # audio not added yet (translated original note)
    video.close()
    # BUG FIX: the source clips were never closed, leaking file handles
    # and ffmpeg reader processes.
    clip1.close()
    clip3.close()
示例#3
0
    def generate(self, avatars, text, usernames, kwargs):
        """Render the Kowalski meme GIF with *text* overlaid and send it back.

        The generated file is removed after the response is delivered.
        """
        name = uuid.uuid4().hex + '.gif'

        @after_this_request
        def remove(response):  # pylint: disable=W0612
            # Best-effort cleanup of the temporary gif once it has been sent.
            try:
                os.remove(name)
            except (FileNotFoundError, OSError, PermissionError):
                pass

            return response

        clip = VideoFileClip("assets/kowalski/kowalski.gif")
        caption = TextClip(
            text,
            fontsize=36,
            method='caption',
            size=(245, None),
            align='West',
            color='black',
            stroke_color='black',
            stroke_width=1,
            font='Verdana',
        )
        # Pin the caption over the speech-bubble area and tilt it slightly.
        caption = caption.set_position((340, 65)).set_duration(clip.duration)
        caption = rotate(caption, angle=10, resample='bilinear')

        video = CompositeVideoClip([clip, caption]).set_duration(clip.duration)

        video.write_gif(name)
        clip.close()
        video.close()
        return send_file(name, mimetype='image/gif')
示例#4
0
def createPartition(index, videoData):
    """Cut one source video into a faded, captioned partition clip.

    Reads start/end seconds from *videoData*, trims the clip found under
    videos/<index>/, overlays a date caption, and writes
    partitions/<index>.mp4. Relies on the module-level *cwd* and findmp4().
    """
    os.chdir("videos/" + str(index))
    # Start and end seconds come from the YAML config for this partition.
    start = videoData['startFrame']
    end = videoData['endFrame']
    print(start, end)
    mp4 = findmp4()
    print(mp4)
    os.chdir(cwd)
    # BUG FIX: the original hard-coded '\\' separators, which broke path
    # handling on non-Windows systems; os.path.join is portable.
    fileLoc = os.path.join('videos', str(index), mp4)
    video = VideoFileClip(fileLoc).subclip(start - 4, end + 2).fx(
        vfx.fadeout, duration=1).fx(vfx.fadein, duration=5)
    # Caption the partition with its date, faded in/out, near the top.
    txt_clip = (TextClip(videoData['date'],
                         fontsize=35,
                         color='white',
                         font='Hans Kendrick').set_position(
                             ("center", 80)).set_duration(5).fx(
                                 vfx.fadeout, duration=1.5).fx(vfx.fadein,
                                                               duration=3))

    result = CompositeVideoClip([video, txt_clip])  # Overlay text on video

    # Write into the partitions folder (portable separator; the original's
    # trailing os.chdir(cwd) was a no-op — cwd never changed after line above).
    result.write_videofile(os.path.join("partitions", str(index) + ".mp4"))
    video.close()
    txt_clip.close()
    result.close()
示例#5
0
    def generate(self, avatars, text, usernames, kwargs):
        """Render the "let me in" video with *text* as a caption bar on top
        and send it back; the temp file is removed after the response."""
        name = uuid.uuid4().hex + '.mp4'
        # Cap overly long captions so the rendered bar stays reasonable.
        if len(text) >= 400:
            text = text[:400] + '...'

        @after_this_request
        def remove(response):  # pylint: disable=W0612
            # Best-effort removal of the temp file once it has been sent.
            try:
                os.remove(name)
            except (FileNotFoundError, OSError, PermissionError):
                pass

            return response

        clip = VideoFileClip("assets/letmein/letmein.mp4")

        caption = TextClip(
            txt=text,
            bg_color='White',
            fontsize=32,
            font='Verdana',
            method='caption',
            align='west',
            size=(clip.size[0], None),
        ).set_duration(clip.duration)

        # White band behind the caption, as wide as the clip.
        band = ColorClip(
            (clip.size[0], caption.size[1]),
            color=(255, 255, 255),
            ismask=False,
        ).set_duration(clip.duration)

        # Push the base clip down by the caption height and grow the canvas.
        shifted = clip.set_position(("center", caption.size[1]))
        video = CompositeVideoClip(
            [shifted, band, caption],
            size=(clip.size[0], caption.size[1] + clip.size[1]),
        )

        video.write_videofile(name, threads=4, preset='superfast', verbose=False)
        clip.close()
        video.close()
        return send_file(name, mimetype='video/mp4')
示例#6
0
def mov_change_bg(avi, mov, name):
    """Composite *mov* (carrying its own alpha mask) over *avi* into *name*."""
    logging.info('start processing {}'.format(name))
    clip1 = VideoFileClip(avi)
    clip3 = VideoFileClip(mov, has_mask=True)
    video = CompositeVideoClip([clip1, clip3])
    video.write_videofile(name, audio=True)
    video.close()
    # BUG FIX: release the source readers too — they were leaked before.
    clip1.close()
    clip3.close()
    logging.info('end processing {}'.format(name))
示例#7
0
def add_zm(fg_in_bg_avi, zm_video_path, pictrue_name, video_name, output):
    """Overlay the subtitle video (alpha-masked) on the base video and write
    the result under *output*.

    :return: path of the written mp4 file.
    """
    clip1 = VideoFileClip(fg_in_bg_avi)
    clip3 = VideoFileClip(zm_video_path, has_mask=True)
    video = CompositeVideoClip([clip1, clip3])
    # BUG FIX: os.mkdir + bare `except OSError: pass` also silently swallowed
    # a missing parent directory; makedirs creates parents and tolerates an
    # already-existing directory.
    os.makedirs(output, exist_ok=True)
    # Keep only the portion of video_name after the second '/'.
    video_name = video_name.split('/', 2)[-1]
    name = output + '/' + pictrue_name + video_name + ".mp4"
    video.write_videofile(name, audio=True)  # audio kept (translated original note said "no audio yet")
    video.close()
    # BUG FIX: close the source readers — they were leaked before.
    clip1.close()
    clip3.close()
    return name
def make_video(animations, video, path, video_file_name):
    """Composite animation overlays (black keyed out) onto a background video.

    :param animations: dicts with 'name' (file under *path*) and 'start_time'.
    :param video: dict with 'name' of the background file under *path*.
    :param path: directory prefix for all inputs and the output.
    :param video_file_name: output basename (written as <path><name>.mp4).
    """
    # Removed the no-op `path = path` assignment from the original.
    background = VideoFileClip(path + video['name'])
    clips = [background]
    overlays = []
    for animation in animations:  # index from enumerate was unused
        overlay = VideoFileClip(path + animation['name'])
        overlays.append(overlay)
        # Key out near-black pixels so the background shows through.
        masked_clip = overlay.fx(vfx.mask_color, color=[0, 0, 0], thr=35, s=40)
        clips.append(masked_clip.set_start(animation['start_time']))

    # Renamed from `video`, which shadowed the parameter of the same name.
    composite = CompositeVideoClip(clips)
    composite.write_videofile(f"{path}{video_file_name}.mp4")
    composite.close()
    # BUG FIX: release source readers — they were leaked before.
    background.close()
    for overlay in overlays:
        overlay.close()
示例#9
0
def mov_change_mp4(avi, mov, name):
    """Overlay *mov* (alpha-masked) on a muted, trimmed *avi*, writing *name*.

    Skips rendering (logs an error) when the overlay outlasts the base clip.
    """
    logging.info('start processing {}'.format(name))
    clip3 = VideoFileClip(mov, has_mask=True)
    end = clip3.duration  # seconds

    avivideo = VideoFileClip(avi)
    end_avi = avivideo.duration

    if end <= end_avi:
        # Reuse the already-open base clip instead of opening *avi* a
        # second time, as the original did.
        clip1 = avivideo.subclip(t_start=0, t_end=end).without_audio()
        video = CompositeVideoClip([clip1, clip3])
        video.write_videofile(name, audio=True)
        video.close()
        # BUG FIX: the completion log repeated 'start processing'; the
        # sibling mov_change_bg logs 'end processing' here.
        logging.info('end processing {}'.format(name))

    else:
        logging.error('time of mov is longer')
    # Release the source readers in both paths (leaked before).
    clip3.close()
    avivideo.close()
示例#10
0
    def generate(self, avatars, text, usernames, kwargs):
        """Crab-rave endpoint — deliberately disabled: always raises BadRequest.

        Everything below the initial raise is the original implementation,
        retained for reference, and is unreachable.
        """
        raise BadRequest(
            "Crab endpoint is disabled on flare's imgen instance. Use trustys crab rave cog or host your own imgen."
        )
        # --- unreachable from here on: original implementation kept intact ---
        name = uuid.uuid4().hex + '.mp4'

        @after_this_request
        def remove(response):  # pylint: disable=W0612
            # Best-effort cleanup of the rendered file after the response.
            try:
                os.remove(name)
            except (FileNotFoundError, OSError, PermissionError):
                pass

            return response

        # Expect exactly two comma-separated caption strings.
        t = text.upper().replace(', ', ',').split(',')
        if len(t) != 2:
            raise BadRequest(
                'You must submit exactly two strings split by comma')
        if (not t[0] and not t[0].strip()) or (not t[1] and not t[1].strip()):
            raise BadRequest('Cannot render empty text')
        clip = VideoFileClip("assets/crab/template.mp4")
        # Top caption, underline, and bottom caption, all over the template.
        text = TextClip(t[0], fontsize=48, color='white', font='Symbola')
        text2 = TextClip("____________________", fontsize=48, color='white', font='Verdana')\
            .set_position(("center", 210)).set_duration(15.4)
        text = text.set_position(("center", 200)).set_duration(15.4)
        text3 = TextClip(t[1], fontsize=48, color='white', font='Verdana')\
            .set_position(("center", 270)).set_duration(15.4)

        video = CompositeVideoClip([
            clip,
            text.crossfadein(1),
            text2.crossfadein(1),
            text3.crossfadein(1)
        ]).set_duration(15.4)

        video.write_videofile(name,
                              threads=4,
                              preset='superfast',
                              verbose=False)
        clip.close()
        video.close()
        return send_file(name, mimetype='video/mp4')
示例#11
0
    def gencrabrave(self, t, filename):
        """Render the crab-rave template with two caption lines into *filename*.

        t[0] is the top caption, t[1] the bottom one; both fade in over 1s.
        """
        clip = VideoFileClip("crabtemplate.mp4")
        top = TextClip(t[0], fontsize=48, color='white', font='Verdana')
        top = top.set_position(("center", 200)).set_duration(15.4)
        underline = (
            TextClip("____________________", fontsize=48, color='white', font='Verdana')
            .set_position(("center", 210))
            .set_duration(15.4)
        )
        bottom = (
            TextClip(t[1], fontsize=48, color='white', font='Verdana')
            .set_position(("center", 270))
            .set_duration(15.4)
        )

        video = CompositeVideoClip(
            [clip, top.crossfadein(1), underline.crossfadein(1), bottom.crossfadein(1)]
        ).set_duration(15.4)

        video.write_videofile(
            filename, threads=25, preset='superfast', verbose=False
        )
        clip.close()
        video.close()
示例#12
0
def orchestrate_video_creation(image_list, text):
    """Build project.mp4 from a list of image references, prepare it, and
    overlay *text* as a caption.

    NOTE(review): mutates *image_list* (pops one element) — confirm callers
    do not reuse the list afterwards.
    """
    # list.pop() removes the LAST element despite the "first" name; this
    # image is only used for its dimensions below and is never written to
    # the video. NOTE(review): confirm this exclusion is intentional.
    first_image_reference = image_list.pop()

    generator = create_opencv_image_from_stringio([first_image_reference])
    first_image = next(generator)
    height, width, layers = first_image.shape

    size = (width, height)
    print("resolution of images", size)
    video_path = 'project.mp4'
    # fps is computed AFTER the pop, so it spreads len-1 images over ~12s.
    # NOTE(review): a list with fewer than 12 remaining images yields a
    # fractional (or zero) fps — verify inputs upstream.
    frames_per_second = len(image_list) / 12
    print("frames per second", frames_per_second)
    out = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc(*'mp4v'),
                          frames_per_second, size)

    for img in create_opencv_image_from_stringio(image_list):
        out.write(img)

    out.release()

    # Give the OS a moment to flush the file before re-reading it.
    sleep(1)

    # Re-encode/resize into the constraints expected downstream.
    # NOTE(review): aspect_ratios=(9 / 16) is a plain float, not a tuple —
    # confirm prepare_video accepts a scalar here.
    prepare_video(video_path,
                  aspect_ratios=(9 / 16),
                  max_duration=14.9,
                  min_size=(612, 612),
                  max_size=(1080, 1920),
                  save_path='second.mp4')

    # TODO: set text a bit up from bottom
    text = TextClip(text, fontsize=54, color='blue').set_position(
        ("center")).set_duration(4)
    clip = VideoFileClip('second.mp4', audio=False)
    final_clip = CompositeVideoClip([clip, text])
    final_clip.write_videofile(video_path, fps=frames_per_second)
    text.close()
    clip.close()
    final_clip.close()
    return {'file': 'project.mp4'}