Ejemplo n.º 1
0
def main():
    """Build one clip per name listed in names.txt and render them as
    "飞跃起点理.mp4".

    NOTE(review): the original function contained a second demo block after
    the exit() call below; it was unreachable and has been removed.
    """
    clips = []
    with open("names.txt") as f:
        names = f.readlines()
        print(names)
        for raw in names:
            # Drop the trailing newline before building the per-name clip.
            clips.append(make(raw.rstrip('\n')))
    print(clips)
    concatenate_videoclips(clips).set_fps(30).write_videofile("飞跃起点理.mp4")
    exit()
Ejemplo n.º 2
0
def create_video_of_list_of_clips(clips, output):
    """Overlay each clip's channel/title caption on its video, join them in
    order, and render the result to *output* at 30 fps."""
    print('Rendering video to location  %s' % (output))

    composited = []
    for clip in clips:
        source_path = constants.DOWNLOAD_LOCATION + clip.channel.slug + '/' + clip.slug + '.mp4'
        print(source_path)

        footage = VideoFileClip(source_path)
        caption = TextClip(txt=clip.channel.name + ': ' + clip.title,
                           font='Amiri-regular',
                           color='white',
                           fontsize=55).set_duration(8)
        positioned = caption.set_pos((0.05, 0.8), relative=True)

        # Composite the caption over the footage, normalised to 720p.
        composited.append(
            CompositeVideoClip([footage, positioned]).resize((1280, 720)))

        # Drop local references so the readers can be collected sooner.
        del caption
        del footage

    # Add clips together, then render.
    finished = concatenate_videoclips(composited, method='compose')
    finished.write_videofile(output, fps=30)
Ejemplo n.º 3
0
def label_clip(video_path, label, start_second, end_second):
    """Return *video_path* with *label* shown bottom-center between the
    given start and end seconds."""
    base = VideoFileClip(video_path)
    overlay = (TextClip(label, fontsize=40, color='white', bg_color='red')
               .set_pos(('center', 'bottom'))
               .set_start(start_second)
               .set_duration(end_second - start_second))
    return CompositeVideoClip([base, overlay])
Ejemplo n.º 4
0
    def generate(self, avatars, text, usernames, kwargs):
        """Render the kowalski GIF with *text* drawn on it, schedule the file
        for deletion after the response, and serve it as image/gif."""
        name = uuid.uuid4().hex + '.gif'

        @after_this_request
        def remove(response):  # pylint: disable=W0612
            # Best-effort cleanup of the rendered file once the response is sent.
            try:
                os.remove(name)
            except (FileNotFoundError, OSError, PermissionError):
                pass
            return response

        base = VideoFileClip("assets/kowalski/kowalski.gif")
        caption = TextClip(text,
                           fontsize=36,
                           method='caption',
                           size=(245, None),
                           align='West',
                           color='black',
                           stroke_color='black',
                           stroke_width=1,
                           font='Verdana').set_duration(base.duration)
        caption = caption.set_position((340, 65)).set_duration(base.duration)
        # Tilt the caption to match the clipboard angle in the source GIF.
        caption = rotate(caption, angle=10, resample='bilinear')

        composite = CompositeVideoClip([base, caption]).set_duration(base.duration)
        composite.write_gif(name)
        base.close()
        composite.close()
        return send_file(name, mimetype='image/gif')
Ejemplo n.º 5
0
def subscribers_video():
    """Build the "special thanks" composite: background, title, and one text
    clip per subscriber that pops in at a staggered time.

    NOTE(review): relies on a module-level ``duration`` — confirm it is
    defined wherever this function is imported.
    """
    random.seed()
    with open(join(settings.VIDEO_ASSETS, "subscribers.txt")) as f:
        names = f.read().split("\n")[:-1]

    background = ImageClip(join(settings.IMAGE_ASSETS,
                                "subscribers_bg.png")).set_duration(duration)
    title = TextClip(
        txt="SPECIAL THANKS TO OUR SUBSCRIBERS!",
        color='white',
        font='gill_sans.txt',
        fontsize=60).set_duration(duration).set_pos((350, 10))

    name_clips = []
    t1 = 1
    for sub_name in names:
        t1 += 0.2
        t0 = math.log(t1)
        entry = TextClip(
            txt=sub_name, color='white', font='gill_sans.txt',
            fontsize=40).set_duration(duration)
        x, y = random.random() * 1700, random.random() * 940 + 100
        # Hidden off-screen (y = -200) until its reveal time t0; the defaults
        # bind the current t0/x/y into each lambda.
        entry = entry.set_pos(
            lambda t, t0=t0, x=x, y=y: (x, -200 if t < t0 else y))
        name_clips.append(entry)

    audioclip = VideoFileClip(join(settings.VIDEO_ASSETS, "gunfire.mp4"))

    final = CompositeVideoClip(
        [audioclip, background, title] + name_clips,
        size=(1920, 1080)).fadeout(0.2)
    return final
Ejemplo n.º 6
0
 def Gen_Video(self, beat_times, mp3path, uuid):
     """Lay one lyric line per beat over a 320x640 frame and render
     <uuid>.mp4 with the mp3 as its audio track."""
     FONT_URL = '../font/heimi.TTF'
     with open(uuid + '.txt', 'r', encoding='utf-8') as f:
         lyric_lines = f.read().split('\n')
     subtitle_clips = []
     for index, beat_time in enumerate(beat_times[:-1]):
         # Stop when we run out of lyric lines.
         if index >= len(lyric_lines):
             break
         print(f'{index + 1}/{len(beat_times)}——{lyric_lines[index]}')
         subtitle = (TextClip(lyric_lines[index],
                              fontsize=320 // 8,
                              color='white',
                              size=(320, 640),
                              method='caption',
                              font=FONT_URL)
                     .set_start(beat_time)
                     .set_end(beat_times[index + 1])
                     .set_pos('center'))
         subtitle_clips.append(subtitle)
     final_video = (CompositeVideoClip(subtitle_clips)
                    .set_audio(AudioFileClip(mp3path)))
     final_video.write_videofile(str(uuid) + '.mp4',
                                 fps=30,
                                 codec='mpeg4',
                                 preset='ultrafast',
                                 audio_codec="libmp3lame",
                                 threads=4)
Ejemplo n.º 7
0
def write_video_file(file_path, pred_label_score, gt_info, save_dir):
    """Burn the prediction text (top-left) and optional ground truth
    (bottom-right) into a video and write it under *save_dir*.

    Args:
        file_path: Source video path.
        pred_label_score: Text describing the predicted label/score.
        gt_info: Ground-truth text; no overlay is added when empty.
        save_dir: Output directory; the original basename is reused.
    """
    video_clip = VideoFileClip(file_path)
    text_clip = TextClip(txt=pred_label_score,
                         font='utils/SimHei.ttf',
                         color='white',
                         fontsize=32,
                         bg_color='black',
                         align='West').set_pos(
                             ("left", "top")).set_duration(video_clip.duration)
    compose_list = [video_clip, text_clip]
    if gt_info != "":
        gt_text_clip = TextClip(txt=gt_info,
                                font='utils/SimHei.ttf',
                                color='white',
                                fontsize=32,
                                bg_color='black',
                                align='East').set_pos(
                                    ("right", "bottom")).set_duration(
                                        video_clip.duration)
        compose_list.append(gt_text_clip)
    result = CompositeVideoClip(compose_list)
    video_name = os.path.basename(file_path)
    # os.path.join handles the separator portably (was: save_dir + "/" + name).
    result.write_videofile(os.path.join(save_dir, video_name),
                           fps=25,
                           codec='libx264',
                           audio_codec='aac',
                           temp_audiofile='temp-audio.m4a',
                           remove_temp=True)
Ejemplo n.º 8
0
 def prepend_intertitle(
     self,
     size: Optional[Size] = None,
     color: str = DEFAULT_INTERTITLE_COLOR,
     font: str = DEFAULT_INTERTITLE_FONT,
     fontsize: int = DEFAULT_INTERTITLE_FONTSIZE,
     position: str = DEFAULT_INTERTITLE_POSITION,
     duration: int = DEFAULT_INTERTITLE_DURATION,
 ):
     """Prepend a full-frame intertitle card built from self.meta.text.

     '|' in the text becomes a line break; the card is shown for *duration*
     seconds before the existing clip.
     """
     if not self.meta.text:
         # Bug fix: the '%s' placeholder had no argument and was logged
         # literally; supply the path like the logger.info call below does.
         logger.warning('%s: Missing intertitle text', self.meta.path)
         return
     logger.info('%s: Intertitle "%s"', self.meta.path, self.meta.text)
     if not size:
         size = Size(width=self.video_file_clip.w,
                     height=self.video_file_clip.h)
     text_clip = TextClip(
         self.meta.text.replace('|', '\n'),
         size=(size.width * INTERTITLE_TEXT_WIDTH_FACTOR, None),
         color=color,
         font=font,
         fontsize=fontsize,
         method='caption',
         align='center',
     )
     composite_clip = CompositeVideoClip([text_clip.set_pos(position)],
                                         (size.width, size.height))
     intertitle_clip = composite_clip.subclip(0, duration)
     self.video_file_clip = concatenate_videoclips(
         [intertitle_clip, self.video_file_clip], method='compose')
Ejemplo n.º 9
0
def video_render(txt_file,image_file,sound_file,save_file):
        """Render a static-image video with timed text overlays and audio.

        Each non-blank line of *txt_file* is shown for max(3, words/2)
        seconds over *image_file*; *sound_file* is looped/trimmed to fit,
        and the result is written to *save_file*.
        """
        from moviepy.editor import ImageClip
        from moviepy.editor import CompositeVideoClip
        from moviepy.editor import CompositeAudioClip
        from moviepy.editor import TextClip
        from moviepy.editor import AudioFileClip
        from moviepy.editor import concatenate
        from moviepy.config import change_settings
        change_settings({"IMAGEMAGICK_BINARY": "/usr/local/bin/convert"})

        # Collect the non-empty lines of the script.
        text = []
        with open(txt_file, 'r') as file:
            for lines in file:
                if lines != "\n":
                    text.append(lines.rstrip('\n'))

        # Per-line duration: half the word count, floored at 3 seconds
        # (comprehension replaces the original if/else append loop).
        durs = [max(len(re.findall(r'\w+', line)) / 2, 3) for line in text]
        total_duration = sum(durs)

        a_clip = AudioFileClip(sound_file)
        if a_clip.duration < total_duration:
            # Audio too short: overlap a second copy starting 1s before the end.
            new_audioclip = CompositeAudioClip(
                [a_clip, a_clip.set_start(a_clip.duration - 1)]
            ).set_duration(total_duration + 3)
        else:
            new_audioclip = a_clip.set_duration(total_duration + 3)

        screen = (1920, 1080)
        clip_list = []
        # zip() pairs each line with its duration (was a manual index counter).
        for string, duration in zip(text, durs):
            try:
                txt_clip = TextClip(string, fontsize = 70, color = 'white', method='caption',size=screen ).set_duration(duration).set_pos('center')
            except UnicodeEncodeError:
                # Placeholder card when ImageMagick cannot render the text.
                txt_clip = TextClip("Issue with text", fontsize = 70, color = 'white').set_duration(2)
            clip_list.append(txt_clip)

        final_text_clip = concatenate(clip_list, method = "compose").set_start(3)

        v_clip = ImageClip(image_file).set_duration(total_duration+3)
        video = CompositeVideoClip([v_clip, final_text_clip])
        video = video.set_audio(new_audioclip)
        video.write_videofile(save_file,
                              codec='libx264',
                              fps=10,
                              threads=4,
                              audio_codec='aac',
                              temp_audiofile='temp-audio.m4a',
                              remove_temp=True
                              )
Ejemplo n.º 10
0
def get_output(video_path,
               out_filename,
               label,
               fps=30,
               font_size=20,
               font_color='white',
               resize_algorithm='bicubic',
               use_frames=False):
    """Get demo output using ``moviepy``.

    This function will generate video file or gif file from raw video or
    frames, by using ``moviepy``. For more information of some parameters,
    you can refer to: https://github.com/Zulko/moviepy.

    Args:
        video_path (str): The video file path or the rawframes directory path.
            If ``use_frames`` is set to True, it should be rawframes directory
            path. Otherwise, it should be video file path.
        out_filename (str): Output filename for the generated file.
        label (str): Predicted label of the generated file.
        fps (int): Number of picture frames to read per second. Default: 30.
        font_size (int): Font size of the label. Default: 20.
        font_color (str): Font color of the label. Default: 'white'.
        resize_algorithm (str): The algorithm used for resizing.
            Default: 'bicubic'. For more information,
            see https://ffmpeg.org/ffmpeg-scaler.html.
        use_frames: Determine Whether to use rawframes as input. Default:False.
    """

    try:
        from moviepy.editor import (ImageSequenceClip, TextClip, VideoFileClip,
                                    CompositeVideoClip)
    except ImportError:
        raise ImportError('Please install moviepy to enable output file.')

    if use_frames:
        frame_paths = [osp.join(video_path, name)
                       for name in os.listdir(video_path)]
        video_clips = ImageSequenceClip(sorted(frame_paths), fps=fps)
    else:
        video_clips = VideoFileClip(video_path,
                                    resize_algorithm=resize_algorithm)

    # Pin the label to the bottom-right for the whole duration.
    text_clips = TextClip(label, fontsize=font_size, color=font_color)
    text_clips = text_clips.set_position(
        ('right', 'bottom'), relative=True).set_duration(video_clips.duration)

    video_clips = CompositeVideoClip([video_clips, text_clips])

    if osp.splitext(out_filename)[1][1:] == 'gif':
        video_clips.write_gif(out_filename)
    else:
        video_clips.write_videofile(out_filename, remove_temp=True)
 def description_helper(self, description, currloc, position, clip):
     """Return a transparent-backed text clip showing *description*,
     horizontally centered at vertical *position*, starting at *currloc*
     and lasting for the whole of *clip*."""
     txt = TextClip(description, font='Arial', color="MediumSpringGreen",
                    fontsize=44)
     timed = (txt.on_color(col_opacity=0)
              .set_duration(clip.duration)
              .set_start(currloc))
     return timed.set_position(('center', position))
Ejemplo n.º 12
0
def sub_config(txt):
    """Build a white subtitle clip with a 5%-padded, half-opaque backdrop."""
    base = TextClip(txt,
                    font='Helvetica Neue',
                    fontsize=40,
                    color='white')
    padded = (int(base.w * 1.05), int(base.h * 1.05))
    return base.on_color(size=padded, col_opacity=0.5)
Ejemplo n.º 13
0
def annotate(clip, txt, speaker, txt_color='white', fontsize=30, font='Arial'):
    """Write *txt* at the bottom of *clip*, colored by *speaker* index.

    NOTE(review): ``txt_color`` is accepted but never used — the color is
    taken from the per-speaker palette below; confirm with callers before
    removing it.
    """
    palette = ['red', 'black', 'white', 'blue', 'green']
    caption = TextClip(txt,
                       fontsize=fontsize,
                       font=font,
                       color=palette[speaker])
    composed = CompositeVideoClip([clip, caption.set_pos(('center', 'bottom'))])
    return composed.set_duration(clip.duration)
Ejemplo n.º 14
0
 def __init__(self, *args, **kwargs):
     """Build the timelapse clip, optionally overlaying a 5s text stamp."""
     # Pop our own flag before forwarding kwargs to ImageSequenceClip.
     self.add_timestamp = kwargs.pop("timestamp", False)
     self.timelapse = ImageSequenceClip(*args, **kwargs)
     self.clips = [self.timelapse]
     if self.add_timestamp:
         stamp = TextClip("MoviePy ROCKS", fontsize=50, color="white")
         self.txt_clip = stamp.set_pos("center").set_duration(5)
         self.clips.append(self.txt_clip)
     super(TimestampedImageSequenceClip, self).__init__(self.clips)
Ejemplo n.º 15
0
 def __init__(self, *args, **kwargs):
     """Assemble the timelapse plus an optional centered text overlay."""
     # Our 'timestamp' flag must not reach ImageSequenceClip's constructor.
     self.add_timestamp = kwargs.pop('timestamp', False)
     self.timelapse = ImageSequenceClip(*args, **kwargs)
     self.clips = []
     self.clips.append(self.timelapse)
     if self.add_timestamp:
         overlay = TextClip("MoviePy ROCKS", fontsize=50, color='white')
         self.txt_clip = overlay.set_pos('center').set_duration(5)
         self.clips.append(self.txt_clip)
     super(TimestampedImageSequenceClip, self).__init__(self.clips)
Ejemplo n.º 16
0
 def build_main(self, clip, max_duration: int = 0) -> StoryBuild:
     """Build clip
     :clip: Clip object (VideoFileClip, ImageClip)
     :max_duration: Result duration in seconds
     :return: StoryBuild (with new path and mentions)
     """
     clips = []
     # Background
     if self.bgpath:
         assert self.bgpath.exists(),\
             f'Wrong path to background {self.bgpath}'
         background = ImageClip(str(self.bgpath))
         clips.append(background)
     # Media clip: centered, nudged up slightly when there is headroom
     clip_left = (self.width - clip.size[0]) / 2
     clip_top = (self.height - clip.size[1]) / 2
     if clip_top > 90:
         clip_top -= 50
     media_clip = clip.set_position((clip_left, clip_top))
     clips.append(media_clip)
     mention = self.mentions[0] if self.mentions else None
     # Text clip.
     # Bug fix: guard on `mention` itself — the original evaluated
     # `mention.user.username` even when no mention existed, raising
     # AttributeError instead of falling back to self.caption.
     if mention and mention.user.username:
         caption = "@%s" % mention.user.username
     else:
         caption = self.caption
     text_clip = TextClip(caption,
                          color="white",
                          font="Arial",
                          kerning=-1,
                          fontsize=100,
                          method="label")
     text_clip_left = (self.width - 600) / 2
     text_clip_top = clip_top + clip.size[1] + 50
     # Keep the caption on screen when it would overflow the bottom edge.
     offset = (text_clip_top + text_clip.size[1]) - self.height
     if offset > 0:
         text_clip_top -= offset + 90
     text_clip = text_clip.resize(width=600).set_position(
         (text_clip_left, text_clip_top)).fadein(3)
     clips.append(text_clip)
     # Mentions: sized/positioned relative to the rendered caption
     mentions = []
     if mention:
         mention.x = 0.49892962  # approximately center
         mention.y = (text_clip_top + text_clip.size[1] / 2) / self.height
         mention.width = text_clip.size[0] / self.width
         mention.height = text_clip.size[1] / self.height
         mentions = [mention]
     # Cap the requested duration at the clip's own length.
     duration = max_duration
     if max_duration and clip.duration and max_duration > clip.duration:
         duration = clip.duration
     destination = tempfile.mktemp('.mp4')
     CompositeVideoClip(clips, size=(self.width, self.height))\
         .set_fps(24)\
         .set_duration(duration)\
         .write_videofile(destination, codec='libx264', audio=True, audio_codec='aac')
     return StoryBuild(mentions=mentions, path=destination)
Ejemplo n.º 17
0
def make(name):
    """Compose a 3-second card: images/<name>.jpg with an animated name
    banner sliding in from the right while drifting down."""
    base = ImageClip("./images/{}.jpg".format(name))
    banner = TextClip(name.encode("utf-8"),
                      font="SimSun",
                      color='white',
                      fontsize=96)
    banner = banner.on_color(size=(base.w, banner.h + 10),
                             color=(0, 0, 0),
                             pos=(6, "center"),
                             col_opacity=0.6)
    banner = banner.set_pos(
        lambda t: (max(base.w / 7, int(base.w - 1 * base.w * t)),
                   max(3 * base.h / 4, int(100 * t))))
    return CompositeVideoClip([base, banner]).set_duration(3)
Ejemplo n.º 18
0
def textclip():
    """Mirror cut_video.mp4 horizontally and overlay two slowly rising
    captions, writing the result to final.mp4."""
    top_caption = (TextClip("剪辑不易 \n 关注啊", fontsize=70, color='white', font="FZZJ.TTF")
                   .set_position(lambda t: ('center', 100 - t))
                   .set_duration(15))
    bottom_caption = (TextClip("赶快右侧 \n 点赞啦", fontsize=70, color='red', font="FZZJ.TTF")
                      .set_position(lambda t: ('center', 900 - t))
                      .set_duration(15))

    source = VideoFileClip("cut_video.mp4").fx(vfx.mirror_x)
    print(source.duration)
    composed = CompositeVideoClip(
        [source.set_position('center'), top_caption, bottom_caption],
        size=[540, 1024])  # Overlay text on video
    composed.write_videofile("final.mp4")
Ejemplo n.º 19
0
    def render(self, talk, act, exp, move, render_video, dft_exp_dt=0.2):
        """Compose the avatar's action/expression/talk/move layers and write
        the result to *render_video*.

        Args:
            talk: Caption text; '' means no talk layer.
            act: Key into self.act_assets for the base action clip.
            exp: Key into self.exp_assets for the expression clip.
            move: Key into self.move_assets; 'null' means no move layer.
            render_video: Output video path.
            dft_exp_dt: Seconds the neutral ('null') expression is shown
                before the requested expression starts.
        """
        # Cache hit: re-encode the cached clip to the requested path and stop.
        if self.cache_dir is not None:
            cache_video = '{}.mp4'.format(
                get_macro_act_key(talk, act, exp, move))
            cache_video = os.path.join(self.cache_dir, cache_video)
            if os.path.exists(cache_video):
                clip = VideoFileClip(cache_video)
                clip.write_videofile(render_video)
                return

        act_clip = self.act_assets[act]
        default_exp_clip = self.exp_assets['null']
        exp_clip = self.exp_assets[exp]

        # Base layers: the action clip plus the neutral expression for the
        # first dft_exp_dt seconds. The face is anchored at (291, 160);
        # the lambdas ignore t (static position).
        if talk == '':
            clips = [
                act_clip,
                default_exp_clip.set_position(
                    lambda t: (291, 160)).set_duration(dft_exp_dt)
            ]
        else:
            # With talk text, add a green caption centered near the top.
            talk_clip = TextClip(talk,
                                 font='data/SimHei.ttf',
                                 color='green',
                                 method='caption',
                                 fontsize=30)
            clips = [
                act_clip,
                talk_clip.set_position(('center', 50)),
                default_exp_clip.set_position(
                    lambda t: (291, 160)).set_duration(dft_exp_dt)
            ]

        # Requested expression starts right after the neutral intro.
        clips.append(
            exp_clip.set_position(lambda t: (291, 160)).set_start(dft_exp_dt))
        # If the expression ends before the action does, fill the remainder
        # with the neutral expression again.
        ts = dft_exp_dt + exp_clip.duration
        if ts < act_clip.duration:
            clips.append(
                default_exp_clip.set_position(
                    lambda t: (291, 160)).set_duration(act_clip.duration -
                                                       ts).set_start(ts))

        # Optional movement layer pinned near the bottom.
        if move != 'null':
            move_clip = self.move_assets[move]
            clips.append(move_clip.set_position(('center', 650)))

        final_clip = CompositeVideoClip(clips).set_duration(act_clip.duration)
        final_clip.write_videofile(render_video)
Ejemplo n.º 20
0
 def process_package(source: str, parameters: AnnotationParameters):
     """Overlay the configured annotation text on *source* and return the
     composited clip (not yet rendered)."""
     log.info(''.join(["SOURCE: ", source]))
     main_clip = VideoFileClip(source)
     log.info(''.join([
         "Parameters: Font size: ",
         str(parameters.font_size), " Text: ", parameters.text,
         " duration: ",
         str(parameters.duration), " color:", parameters.color
     ]))
     annotation = TextClip(parameters.text,
                           fontsize=parameters.font_size,
                           color=parameters.color)
     annotation = annotation.set_pos('center').set_duration(parameters.duration)
     annotated = CompositeVideoClip([main_clip, annotation])
     log.info(''.join(["Video processed correctly: "]))
     return annotated
Ejemplo n.º 21
0
 def background_clip(self):
     """Text clip in the 'after' color, sized to the configured clip area."""
     return TextClip(txt=self.text,
                     color=TEXT_COLOR_AFTER,
                     fontsize=FONT_SIZE,
                     size=self.clip_size)
Ejemplo n.º 22
0
 def foreground_clip(self):
     """Foreground-colored text clip whose mask frames come from mask_frame."""
     text_layer = TextClip(
         txt=self.text,
         color=TEXT_COLOR,
         fontsize=FONT_SIZE,
         size=self.clip_size,
     )
     # Swap in our per-frame mask function so visibility is computed live.
     text_layer.mask.get_frame = self.mask_frame
     return text_layer
Ejemplo n.º 23
0
def collage(output_video, *input_videos):
    """Tile the labelled input videos into a grid and render *output_video*.

    Each input is labelled 'Amplified <x>' when its filename contains
    '@<x>.', otherwise 'Input'. Returns the output path.
    """
    labelled = []
    for path in input_videos:
        video_clip = VideoFileClip(path)
        # Pull the amplification factor out of a name like 'clip@10.mp4'.
        _, _, amp = os.path.basename(path).partition("@")
        amp, _, _ = amp.partition('.')
        text_clip = (TextClip(txt='Amplified {}'.format(amp) if amp else 'Input',
                              color='white',
                              method='label',
                              fontsize=32,
                              font='Helvetica-Bold')
                     .set_duration(video_clip.duration)
                     .set_position(('center', 0.05), relative=True))
        labelled.append(
            CompositeVideoClip((video_clip, text_clip), use_bgclip=True))
    # 1 column for up to 3 inputs, 2 for exactly 4, 3 otherwise.
    if len(labelled) < 4:
        num_columns = 1
    elif len(labelled) < 5:
        num_columns = 2
    else:
        num_columns = 3
    rows = [labelled[i:i + num_columns]
            for i in range(0, len(labelled), num_columns)]
    clips_array(rows).write_videofile(output_video, audio=False)
    return output_video
Ejemplo n.º 24
0
def createPartition(index, videoData):
    """Cut the mp4 under videos/<index> per the config's frame bounds, stamp
    its date near the top, and write partitions/<index>.mp4 with fades."""
    os.chdir("videos/" + str(index))
    # Get the start and end second from the YAML Config and edit all of the
    # videos into clips that can be concatenated.
    start = videoData['startFrame']
    end = videoData['endFrame']
    print(start, end)
    mp4 = findmp4()
    print(mp4)
    os.chdir(cwd)
    fileLoc = 'videos' + '\\' + str(index) + '\\' + mp4
    video = (VideoFileClip(fileLoc)
             .subclip(start - 4, end + 2)
             .fx(vfx.fadeout, duration=1)
             .fx(vfx.fadein, duration=5))
    # Date caption: centered near the top, shown 5s, fading in and out.
    txt_clip = (TextClip(videoData['date'],
                         fontsize=35,
                         color='white',
                         font='Hans Kendrick')
                .set_position(("center", 80))
                .set_duration(5)
                .fx(vfx.fadeout, duration=1.5)
                .fx(vfx.fadein, duration=3))

    result = CompositeVideoClip([video, txt_clip])  # Overlay text on video

    # Write the partition into a new partition folder
    result.write_videofile("partitions\\" + str(index) + ".mp4")
    os.chdir(cwd)
    video.close()
    txt_clip.close()
    result.close()
Ejemplo n.º 25
0
    def generate(self, avatars, text, usernames, kwargs):
        """Render the letmein video with *text* captioned above it on a white
        banner, schedule cleanup, and serve the mp4."""
        name = uuid.uuid4().hex + '.mp4'
        # Cap overly long captions.
        if len(text) >= 400:
            text = text[:400] + '...'

        @after_this_request
        def remove(response):  # pylint: disable=W0612
            # Best-effort removal of the rendered file after the response.
            try:
                os.remove(name)
            except (FileNotFoundError, OSError, PermissionError):
                pass
            return response

        clip = VideoFileClip("assets/letmein/letmein.mp4")

        textclip = TextClip(txt=text,
                            bg_color='White',
                            fontsize=32,
                            font='Verdana',
                            method='caption',
                            align='west',
                            size=(clip.size[0], None)).set_duration(clip.duration)

        # Solid white banner behind the caption, same width as the video.
        banner = ColorClip((clip.size[0], textclip.size[1]),
                           color=(255, 255, 255),
                           ismask=False).set_duration(clip.duration)

        # Video sits below the caption; total height grows by the banner.
        video = CompositeVideoClip(
            [clip.set_position(("center", textclip.size[1])), banner, textclip],
            size=(clip.size[0], textclip.size[1] + clip.size[1]))

        video.write_videofile(name, threads=4, preset='superfast', verbose=False)
        clip.close()
        video.close()
        return send_file(name, mimetype='video/mp4')
Ejemplo n.º 26
0
def make_test_vid(note_vid, segments, fname):
    """Concatenate the labelled note segments of *note_vid* into *fname*.

    Each (note, (start, end)) segment is laid on the timeline with `padding`
    seconds of gap after it and overlaid with "<index> <note>" top-left.
    """
    padding = 2
    t = 0
    clips = []

    # enumerate() replaces the original hand-maintained counter variable.
    for i, (note, (start, end)) in enumerate(segments):
        clips.append(note_vid.subclip(start, end).set_start(t))

        txt = (TextClip("%d %s" % (i, note),
                        color='white',
                        font='Ubuntu-Bold',
                        fontsize=22).margin(1).margin(
                            top=30, left=30, opacity=0.0).set_pos(
                                ('left',
                                 'top')).set_duration(end - start +
                                                      padding).set_start(t))
        clips.append(txt)

        t += (end - start) + padding
        # Original printed the counter after incrementing, hence i + 1.
        print(t, i + 1)

    full_video = CompositeVideoClip(clips)
    print('full length %f' % full_video.duration)
    full_video.write_videofile(fname, threads=20)
Ejemplo n.º 27
0
def gifEngine(starttime,
              endtime,
              videofileloc,
              srtfileloc,
              outfileloc,
              logger='gifEngine.log'):
    """Cut [starttime, endtime] from the video, burn in its subtitles, and
    write a palette-optimized GIF.

    Returns 0 on success, or the caught IOError/OSError instance on failure.
    Note: *logger* arrives as the log file name and is then rebound to the
    module Logger object (interface kept from the original).
    """
    logging.basicConfig(filename=logger, level=logging.DEBUG)
    logger = logging.getLogger(__name__)
    prolog.basic_config()

    # PEP 8 (E731): a named def instead of a lambda bound to a name.
    def generator(txt):
        return TextClip(txt, font='Impact', fontsize=28, color='white')

    # creating the initial GIF
    try:
        video = VideoFileClip(videofileloc)
        sub = SubtitlesClip(srtfileloc, generator).set_position(
            ("center", "bottom"), relative=True)
        composite = CompositeVideoClip([video, sub])
        composite = composite.subclip(starttime, endtime)
        composite.write_gif(outfileloc,
                            program='ffmpeg',
                            opt='palettegen',
                            logger=logger,
                            verbose=True)  # using new palettegen opt
        return 0
    except (IOError, OSError) as err:
        return err
Ejemplo n.º 28
0
def generate_caption(pos, proj_id, vid_id, capt, colour, font, font_size):
    """Overlay *capt* on the project video at *pos*, save a timestamped
    preview frame under app/static/img, and return its static URL."""
    pos = [int(p) for p in pos]
    print(pos)
    source = os.path.join(app.config['BASE_DIR'], app.config['VIDS_LOCATION'],
                          str(proj_id),
                          str(vid_id) + ".mp4")
    vid = VideoFileClip(source)
    vid = vid.subclip(0, vid.duration).resize((852, 480))

    print(int(sizes[font_size] * 0.44357))
    caption = TextClip(txt=capt,
                       fontsize=int(sizes[font_size] * 0.44357),
                       color='#' + str(colour),
                       font=font).set_position(pos).set_duration(vid.duration)

    vid = CompositeVideoClip([vid, caption])

    stamp = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y-%m-%d-%H-%M-%S')

    vid.save_frame(os.getcwd() + '/app/static/img/' + str(proj_id) +
                   '/{}_frame_caption_{}.png'.format(str(vid_id), stamp))

    return '/static/img/{}/{}_frame_caption_{}.png'.format(
        str(proj_id), str(vid_id), stamp)
Ejemplo n.º 29
0
def video_collage(vid_names, ordering, probs, correct_class_prob, labels):
    """Build a 3-wide collage of the six top-ordered videos, each paired
    with its prediction heat map and a "GT/Pred" text overlay."""
    sel_clips = []
    for rank in range(6):
        vname = vid_names[ordering[rank]]
        # Load all the metadata for this video
        meta_path = vname.replace('images/', 'scenes/').replace('.avi', '.json')
        with open(meta_path, 'r') as fin:
            metadata = json.load(fin)
        gt = np.argmax(labels[ordering[rank]])
        pred = np.argmax(probs[ordering[rank]])
        pred_map = location_predictions_to_map(probs[ordering[rank]], metadata)
        print(vname, gt, pred, correct_class_prob[ordering[rank]])
        main_video = VideoFileClip(vname).margin(3)
        side_by_side = clips_array([[
            main_video,
            ImageSequenceClip(pred_map, fps=main_video.fps),
        ]])
        overlay = (TextClip("GT {} Pred {}".format(gt, pred),
                            font=MOVIEPY_FONT)
                   .set_pos((10, 10))
                   .set_duration(main_video.duration))
        sel_clips.append(CompositeVideoClip([side_by_side, overlay]))
    return clips_array(chunkify(sel_clips, 3))
Ejemplo n.º 30
0
 def from_generators(gif_file_name, feeding_data, how_many_images_to_generate, image_generator):
     """Write a 2 fps GIF of generated frames, each annotated with the
     original and generated steering angles."""
     duration_pre_image = 0.5
     feeding_angle = feeding_data.steering_angle
     frames = []
     for _ in range(how_many_images_to_generate):
         image, angle = image_generator(feeding_data)
         annotation = TextClip(txt="angle:{:.2f}/{:.2f}".format(feeding_angle, angle),
                               method="caption", align="North",
                               color="white", stroke_width=3,
                               fontsize=18).set_duration(duration_pre_image)
         frames.append(CompositeVideoClip([
             ImageClip(image, duration=duration_pre_image),
             annotation,
         ]))
     concatenate_videoclips(frames).write_gif(gif_file_name, fps=2)
Ejemplo n.º 31
0
def generateVideo(text, line2, vId):
    """Overlay ``text`` (word-wrapped to two lines) and ``line2`` on the
    module-level ``video`` clip, write the result, and move it to ``render/``.

    Parameters:
        text:  headline; wrapped to at most two lines of ``charLimit`` chars
               (words past the second line are silently dropped, as before).
        line2: second caption, shown and then faded out after the headline.
        vId:   identifier used for the final file name ``<vId>.<ext>``.

    Returns the rendered file name (``"<vId>.<ext>"``).
    """
    used_chars = 0  # renamed from ``sum``, which shadowed the builtin
    fnlTxt = ""
    lines = [[], []]
    for ind, c in enumerate(text.split(" ")):
        if used_chars + len(c) <= charLimit:
            # BUGFIX: was ``lines[0] += c`` which extended the list with the
            # word's individual characters instead of appending the word.
            lines[0].append(c)
            if ind != 0:
                fnlTxt += " "
            fnlTxt += c
            used_chars += len(c)
        elif used_chars + len(c) <= charLimit * 2:
            if len(lines[1]) == 0:
                fnlTxt += "\n"  # first word of the second line starts it
            if len(lines[1]) != 0:
                fnlTxt += " "
            lines[1].append(c)
            fnlTxt += c
            used_chars += len(c)
    print(fnlTxt)
    # Create the text
    txt_clip = (TextClip(
        fnlTxt, fontsize=ftSz, color="white", font=fontPth,
        align="West").set_position("left").set_start(0.6).set_duration(1.4))
    line2C = (TextClip(
        line2, fontsize=ftSz, color="white", font=fontPth,
        align="center").set_position("left").set_start(2).set_duration(2.4))
    line2C = line2C.set_position(lambda t: (w * 0.1, 1.8 * h / 6))
    line2E = (TextClip(line2,
                       fontsize=ftSz,
                       color="white",
                       font=fontPth,
                       align="center").set_position("left").set_start(
                           4.4).set_duration(0.6).fadeout(0.6))
    line2E = line2E.set_position(lambda t: (w * 0.1, 1.8 * h / 6))
    txt_mov = txt_clip.set_pos(lambda t: (  # animate the text
        min((w * 0.1), int(-txt_clip.w - 500 + 2.7 * w * t)),  # X: slide in
        max(1.8 * h / 6, int(100 * t)),  # Y
    ))
    rName = text + "." + ext
    nName = f"{vId}.{ext}"
    result = CompositeVideoClip([video, txt_mov, line2C,
                                 line2E])  # Overlay text on video
    result.write_videofile(rName, fps=video.reader.fps)  # Many options...
    # Moves the video file to the render directory
    shutil.move(rName, "render/" + nName)
    # BUGFIX: the original returned the undefined name ``filename`` (NameError
    # at runtime); return the rendered file name instead.
    return nName
Ejemplo n.º 32
0
 def get_number_textclip(self) -> TextClip:
     """Build the gold episode-number overlay (e.g. ``#7``), positioned
     relative to the frame near the bottom-left corner."""
     metadata = self.get_metadata()
     number_clip = TextClip(
         f"#{metadata['number_in_series']}",
         fontsize=130,
         font="Arial-Bold",
         color="gold4",
     )
     return number_clip.set_position((0.04, 0.8), relative=True)
Ejemplo n.º 33
0
 def add_caption(caption, clip):
     """Return ``clip`` with ``caption`` composited on top for the clip's
     whole duration, on a translucent black background strip."""
     label = TextClip(caption,
                      font='Amiri-regular',
                      color='white',
                      fontsize=80)
     label = label.margin(40).set_duration(clip.duration)
     label = label.on_color(color=(0, 0, 0), col_opacity=0.6)
     return CompositeVideoClip([clip, label])
    def generate_vid_list(self, option, currloc):
        """Collect positioned overlay clips for every video under ``option``.

        Walks ``SECTIONS_PATH/<option>/videos/<date>`` for *.mp4 files,
        pairs each with its JSON metadata from the matching ``data`` folder,
        and for each non-blacklisted video appends: the resized video clip,
        the creator's avatar, the creator's name, and description clips.

        Returns:
            (tmpclips, currloc, totalcliptime) — the clip list, the updated
            timeline position, and the total duration added.
        """
        tmpclips = []
        totalcliptime = 0

        for fldr_date in self.FLDR_DATES:
            folderloc = os.path.join(SECTIONS_PATH, option, 'videos',
                                     str(fldr_date))
            datafolderloc = os.path.join(SECTIONS_PATH, option, 'data',
                                         str(fldr_date))

            for root, _, filenames in os.walk(folderloc):
                for filename in fnmatch.filter(filenames, '*.mp4'):
                    datavar = filename.split('.')[0]
                    dname = os.path.join(datafolderloc, datavar + ".json")
                    if not os.path.isfile(dname):
                        sys.stderr.write("ERROR: no data found for: %s\n" % \
                                                                        dname)
                        continue
                    else:
                        with open(dname) as json_file:
                            json_data = json.load(json_file)

                    if self.check_blacklisted_users(json_data["username"]):
                        continue

                    fname = os.path.join(root, filename)
                    if not os.path.isfile(fname):
                        sys.stderr.write("ERROR: no video found for: %s\n" % \
                                                                        fname)
                        continue
                    else:
                        sys.stdout.write("Adding file name: %s\n" % \
                                                fname[len(SECTIONS_PATH)+1:])

                    try:
                        clip = (VideoFileClip(fname).resize((1157, 830)).\
                                            set_start(currloc).crossfadein(1).\
                                            set_position("center").\
                                            set_position((383, 88)))
                    # BUGFIX: was a bare ``except:`` — it would also catch
                    # KeyboardInterrupt/SystemExit and then DELETE the files.
                    except Exception:
                        sys.stderr.write("ERROR: cannot open video: %s\n" % \
                                                                        fname)
                        # Unreadable video: drop both the video and its data.
                        sys.stderr.write("DELETING: %s\n" % fname)
                        os.remove(fname)
                        sys.stderr.write("DELETING: %s\n" % dname)
                        os.remove(dname)
                        continue

                    tmpclips.append(clip)

                    # add creator image
                    self.make_creator_icon(json_data["avatarUrl"], \
                                                            datavar + ".jpg")
                    creatorclip = (ImageClip(datavar + ".jpg", \
                                         transparent=True).set_start(currloc).\
                                         set_duration(clip.duration).\
                                         set_position((383, 10)))

                    tmpclips.append(creatorclip)
                    # NOTE(review): presumably gives the icon file time to be
                    # flushed before it is read/removed — TODO confirm.
                    time.sleep(1)
                    os.remove(datavar + ".jpg")

                    # add creator name
                    try:
                        creatortxt = TextClip(json_data["username"].encode(\
                                       'ascii', 'ignore'), font='Arial-Black', \
                                        color="MediumSpringGreen", fontsize=30)
                    # BUGFIX: narrowed from a bare ``except:``.
                    except Exception:
                        sys.stderr.write("\nERROR: using default username.\n")
                        creatortxt = TextClip("Default UserName", \
                                                  font='Arial-Black', \
                                                  color="MediumSpringGreen", \
                                                  fontsize=30)

                    creatortxt_col = creatortxt.on_color(col_opacity=0).\
                                                set_duration(clip.duration).\
                                                set_start(currloc)

                    creatortxt_mov = creatortxt_col.set_position((465, 23))
                    tmpclips.append(creatortxt_mov)

                    # add the description
                    desc_clip = self.create_description(\
                                        json_data["description"], currloc, clip)
                    for item in desc_clip:
                        tmpclips.append(item)

                    currloc += clip.duration
                    totalcliptime += clip.duration

        return (tmpclips, currloc, totalcliptime)
Ejemplo n.º 35
0
    ylabel('frequency (Hz)')
    tight_layout()
    savefig('tmp.png', dpi=25)
    

def write_audio_clips_to_disk(variations, sample_freq):
    """Write each variation to ``sound.wav`` as 16-bit integer PCM.

    NOTE(review): every variation is written to the *same* file, so only the
    last one survives — presumably intentional for auditioning one clip at a
    time; confirm, or switch to per-key file names.
    """
    # BUGFIX: was ``variations.keys()[:]`` — dict views are not sliceable in
    # Python 3 (and the copy was pointless even in Python 2).
    for p in variations:
        wavfile.write('sound.wav', sample_freq, variations[p].astype(int16))

# NOTE(review): this snippet appears truncated by the scrape — it ends right
# after "# segment it" with no segmentation code; the remainder is missing.
if __name__ == '__main__':
    # Target output resolution (480p, 16:9).
    screensize = (854, 480)
    clips = []
    # intro screen: static title card shown for 5 seconds
    intro_txt = TextClip("""119 Variations\non a theme by Samsung\n'The whistling ringtone'""",
                         color='white', 
                         font='Baskerville Old Face Normal',
                         kerning=5, 
                         fontsize=35)
    
    intro_txt = intro_txt.set_pos('center').set_duration(5)
    intro_cvc = CompositeVideoClip( [intro_txt],
                            size=screensize, transparent=True)
                            
    clips.append(intro_cvc)
    
    # load sound file
    from scipy.io import wavfile
    sample_freq, whistle = wavfile.read("samsung_ringtone.wav")
    # Time axis in seconds, one entry per audio sample.
    t = arange(whistle.shape[0], dtype=float32) / sample_freq
    
    # segment it
Ejemplo n.º 36
0
def create_video(request):
    screensize = (720,460)
    txtClip = TextClip('Cool effect', color='white', font="Amiri-Bold",
                       kerning=5, fontsize=100)
    cvc = CompositeVideoClip( [txtClip.set_pos('center')],
                              size=screensize)

	# THE NEXT FOUR FUNCTIONS DEFINE FOUR WAYS OF MOVING THE LETTERS

	# helper function
    rotMatrix = lambda a: np.array( [[np.cos(a),np.sin(a)],
                                     [-np.sin(a),np.cos(a)]] )

    def vortex(screenpos,i,nletters):
		d = lambda t : 1.0/(0.3+t**8) #damping
		a = i*np.pi/ nletters # angle of the movement
		v = rotMatrix(a).dot([-1,0])
		if i%2 : v[1] = -v[1]
		return lambda t: screenpos+400*d(t)*rotMatrix(0.5*d(t)*a).dot(v)

    def cascade(screenpos,i,nletters):
		v = np.array([0,-1])
		d = lambda t : 1 if t<0 else abs(np.sinc(t)/(1+t**4))
		return lambda t: screenpos+v*400*d(t-0.15*i)

    def arrive(screenpos,i,nletters):
		v = np.array([-1,0])
		d = lambda t : max(0, 3-3*t)
		return lambda t: screenpos-400*v*d(t-0.2*i)

    def vortexout(screenpos,i,nletters):
		d = lambda t : max(0,t) #damping
		a = i*np.pi/ nletters # angle of the movement
		v = rotMatrix(a).dot([-1,0])
		if i%2 : v[1] = -v[1]
		return lambda t: screenpos+400*d(t-0.1*i)*rotMatrix(-0.2*d(t)*a).dot(v)

	# WE USE THE PLUGIN findObjects TO LOCATE AND SEPARATE EACH LETTER

    letters = findObjects(cvc) # a list of ImageClips

	# WE ANIMATE THE LETTERS

    def moveLetters(letters, funcpos):
		return [ letter.set_pos(funcpos(letter.screenpos,i,len(letters)))
				  for i,letter in enumerate(letters)]

    clips = [ CompositeVideoClip(moveLetters(letters,funcpos),
								  size = screensize).subclip(0,5)
			  for funcpos in [vortex, cascade, arrive, vortexout] ]

	# WE CONCATENATE EVERYTHING AND WRITE TO A FILE

    final_clip = concatenate_videoclips(clips)
    audio_clip = AudioFileClip("media/music.aac").subclip(0, final_clip.duration)
    final_clip = final_clip.set_audio(audio_clip).afx(afx.audio_fadeout, 1.0)

    #final_clip = vfx.resize(final_clip, (570, 570))

    final_clip.write_videofile('videos/coolTextEffects.mp4',
                               fps=23, codec='libx264',
                               audio_bitrate='1000k', bitrate='4000k')

    #final_clip.write_gif('videos/coolGif.gif', fps=23)

    html = "<html><body><div>Video successfully created<div><a href='http://localhost:8000'><button>Back</button></a></body></html>"
    return HttpResponse(html)