Example #1
import gc
from os import remove

from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.compositing.concatenate import concatenate

# BATCH_SIZE, create_supercut and cleanup_log_files are defined elsewhere in
# the source module.


def create_supercut_in_batches(composition, outputfile, padding):
    """Create & concatenate video clips in groups of size BATCH_SIZE and output
    finished video file to output directory.
    """
    total_clips = len(composition)
    start_index = 0
    end_index = BATCH_SIZE
    batch_comp = []
    while start_index < total_clips:
        filename = outputfile + '.tmp' + str(start_index) + '.mp4'
        try:
            create_supercut(composition[start_index:end_index], filename,
                            padding)
            batch_comp.append(filename)
            gc.collect()
            start_index += BATCH_SIZE
            end_index += BATCH_SIZE
        except Exception:
            # skip the failed batch and move on to the next one
            start_index += BATCH_SIZE
            end_index += BATCH_SIZE
            continue

    clips = [VideoFileClip(filename) for filename in batch_comp]
    video = concatenate(clips)
    video.to_videofile(outputfile,
                       codec="libx264",
                       temp_audiofile='temp-audio.m4a',
                       remove_temp=True,
                       audio_codec='aac')

    # remove partial video files
    for filename in batch_comp:
        remove(filename)

    cleanup_log_files(outputfile)
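
For reference, ``composition`` here is a list of clip descriptors; Example #7 below indexes it with ``c['file']``, ``c['start']`` and ``c['end']``. A minimal call sketch, with hypothetical file names and times (``BATCH_SIZE``, ``create_supercut`` and ``cleanup_log_files`` come from the enclosing module):

# Hypothetical usage sketch; file names and times are made up.
composition = [
    {'file': 'interview1.mp4', 'start': 12.0, 'end': 14.5},
    {'file': 'interview1.mp4', 'start': 40.0, 'end': 42.0},
    {'file': 'interview2.mp4', 'start': 5.0, 'end': 7.25},
]
create_supercut_in_batches(composition, 'supercut.mp4', padding=0.1)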
Example #2
def summarize(filepath, new_filename, hotclips):
    """
    Inputs a filepath for a video and generates a new shorter video
    in that same filepath.
    """
    # Only open the file once!
    video = VideoFileClip(filepath)

    chunks = [video.subclip(start, end)
              for (start, end) in hotclips]

    final_clip = concatenate(chunks)

    # txt_clip = ( TextClip("Generated by vSummarize",
    #                      fontsize=20, color='white')
    #             .set_pos('bottom')
    #             .set_duration(5))
    # final_clip = CompositeVideoClip([summarized_video, txt_clip])

    # Use the to_videofile default codec, libx264
    # libx264 is much better than mpeg4, and still writes .mp4
    # Use the fps of the original video.
    final_clip.to_videofile(new_filename,
                            fps=video.fps,
                            audio_codec='mp3')
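
A hedged usage sketch for the function above; ``hotclips`` is evidently a list of (start, end) pairs in seconds, and the file names here are hypothetical:

hotclips = [(10.0, 15.5), (42.0, 50.0), (61.0, 64.0)]
summarize('lecture.mp4', 'lecture_summary.mp4', hotclips)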
Example #3
def create_supercut_in_batches(composition, outputfile, padding):
    total_clips = len(composition)
    start_index = 0
    end_index = batch_size
    batch_comp = []
    while start_index < total_clips:
        filename = outputfile + '.tmp' + str(start_index) + '.mp4'
        try:
            create_supercut(composition[start_index:end_index], filename,
                            padding)
            batch_comp.append(filename)
            gc.collect()
            start_index += batch_size
            end_index += batch_size
        except Exception:
            start_index += batch_size
            end_index += batch_size
            continue

    clips = [VideoFileClip(filename) for filename in batch_comp]
    video = concatenate(clips)
    video.to_videofile(outputfile)

    #remove partial video files
    for filename in batch_comp:
        os.remove(filename)

    cleanup_log_files(outputfile)
Example #4
    def get_clip(self, **kargs):
        """Assemble subclips between consecutive entries in ``self.times``,
        applying any per-segment slow-motion and shift corrections, then
        return their concatenation.
        """
        if self.times is None:
            raise Exception('Times not specified. Run get_times.')
        if self.clips is None:
            self.load_clips()

        clip_array = []
        for i, time in enumerate(self.times[:-1]):

            clip_start = time[0]
            clip_end = self.times[i + 1][0]
            if i in self.slowmo:
                clip_start = clip_start * self.slowmo[i]
                clip_end = clip_end * self.slowmo[i]

            if time[1] > 0:
                clip_start = clip_start - self.shift[time[1] - 1]
                clip_end = clip_end - self.shift[time[1] - 1]
            # round frames?
            # clamp the cut points to the source clip's bounds
            if clip_start < 0:
                clip_start = 0
            if clip_end < 0:
                clip_end = 0
            if clip_start > self.clips[time[1]].duration:
                clip_start = (self.clips[time[1]].duration
                              - 1.0 / self.clips[time[1]].fps)
            if clip_end > self.clips[time[1]].duration:
                clip_end = self.clips[time[1]].duration

            clip_array.append(self.clips[time[1]].subclip(clip_start, clip_end))

        return concatenate(clip_array, **kargs)
Example #5
def update():
    outfilestr = "output/output"

    if not os.path.isdir("output/temp"):
        os.makedirs("output/temp")

    filelist = os.listdir("output/temp/")
    cliplist = []

    # note: this steps through the temp files five at a time, preserving the
    # stride of the original index-based loop
    for i in range(0, len(filelist), 5):
        cliplist.append(VideoFileClip("output/temp/" + filelist[i]))
        print(i)

    outputclip = concatenate(cliplist)

    if os.path.isfile(outfilestr + ".mp4"):
        touch(outfilestr + ".mp4")
    outputclip.to_videofile(outfilestr + ".mp4", codec="libx264")

    return
Example #6
    def subfx(self, fx, ta=0, tb=None, **kwargs):
        """ Apply a transformation to a part of the clip.

        Returns a new clip in which the function ``fx`` (clip -> clip)
        has been applied to the subclip between times ``ta`` and ``tb``
        (in seconds).

        Examples
        ---------

        >>> # The scene between times t=3s and t=6s in ``clip`` will
        >>> # be played at half speed in ``newclip``
        >>> newclip = clip.subfx(lambda c: c.speedx(0.5), 3, 6)

        """
        left = None if (ta == 0) else self.subclip(0, ta)
        center = self.subclip(ta, tb).fx(fx, **kwargs)
        right = None if (tb is None) else self.subclip(t_start=tb)

        clips = [c for c in [left, center, right] if c is not None]

        # yuck: local import to avoid a circular dependency; a cleaner
        # solution would be preferable
        from moviepy.video.compositing.concatenate import concatenate

        return concatenate(clips).set_start(self.start)
Example #7
def create_supercut(composition, outputfile, padding):
    """Concatenate video clips together and output finished video file to the
    output directory.
    """
    print("[+] Creating clips.")
    demo_supercut(composition, padding)

    # add padding when necessary
    for (clip, nextclip) in zip(composition, composition[1:]):
        if ((nextclip['file'] == clip['file'])
                and (nextclip['start'] < clip['end'])):
            nextclip['start'] += padding

    # put all clips together:
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [
        videofileclips[c['file']].subclip(c['start'], c['end'])
        for c in composition
    ]

    print("[+] Concatenating clips.")
    final_clip = concatenate(cut_clips)

    print("[+] Writing ouput file.")
    final_clip.to_videofile(outputfile,
                            codec="libx264",
                            temp_audiofile='temp-audio.m4a',
                            remove_temp=True,
                            audio_codec='aac')
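
A short, hypothetical illustration of the padding fix-up above: when two cuts come from the same file and the next one starts before the previous one ends, the next start is nudged forward by ``padding`` before the subclips are taken.

composition = [
    {'file': 'a.mp4', 'start': 10.0, 'end': 12.5},
    {'file': 'a.mp4', 'start': 12.0, 'end': 14.0},  # overlaps the previous cut
]
create_supercut(composition, 'out.mp4', padding=0.5)
# inside create_supercut, the second clip's start becomes 12.5 before cutting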
Example #8
def time_symetrize(clip):
    """
    Returns a clip that plays the current clip once forwards and
    then once backwards. This is very practical for making videos
    that loop well, e.g. for creating animated GIFs.
    This effect is automatically applied to the clip's mask and audio
    if they exist.
    """
    return concatenate([clip, clip.fx(time_mirror)])
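
A minimal usage sketch, assuming the old moviepy API used throughout these examples and a hypothetical input file:

from moviepy.video.io.VideoFileClip import VideoFileClip

clip = VideoFileClip('dance.mp4').subclip(0, 2)  # hypothetical input file
looping = time_symetrize(clip)                   # plays forwards, then backwards
looping.to_videofile('dance_loop.mp4', codec='libx264')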
Example #9
    def create_slide_show_of_images(images_and_durations):
        all_slides = []
        for image, duration in images_and_durations:
            image_clip = resize_clip_if_needed(create_image_clip(image),
                                               canvas_video_size, False)
            # 2 seconds is the default duration
            image_clip = image_clip.set_duration(
                duration if duration is not None else 2)

            all_slides.append(image_clip)

        return concatenate(all_slides)
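
The input shape, inferred from the loop above, is a list of (image_path, duration) pairs where a ``None`` duration falls back to 2 seconds; a hypothetical example:

slides = [('title.png', 5), ('outline.png', None)]  # None -> 2 seconds
intro_clip = create_slide_show_of_images(slides)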
Example #10
def create_supercut_in_batches(composition, outputfile, outputtype, padding,
                               outputsubtitle):
    """Create & concatenate video clips in groups of size BATCH_SIZE and output
    finished video file to output directory.
    """
    if outputtype in ['combine', 'both']:
        total_clips = len(composition)
        start_index = 0
        end_index = BATCH_SIZE
        batch_comp = []
        while start_index < total_clips:
            filename = outputfile + '.tmp' + str(start_index) + '.mp4'
            try:
                create_supercut(composition[start_index:end_index], filename,
                                'combine', padding, False)
                batch_comp.append(filename)
                collected = gc.collect()
                print("Garbage collector: collected %d objects." % collected)
                start_index += BATCH_SIZE
                end_index += BATCH_SIZE
            except Exception as e:
                print("create_supercut_in_batches ERROR: {}".format(e))
                start_index += BATCH_SIZE
                end_index += BATCH_SIZE
                continue

        clips = [VideoFileClip(filename) for filename in batch_comp]
        video = concatenate(clips, method='compose')
        video.to_videofile(outputfile,
                           codec="libx264",
                           temp_audiofile='temp-audio.m4a',
                           remove_temp=True,
                           audio_codec='aac')

        if outputsubtitle:
            make_subtitle(composition, outputfile, padding)

        # remove partial video files
        # for filename in batch_comp:
        #     os.remove(filename)

        cleanup_log_files(outputfile)

    # TODO: potentially refactor this; if the user wants individual files, or
    # both individual and concatenated output, we may be able to avoid
    # batching by just running the GC at intervals while writing individual
    # files. Should test whether simply concatenating all the individual
    # files is just as fast; in the meantime, when batching, the individual
    # files for that output option are created separately.
    if outputtype in ['individual', 'both']:
        # passing 'individual' since the combined file has already been created here
        create_supercut(composition, outputfile, 'individual', padding,
                        outputsubtitle)
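
A hedged call sketch for the variant above; ``outputtype`` is one of 'combine', 'individual' or 'both' (both branches appear in the code), and ``outputsubtitle`` toggles subtitle generation via ``make_subtitle``:

# Hypothetical values; composition follows the same descriptor format as Example #1.
create_supercut_in_batches(composition, 'cut.mp4', 'both', 0.1, True)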
Example #11
def freeze_at_start(clip, freeze_duration=None, total_duration=None):
    """
    Makes the clip freeze on its last frame.  With ``duration`` you can
    specify the duration of the freeze. With ``total_duration`` you can
    specify the total duration of the clip and the freeze (i.e. the
    duration of the freeze is automatically calculated). If neither
    is provided, the freeze will have an infinite length.
    """
    
    freezed_clip = ImageClip(clip.get_frame(0))
    if total_duration:
        freeze_duration = total_duration - clip.duration
    if freeze_duration:
        freezed_clip = freezed_clip.set_duration(freeze_duration)
    
    return concatenate([freezed_clip,clip])
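
Two hedged ways to call the helper above, given some already-opened ``clip``:

padded = freeze_at_start(clip, freeze_duration=3)                 # hold first frame 3 s
padded = freeze_at_start(clip, total_duration=clip.duration + 3)  # same effect, via total length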
Example #12
def create_supercut(composition, outputfile, padding):
    print("Creating clips.")
    demo_supercut(composition, padding)

    # add padding when necessary
    for (clip, nextclip) in zip(composition, composition[1:]):
        if ((nextclip['file'] == clip['file']) and
                (nextclip['start'] < clip['end'])):
            nextclip['start'] += padding

    # put all clips together:
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end'])
                 for c in composition]
    final_clip = concatenate(cut_clips)
    final_clip.to_videofile(outputfile)
Example #13
def combineVideos(outputfile):
    if not os.path.isdir("output/temp"):
        os.makedirs("output/temp")

    filelist = os.listdir("output/temp/")
    cliplist = []

    for fil in filelist:
        cliplist.append(VideoFileClip("output/temp/" + fil))

    outputclip = concatenate(cliplist)

    if os.path.isfile(outputfile + ".mp4"):
        touch(outputfile + ".mp4")
    outputclip.to_videofile(outputfile + ".mp4", codec="libx264")

    return
Example #14
        def segment_and_apply_effects(clip, segments):
            # subclips
            paused_clips = []
            for current_segment in segments:
                begin, end = current_segment

                if end is None:
                    # until the end
                    paused_clips += [clip.subclip(t_start=begin)]
                else:
                    paused_clips += [clip.subclip(t_start=begin, t_end=end)]

            pause_clips_with_effect = []
            for current_selected_segment in paused_clips:
                pause_clips_with_effect += fadeinout_effects(
                    current_selected_segment)

            return concatenate(pause_clips_with_effect)
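
A sketch of the ``segments`` shape this nested helper expects, inferred from the subclip calls above; the times are hypothetical, and ``end=None`` runs to the end of the clip:

segments = [(0, 95.0), (110.0, 300.5), (315.0, None)]
trimmed = segment_and_apply_effects(clip, segments)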
Example #15
def create_supercut_in_batches(composition, outputfile, padding):
    """Create & concatenate video clips in groups of size BATCH_SIZE and output
    finished video file to output directory.
    """
    total_clips = len(composition)
    start_index = 0
    end_index = BATCH_SIZE
    batch_comp = []
    print('creating supercut')
    while start_index < total_clips:
        print(start_index, total_clips, batch_comp)
        filename = outputfile + '.tmp' + str(start_index) + '.mp4'
        try:
            print(filename)
            create_supercut(composition[start_index:end_index], filename,
                            padding)
            batch_comp.append(filename)
            #gc.collect()
            start_index += BATCH_SIZE
            end_index += BATCH_SIZE
        except SystemError as e:
            print(e)
            start_index += BATCH_SIZE
            end_index += BATCH_SIZE
            continue
    print(batch_comp)
    clips = [VideoFileClip(filename) for filename in batch_comp]
    print(clips)
    video = concatenate(clips, method='chain')
    video.to_videofile(outputfile,
                       codec="libx264",
                       temp_audiofile='temp-audio.m4a',
                       remove_temp=False,
                       audio_codec='aac',
                       fps=23)

    # remove partial video files
    for filename in batch_comp:
        os.remove(filename)

    cleanup_log_files(outputfile)
Example #16
def create_supercut(composition, outputfile, padding):
    print("[+] Creating clips.")
    demo_supercut(composition, padding)

    # add padding when necessary
    for (clip, nextclip) in zip(composition, composition[1:]):
        if ((nextclip['file'] == clip['file'])
                and (nextclip['start'] < clip['end'])):
            nextclip['start'] += padding

    # put all clips together:
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [
        videofileclips[c['file']].subclip(c['start'], c['end'])
        for c in composition
    ]

    print "[+] Concatenating clips."
    final_clip = concatenate(cut_clips)

    print "[+] Writing ouput file."
    final_clip.to_videofile(outputfile, codec="libx264")
Example #17
def createFinalVideo(slide_clip,
                     speaker_clip,
                     audio_clip,
                     video_background_image,
                     intro_image_and_durations,
                     credit_images_and_durations,
                     fps,
                     layout=None,
                     talk_title=' ',
                     speaker_name=' ',
                     talk_date='',
                     first_segment_duration=10,
                     pauses=None,
                     output_file_name='Output',
                     codecFormat='libx264',
                     container='.mp4',
                     flagWrite=True,
                     is_test=False):
    """
    This function serves to form the video layout, create the final video and write it.

    :param slide_clip: The slide clip. This is a moviepy video object.
    :param speaker_clip: The speaker clip. This is a moviepy video object.
    :param str video_background_image: the path to the background image
    :param list intro_image_and_durations: a list of 2-tuples describing the intro slide show,
      in the same format as ``credit_images_and_durations``.
    :param list credit_images_and_durations: a list of 2-tuples containing the credit information. The first element
      of the tuple is the image file to be shown, the second is the duration of the image in the video.
      If the duration is ``None``, it is set to 2 seconds.
    :param audio_clip: the audio_clip file to be attached to the video file. This is a moviepy audio_clip object.
    :param fps: frame per second
    :param dict layout: indicates the layout of the different video streams. This is a dictionary containing the
      following elements, each of them being a 2-tuple indicating either a size or a position:

      * "canvas_video_size": The desired size of whole layout
      * "slides_video_size": The desired size of screen part
      * "speaker_video_size": The desired size of speaker part
      * "speaker_video_position": the position of the speaker substream
      * "slides_video_position": the position of the slides substream

      If any of these parameters is missing, it is replaced by its value in the default layout.

      .. note: this layout is of course dependent on the background image

    :param str talk_title: the title of the talk. Can be a unicode string.
    :param str speaker_name: name of the speaker. Can be a unicode string.
    :param str talk_date: date of the talk
    :param first_segment_duration: Duration *in seconds* of the first segment of the video, showing the title.
        Defaults to 10 seconds.
    :param list pauses: list of pauses that will not be included in the final video. The pauses consist
        of pairs of strings that indicate a timeframe in the moviePy format.
    :param str output_file_name: the output file name without extension.
    :param str codecFormat: the codec used to encode the new video file.
        Can be any codec supported by ffmpeg, but the container
        must be set accordingly.
    :param str container: the video file format (container) including all streams.
    :param bool is_test: if set to ``True``, only 10 seconds of the video are processed
    :param bool flagWrite: a flag controlling whether the new video is written or not

    .. rubric:: Images

    The background image is shown during the whole video.

    .. rubric:: Hints for video

    You may simply change the codecs, fps, etc. inside the function.

    Video codecs:

    * ``'libx264'``: (use file extension ``.mp4``)
      makes well-compressed videos (quality tunable using 'bitrate').
    * ``'mpeg4'``: (use file extension ``.mp4``) can be an alternative
      to ``'libx264'``, and produces higher quality videos by default.
    * ``'rawvideo'``: (use file extension ``.avi``) will produce
      a video of perfect quality, of possibly very huge size.
    * ``png``: (use file extension ``.avi``) will produce a video
      of perfect quality, of smaller size than with ``rawvideo``
    * ``'libvorbis'``: (use file extension ``.ogv``) is a nice video
      format, which is completely free/ open source. However not
      everyone has the codecs installed by default on their machine.
    * ``'libvpx'``: (use file extension ``.webm``) is a tiny video
      format well suited for web videos (with HTML5). Open source.


    .. rubric:: Hints for audio

    The parameter ``audio_clip`` may be a boolean or the path of a file.
    If ``True`` and the clip has an audio_clip clip attached, this
    audio_clip clip will be incorporated as a soundtrack in the movie.
    If ``audio_clip`` is the name of an audio_clip file, this audio_clip file
    will be incorporated as a soundtrack in the movie.

    Possible audio_clip codecs are:

    * ``'libmp3lame'``: for '.mp3'
    * ``'libvorbis'``: for 'ogg'
    * ``'libfdk_aac'``: for 'm4a',
    * ``'pcm_s16le'``: for 16-bit wav
    * ``'pcm_s32le'``: for 32-bit wav.

     Example::

        createFinalVideo(slide_clip,
                         speaker_clip,
                         audio_clip,
                         video_background_image,
                         intro_image_and_durations,
                         credit_images_and_durations,
                         fps = 30,
                         layout = {'canvas_video_size': (1920, 1080),
                                   'slides_video_size': (1280, 960),
                                   'speaker_video_size': (620, 360)},
                         talk_title = 'How to use SVM kernels',
                         speaker_name = 'Prof. Bernhard Schoelkopf',
                         talk_date = 'July 2015',
                         first_segment_duration = 10,
                         output_file_name = 'video',
                         codecFormat = 'libx264',
                         container = '.mp4',
                         flagWrite = True)


    """

    from moviepy.video.VideoClip import ImageClip, TextClip
    from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
    import moviepy.video.fx.all as vfx
    import moviepy.audio.fx.all as afx
    from moviepy.audio.fx.volumex import volumex

    # setting the sizes
    if layout is None:
        layout = {}

    final_layout = dict(default_layout)
    final_layout.update(layout)

    canvas_video_size = final_layout['canvas_video_size']
    slides_video_size = final_layout['slides_video_size']
    speaker_video_size = final_layout['speaker_video_size']
    speaker_video_position = final_layout['speaker_video_position']
    slides_video_position = final_layout['slides_video_position']

    # some utility functions
    def resize_clip_if_needed(clip, desired_size, preserve_aspect_ratio=True):
        if clip.w != desired_size[0] or clip.h != desired_size[1]:
            if preserve_aspect_ratio:
                aspect_ratio_target = float(desired_size[0]) / desired_size[1]
                aspect_ratio_clip = float(clip.w) / clip.h
                if aspect_ratio_clip > aspect_ratio_target:
                    return clip.resize(width=desired_size[0])
                else:
                    return clip.resize(height=desired_size[1])
            else:
                return clip.resize(desired_size)
        return clip

    def create_image_clip(image_filename):
        # having some encoding issues in moviepy
        if isinstance(image_filename, unicode):
            image_filename = image_filename.encode(sys.getdefaultencoding())

        image_filename = os.path.abspath(image_filename)
        assert (os.path.exists(image_filename))
        return ImageClip(image_filename)

    def create_slide_show_of_images(images_and_durations):
        all_slides = []
        for image, duration in images_and_durations:

            image_clip = resize_clip_if_needed(create_image_clip(image),
                                               canvas_video_size, False)
            image_clip = image_clip.set_duration(
                duration
                if duration is not None else 2)  # 2 secs is the default

            all_slides.append(image_clip)

        return concatenate(all_slides)

    if isinstance(video_background_image, unicode):
        video_background_image = video_background_image.encode(
            sys.getdefaultencoding())

    # warn when the canvas cannot contain the substreams (the original check
    # used `or`, which effectively compared against only one of the sizes)
    if (canvas_video_size[0] < max(slides_video_size[0], speaker_video_size[0])) and \
       (canvas_video_size[1] < max(slides_video_size[1], speaker_video_size[1])):

        logger.warning(
            "[layout] Warning: The selected sizes are not appropriate")

    ####
    # First segment: title,
    if intro_image_and_durations is None or not intro_image_and_durations:
        # this branch is unused and broken; it was meant as a template for
        # the generated title screen

        # SHOULD NOT GO THERE!!! Not working
        assert (False)

        instituteText = "Max Planck Institute for Intelligent Systems"
        defaultText = 'Machine Learning Summer School 2015'

        left_margin = int(canvas_video_size[0] / 3) - 100
        right_margin = 100
        width = canvas_video_size[0] - left_margin - right_margin

        pixelPerCharTitleText = width // len(talk_title)
        pixelPerCharSpeakerText = width // len(speaker_name)

        if isinstance(talk_title, unicode):
            talk_title = talk_title.encode('utf8')

        txtClipTitleText = TextClip(talk_title,
                                    fontsize=pixelPerCharTitleText,
                                    color='white',
                                    font="Amiri")
        txtClipTitleText = txtClipTitleText.set_position(
            (left_margin, canvas_video_size[1] / 3))

        if isinstance(speaker_name, unicode):
            speaker_name = speaker_name.encode('utf8')

        txtClipSpeakerText = TextClip(speaker_name,
                                      fontsize=pixelPerCharSpeakerText,
                                      color='white',
                                      font="Amiri")
        txtClipSpeakerText = txtClipSpeakerText.set_position(
            (left_margin, canvas_video_size[1] / 3 + 100))

        txtClipInstituteText = TextClip(instituteText,
                                        fontsize=36,
                                        color='white',
                                        font="Amiri")
        txtClipInstituteText = txtClipInstituteText.set_position(
            (left_margin, canvas_video_size[1] / 3 + 170))

        txtClipDefaultText = TextClip(defaultText,
                                      fontsize=40,
                                      color='white',
                                      font="Amiri")
        txtClipDefaultText = txtClipDefaultText.set_position(
            (left_margin, canvas_video_size[1] / 3 + 300))

        txtClipDateText = TextClip(talk_date,
                                   fontsize=40,
                                   color='white',
                                   font="Amiri")
        txtClipDateText = txtClipDateText.set_position(
            (left_margin, canvas_video_size[1] / 3 + 350))

        # this does not work, the sizes should be set properly
        # and the fonts are ugly
        first_segment_clip = CompositeVideoClip(
            [txtClipTitleText]
        )  #, txtClipSpeakerText, txtClipInstituteText, txtClipDefaultText, txtClipDateText])
        first_segment_clip = first_segment_clip.set_duration(
            first_segment_duration)
        #first_segment_clip = first_segment_clip.set_start(0)

    else:
        first_segment_clip = create_slide_show_of_images(
            intro_image_and_durations)

    ####
    # second segment: the slides, videos, audio_clip, etc.
    # resizing the slides and speaker clips if needed
    speaker_clip_composed = resize_clip_if_needed(speaker_clip,
                                                  speaker_video_size)
    slide_clip_composed = resize_clip_if_needed(slide_clip, slides_video_size)

    if audio_clip is not None:
        # the audio_clip is associated to this clip
        speaker_clip_composed = speaker_clip_composed.set_audio(audio_clip)
        # the audio channel needs to be set for this one as well, otherwise
        # the final result will depend on how this stream/video clip was opened (audio is on by default)
        slide_clip_composed = slide_clip_composed.set_audio(audio_clip)

    # this one will be used as the reference for properties
    # apparently MoviePy cannot nest CompositeVideoClip instances, which is
    # why it is done this way instead of putting this part into a
    # CompositeVideoClip
    second_segment_clip = slide_clip_composed

    # handling the pauses and the start/stop of the video

    # transform pauses into segments
    segments = None
    if pauses is not None and pauses:
        segments = []
        last_start = 0
        for begin_pause, end_pause in pauses:
            assert (begin_pause is not None or end_pause is not None)
            if begin_pause is None:
                logger.info(
                    '[VIDEO][CROP] removing start segment ending at %s',
                    end_pause)
                last_start = end_pause
            elif end_pause is None:
                logger.info(
                    '[VIDEO][CROP] removing end_pause segment starting at %s',
                    begin_pause)
                segments += [(last_start, begin_pause)]
            else:
                logger.info(
                    '[VIDEO][CROP] removing intermediate segment %s <--> %s',
                    begin_pause, end_pause)
                segments += [(last_start, begin_pause)]
                last_start = end_pause

        if not segments:
            # in case we only trimmed the beginning
            segments += [(last_start, None)]

        # apply effects
        def fadeinout_effects(clip_effect,
                              fadein_duration=2,
                              fadeout_duration=2):
            ret = []
            start = 0  # start/end segment without effect
            end = None
            if (fadein_duration > 0):
                ret += [
                    clip_effect.subclip(0, fadein_duration).fx(
                        vfx.fadein,
                        duration=fadein_duration).afx(afx.audio_fadein,
                                                      fadein_duration)
                ]
                start = fadein_duration

            if (fadeout_duration > 0):
                end = -fadeout_duration

            ret += [clip_effect.subclip(start, end)]

            if (fadeout_duration > 0):
                ret += [
                    clip_effect.subclip(clip_effect.duration -
                                        fadeout_duration).fx(
                                            vfx.fadeout,
                                            duration=fadeout_duration).afx(
                                                afx.audio_fadeout,
                                                fadeout_duration)
                ]

            return ret

        def segment_and_apply_effects(clip, segments):
            # subclips
            paused_clips = []
            for current_segment in segments:
                begin, end = current_segment

                if end is None:
                    # until the end
                    paused_clips += [clip.subclip(t_start=begin)]
                else:
                    paused_clips += [clip.subclip(t_start=begin, t_end=end)]

            pause_clips_with_effect = []
            for current_selected_segment in paused_clips:
                pause_clips_with_effect += fadeinout_effects(
                    current_selected_segment)

            return concatenate(pause_clips_with_effect)

        # apply it to speaker and slide clips
        slide_clip_composed = segment_and_apply_effects(
            slide_clip_composed, segments)
        speaker_clip_composed = segment_and_apply_effects(
            speaker_clip_composed, segments)

    # making processing shorter if this is a test
    if is_test:
        duration = int(slide_clip_composed.duration)
        current_start = 0
        out1 = []
        out2 = []
        while current_start < duration:
            out1 += [
                slide_clip_composed.subclip(current_start, current_start + 3)
            ]
            out2 += [
                speaker_clip_composed.subclip(current_start, current_start + 3)
            ]
            current_start += 10 * 60  # every 10 mins

        slide_clip_composed = concatenate(out1)
        speaker_clip_composed = concatenate(out2)

    # we take again the slide clip as the reference one
    second_segment_clip = slide_clip_composed

    ####
    # second segment overlay: title, info, background: duration equal to the second segment clip
    # placement of the two streams above
    if isinstance(speaker_name, unicode):
        # unclear what encoding MoviePy really expects; utf8 appears to work
        # note that the utf8 encoding is applied to the full unicode string
        info_underslides = (u'%s - %s' %
                            (speaker_name, talk_title)).encode('utf8')
    else:
        info_underslides = '%s - %s' % (speaker_name, talk_title)

    # use the specific font from the layout
    list_fonts = TextClip.list('font')
    lower_case_font = [i.lower() for i in list_fonts]
    index_desired_font = lower_case_font.index(
        final_layout['font']
    ) if final_layout['font'] in lower_case_font else None
    # compare against None explicitly: a match at index 0 is falsy and would
    # otherwise wrongly select the fallback font
    final_font = list_fonts[
        index_desired_font] if index_desired_font is not None else final_layout[
            'font-fallback']
    talk_info_clip = TextClip(info_underslides,
                              fontsize=30,
                              color='white',
                              font=final_font)

    # center in height/width if resize uses aspect ratio conservation
    centered_speaker_video_position = list(speaker_video_position)
    if speaker_clip_composed.w != speaker_video_size[0]:
        assert (speaker_clip_composed.w < speaker_video_size[0])
        centered_speaker_video_position[0] += (speaker_video_size[0] -
                                               speaker_clip_composed.w) // 2
    if speaker_clip_composed.h != speaker_video_size[1]:
        assert (speaker_clip_composed.h < speaker_video_size[1])
        centered_speaker_video_position[1] += (speaker_video_size[1] -
                                               speaker_clip_composed.h) // 2
    speaker_clip_composed = speaker_clip_composed.set_position(
        centered_speaker_video_position)

    slide_clip_composed = slide_clip_composed.set_position(
        slides_video_position)

    # textual information about the talk
    talk_info_clip = talk_info_clip.set_position(
        (slide_clip_composed.pos(0)[0],
         slide_clip_composed.pos(0)[1] + slide_clip_composed.h + 15))

    # background
    background_image_clip = resize_clip_if_needed(
        create_image_clip(video_background_image), canvas_video_size)

    # stacking
    second_segment_overlay_clip = CompositeVideoClip([
        background_image_clip, talk_info_clip, speaker_clip_composed,
        slide_clip_composed
    ])

    # same attributes as the clip it is supposed to overlay
    second_segment_overlay_clip = second_segment_overlay_clip.set_duration(
        second_segment_clip.duration)

    ###
    # third segment: credits etc.
    third_segment_clip = create_slide_show_of_images(
        credit_images_and_durations)

    # the final video
    outputVideo = concatenate(
        [first_segment_clip, second_segment_overlay_clip, third_segment_clip])

    if flagWrite:
        kw_additional_args = {}
        if is_test:
            kw_additional_args['threads'] = 4
            kw_additional_args['preset'] = 'ultrafast'
        outputVideo.write_videofile(output_file_name + container,
                                    fps,
                                    codec=codecFormat,
                                    **kw_additional_args)

    return outputVideo
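
A hypothetical ``layout`` argument for the function above, using the keys documented in its docstring; any key left out falls back to the module-level ``default_layout``, and the sizes and positions here are invented for illustration:

layout = {
    'canvas_video_size': (1920, 1080),
    'slides_video_size': (1280, 960),
    'speaker_video_size': (620, 360),
    'speaker_video_position': (30, 360),
    'slides_video_position': (620, 60),
}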
Example #18
def create_supercut(composition, outputfile, outputtype, padding,
                    outputsubtitle):
    """Concatenate video clips together and output finished video file to the
    output directory.
    """
    print("[+] Creating clips.")
    demo_supercut(composition, padding)

    # add padding when necessary
    for (clip, nextclip) in zip(composition, composition[1:]):
        if ((nextclip['file'] == clip['file'])
                and (nextclip['start'] < clip['end'])):
            nextclip['start'] += padding

    # put all clips together:
    all_filenames = set([c['file'] for c in composition])
    #  dict of VideoFileClip instances by video file name, one for each original video file matched
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    # list of VideoFileClip instances, one for each matching sub clip in all matching videos
    cut_clips = [
        videofileclips[c['file']].subclip(c['start'], c['end'])
        for c in composition
    ]

    if outputtype in ['individual', 'both']:
        clip_output_dir = outputfile + '_clips'
        print "[+] Exporting individual clips for " + str(
            len(videofileclips)) + ' source videos to '
        #comp properties: matches, start, end, file, time, line
        for i, c in enumerate(composition):
            file = os.path.basename(c['file'])
            # replace non alphanumeric chars with underscore
            file = re.sub('[^0-9a-zA-Z]+', '_', file)
            line = c['line']
            line = re.sub('[^0-9a-zA-Z]+', '_', line)
            start = c['start']
            end = c['end']
            if not os.path.exists(clip_output_dir):
                os.makedirs(clip_output_dir)
            newfilepath = '{}/{}_{}_-_{}.mp4'.format(clip_output_dir, i + 1,
                                                     file, line)
            if outputsubtitle:
                make_subtitle([c], newfilepath, padding)
            #print "Writing individual clip: {}".format(newfilepath)
            cut_clips[i].to_videofile(newfilepath,
                                      codec="libx264",
                                      temp_audiofile='temp-audio.m4a',
                                      remove_temp=True,
                                      audio_codec='aac')

    if outputtype in ['combine', 'both']:
        print "[+] Concatenating clips."
        final_clip = concatenate(cut_clips, method='compose')

        print "[+] Writing combined ouput file."
        final_clip.to_videofile(outputfile,
                                codec="libx264",
                                temp_audiofile='temp-audio.m4a',
                                remove_temp=True,
                                audio_codec='aac')

        if outputsubtitle:
            make_subtitle(composition, outputfile, padding)
Example #19
# Fragment of a caption-based summarizer script; the caption-reading loop
# that builds sentence_list, start_list, end_list and text is not shown.
# runs text through summarizer
summarizer(text)

# converts mp4 file into video clip for further processing
myclip = VideoFileClip(vid_file_list[0])

# runs through each sentence of summary
for i in range(len(summary_list)-1):
    # determines which sentence number it is
    index = sentence_list.index(summary_list[i])
    # creates a video clip using the start and end time of the sentence
    video_clips.append(myclip.subclip(time_stamp_to_seconds(start_list[index]), time_stamp_to_seconds(end_list[index])))


# combines all video clips
final_clip = concatenate(video_clips)
# creates mp4 file
final_clip.write_videofile(summary_directory + '/' + summary_name + ".mp4")