Ejemplo n.º 1
0
def convert_video(srtfile, xgenerator, invideo, outvideo):
    """Burn the subtitles in *srtfile* onto *invideo* and write *outvideo*.

    *xgenerator* is the text-clip factory handed to SubtitlesClip.
    """
    captions = SubtitlesClip(srtfile, xgenerator)
    base = VideoFileClip(invideo)
    # Captions anchored at 20% from the left, 80% down the frame.
    positioned = captions.set_position((0.2, 0.8), relative=True)
    composed = CompositeVideoClip([base, positioned])
    composed.to_videofile(outvideo, fps=base.fps)
Ejemplo n.º 2
0
def test_subtitles():
    """Overlay an SRT file on a red/green/blue test video and verify parsing."""
    red = ColorClip((800, 600), color=(255, 0, 0)).set_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).set_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    def generator(txt):
        # Full-frame caption, bottom-aligned.
        return TextClip(txt, font=FONT, size=(800, 600), fontsize=24,
                        method='caption', align='South', color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.write_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    data = [
        ([0.0, 4.0], 'Red!'),
        ([5.0, 9.0], 'More Red!'),
        ([10.0, 14.0], 'Green!'),
        ([15.0, 19.0], 'More Green!'),
        ([20.0, 24.0], 'Blue'),
        ([25.0, 29.0], 'More Blue!'),
    ]
    assert subtitles.subtitles == data

    # Constructing from in-memory cue data must round-trip identically.
    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data
    close_all_clips(locals())
Ejemplo n.º 3
0
def test_subtitles(util):
    """Composite an SRT subtitle track over a colour test video (v2 API)."""
    colours = ((255, 0, 0), (0, 255, 0), (0, 0, 255))
    myvideo = concatenate_videoclips(
        [ColorClip((800, 600), color=c).with_duration(10) for c in colours]
    )
    assert myvideo.duration == 30

    def generator(txt):
        return TextClip(
            txt,
            font=util.FONT,
            size=(800, 600),
            font_size=24,
            method="caption",
            align="South",
            color="white",
        )

    subtitles = SubtitlesClip("media/subtitles.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    # Render only half a second at 5 fps to keep the test fast.
    final.subclip(0, 0.5).write_videofile(
        os.path.join(util.TMP_DIR, "subtitles.mp4"), fps=5, logger=None
    )

    assert subtitles.subtitles == MEDIA_SUBTITLES_DATA

    # Building from parsed data must match parsing the file.
    subtitles = SubtitlesClip(MEDIA_SUBTITLES_DATA, generator)
    assert subtitles.subtitles == MEDIA_SUBTITLES_DATA
Ejemplo n.º 4
0
def test_subtitles():
    """Overlay subtitles on a generated clip; skipped on Travis (no TextClip)."""
    from moviepy.video.tools.subtitles import SubtitlesClip

    red = ColorClip((800, 600), color=(255, 0, 0)).set_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).set_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    # Travis lacks the ImageMagick setup TextClip needs; bail out there
    # while still letting local runs exercise the rest of the test.
    if TRAVIS:
        return

    def generator(txt):
        return TextClip(txt, font='Georgia-Regular', size=(800, 600),
                        fontsize=24, method='caption', align='South',
                        color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.to_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    data = [
        ([0.0, 4.0], 'Red!'),
        ([5.0, 9.0], 'More Red!'),
        ([10.0, 14.0], 'Green!'),
        ([15.0, 19.0], 'More Green!'),
        ([20.0, 24.0], 'Blue'),
        ([25.0, 29.0], 'More Blue!'),
    ]
    assert subtitles.subtitles == data

    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data
Ejemplo n.º 5
0
def compose_subs(vid_file, sub_file):
    """Return *vid_file* with the subtitles from *sub_file* pinned to the top."""
    base = VideoFileClip(vid_file)
    # Subtitle styling is derived from the video so it scales with resolution.
    generator = partial(sub_generator, **make_sub_opts(base))
    captions = SubtitlesClip(sub_file, generator)
    return CompositeVideoClip([base, captions.set_pos("top")])
    def __generateSubs(self, clip, subPath):
        """Build a subtitle clip for *clip* from the SRT file at *subPath*.

        The track is trimmed to just under the clip's duration so the
        composite never outlives the video.
        """
        generator = lambda txt: TextClip(txt, font='Amiri-Bold', fontsize=40, color='white')

        subs = SubtitlesClip(subPath, generator)
        subs = subs.subclip(0, clip.duration - .001)
        # BUG FIX: set_duration() returns a new clip; the original call
        # discarded the result, making it a no-op.
        subs = subs.set_duration(clip.duration - .001)

        return subs
def createVideo(originalClipName, subtitlesFileName, outputFileName,
                alternateAudioFileName, useOriginalAudio):
    """Assemble the final video from a clip, an SRT file and, optionally, an
    alternate audio track.

    Args:
        originalClipName: path of the source video.
        subtitlesFileName: path of the SRT subtitle file.
        outputFileName: path the composited video is written to.
        alternateAudioFileName: audio used when useOriginalAudio is False.
        useOriginalAudio: keep the clip's own audio when True.
    """
    print("\n==> createVideo ")

    # Load the original clip
    print("\t" + strftime("%H:%M:%S", gmtime()),
          "Reading video clip: " + originalClipName)
    clip = VideoFileClip(originalClipName)

    print("\t\t==> Original clip duration: " + str(clip.duration))

    if not useOriginalAudio:
        print(strftime("\t" + "%H:%M:%S", gmtime()),
              "Reading alternate audio track: " + alternateAudioFileName)
        audio = AudioFileClip(alternateAudioFileName)
        audio = audio.subclip(0, clip.duration)
        # BUG FIX: set_duration() returns a new clip; assigning the result
        # makes the call effective (it was previously discarded).
        audio = audio.set_duration(clip.duration)
        print("\t\t==> Audio duration: " + str(audio.duration))
        clip = clip.set_audio(audio)
    else:
        print(strftime("\t" + "%H:%M:%S", gmtime()),
              "Using original audio track...")

    # One TextClip per subtitle cue in the SRT.
    generator = lambda txt: TextClip(
        txt, font='Arial-Bold', fontsize=24, color='white')

    # read in the subtitles files
    print("\t" + strftime("%H:%M:%S", gmtime()),
          "Reading subtitle file: " + subtitlesFileName)
    subs = SubtitlesClip(subtitlesFileName, generator)
    print("\t\t==> Subtitles duration before: " + str(subs.duration))
    subs = subs.subclip(0, ((clip.duration) - 0.001))
    # BUG FIX: same outplace semantics — assign the trimmed clip.
    subs = subs.set_duration(clip.duration - .001)
    print("\t\t==> Subtitles duration after: " + str(subs.duration))
    print("\t" + strftime("%H:%M:%S", gmtime()),
          "Reading subtitle file complete: " + subtitlesFileName)

    print("\t" + strftime("%H:%M:%S", gmtime()), "Creating Subtitles Track...")
    # `annotate` (defined elsewhere in this module) burns each cue's text
    # onto the matching sub-clip of the video.
    annotated_clips = [
        annotate(clip.subclip(from_t, to_t), txt)
        for (from_t, to_t), txt in subs
    ]

    print("\t" + strftime("%H:%M:%S", gmtime()),
          "Creating composited video: " + outputFileName)
    final = concatenate_videoclips(annotated_clips)

    print("\t" + strftime("%H:%M:%S", gmtime()),
          "Writing video file: " + outputFileName)
    final.write_videofile(outputFileName)
    # Close the readers explicitly so ffmpeg subprocesses don't linger.
    clip.reader.close()
    clip.audio.reader.close_proc()
Ejemplo n.º 8
0
def burn(srtPath, videoPath, outputPath):
    """Hard-burn the subtitles in *srtPath* onto *videoPath*.

    The input is normalised to mp4 first, rendered with the module-level
    `generator`, then converted back to mov.
    """
    target_size = compress_dimension_with_rotation_handled(videoPath)
    videoPath = convert_mov_to_mp4(videoPath)
    captions = SubtitlesClip(srtPath, generator)
    source = VideoFileClip(videoPath, target_resolution=target_size)

    composed = CompositeVideoClip(
        [source, captions.set_pos(('center', 'bottom'))])
    temp_output = os.path.splitext(outputPath)[0] + '.mp4'
    # Skip re-rendering when a previous run already produced the mp4.
    if not os.path.isfile(temp_output):
        composed.write_videofile(temp_output, fps=source.fps)
    convert_mp4_to_mov(temp_output)
Ejemplo n.º 9
0
def gifEngine(starttime,
              endtime,
              videofileloc,
              srtfileloc,
              outfileloc,
              logger='gifEngine.log'):
    """Cut [starttime, endtime] from a video, burn in its SRT captions and
    write a palette-optimised GIF to *outfileloc*.

    Returns 0 on success, or the caught IOError/OSError on failure.
    """
    logging.basicConfig(filename=logger, level=logging.DEBUG)
    # NOTE(review): `logger` is rebound from the log-file path to a Logger
    # object here, and that same object is later handed to write_gif(logger=...).
    logger = logging.getLogger(__name__)
    prolog.basic_config()
    try:
        def make_caption(txt):
            return TextClip(txt, font='Impact', fontsize=28, color='white')

        source = VideoFileClip(videofileloc)
        caption_track = SubtitlesClip(srtfileloc, make_caption).set_position(
            ("center", "bottom"), relative=True)
        composite = CompositeVideoClip([source, caption_track])
        composite = composite.subclip(starttime, endtime)
        # palettegen produces a tuned colour palette for better GIF quality.
        composite.write_gif(outfileloc,
                            program='ffmpeg',
                            opt='palettegen',
                            logger=logger,
                            verbose=True)
        return 0
    except (IOError, OSError) as err:
        return err
Ejemplo n.º 10
0
def make_video_with_subtitles(video_path, user_id):
    """Render the user's SRT subtitles onto *video_path* and save the result."""
    def make_caption(txt):
        return moviepy.editor.TextClip(txt, font='Arial', fontsize=30,
                                       color='white')

    # Cues live in the per-user directory as plain SRT.
    cues = file_to_subtitles(MAIN_DIRECTORY + '%d/subtitles.srt' % user_id)
    subtitles = SubtitlesClip(cues, make_caption)

    video = moviepy.editor.VideoFileClip(video_path)
    result = moviepy.editor.CompositeVideoClip(
        [video, subtitles.set_position(('center', 'bottom'))])

    # mp3 intermediate audio; temp file is removed after muxing.
    result.to_videofile(RESULT_DIRECTORY + 'video_%d.mp4' % (user_id),
                        fps=video.fps,
                        audio_codec='libmp3lame',
                        verbose=False,
                        logger=None,
                        temp_audiofile=RESULT_DIRECTORY + 'temp-audio_%d.mp3' %
                        (user_id),
                        remove_temp=True)
Ejemplo n.º 11
0
def add_subtitle(video_path, default_subtitle_path, translated_subtitle_path):
    """Burn a default and a translated subtitle track onto *video_path*.

    Does nothing when no default subtitle file is available.
    """
    if default_subtitle_path is None:
        return

    def lifted_track(path, generator, bottom):
        # A transparent margin lifts the caption off the bottom edge.
        track = SubtitlesClip(path, generator).set_position(('center', 'bottom'))
        return margin(clip=track, bottom=bottom, opacity=0)

    default_subtitle = lifted_track(
        default_subtitle_path, default_subtitle_generator(), 80)
    translated_subtitle = lifted_track(
        translated_subtitle_path, translation_subtitle_generator(), 40)

    video = VideoFileClip(video_path, audio=True)
    composed_video = CompositeVideoClip(
        [video, default_subtitle, translated_subtitle])
    output_filename = replace_extension(
        add_prefix_to_filename(video_path, '[WITH-SUBTITLE] '), '.mp4')
    composed_video.write_videofile(output_filename, threads=2, fps=video.fps)
Ejemplo n.º 12
0
def add_subtitle(video_path, subtitle_path, filename=None):
    """Overlay *subtitle_path* onto *video_path*.

    Writes to *filename*, or to a '[WITH-SUBTITLE] ' name derived from the
    video path when no filename is given.
    """
    def make_caption(txt):
        return TextClip(txt,
                        font='assets/font/GothamMedium.ttf',
                        fontsize=45,
                        color='white',
                        bg_color='#00000066')

    captions = SubtitlesClip(subtitle_path, make_caption)
    # Transparent bottom margin keeps the caption off the frame edge.
    subtitle = margin(clip=captions.set_position(('center', 'bottom')),
                      bottom=35, opacity=0)
    video = VideoFileClip(video_path, audio=True)
    composed_video = CompositeVideoClip([video, subtitle])
    output_filename = filename or replace_extension(
        add_prefix_to_filename(video_path, '[WITH-SUBTITLE] '), '.mp4')
    composed_video.write_videofile(output_filename, threads=2, fps=video.fps)
Ejemplo n.º 13
0
def stitch_audio(sentences, audioDir, movieFile, outFile, srtPath=None, overlayGain=-30):
    """Combines sentences, audio clips, and video file into the ultimate dubbed video

    Args:
        sentences (list): Output of parse_sentence_with_speaker
        audioDir (String): Directory containing generated audio files to stitch together
        movieFile (String): Path to movie file to dub.
        outFile (String): Where to write dubbed movie.
        srtPath (String, optional): Path to transcript/srt file, if desired.
        overlayGain (int, optional): How quiet to make source audio when overlaying dubs.
            Defaults to -30.

    Returns:
       void : Writes movie file to outFile path
    """

    # Files in the audioDir should be labeled 0.wav, 1.wav, etc.
    audioFiles = os.listdir(audioDir)
    # Numeric sort on the filename stem so "10.wav" follows "2.wav".
    audioFiles.sort(key=lambda x: int(x.split('.')[0]))

    # Grab the computer-generated audio file
    segments = [AudioSegment.from_mp3(
        os.path.join(audioDir, x)) for x in audioFiles]
    # Also, grab the original audio
    dubbed = AudioSegment.from_file(movieFile)

    # Place each computer-generated audio at the correct timestamp
    # (pydub positions are in milliseconds; start_time is in seconds).
    for sentence, segment in zip(sentences, segments):
        dubbed = dubbed.overlay(
            segment, position=sentence['start_time'] * 1000, gain_during_overlay=overlayGain)
    # Write the final audio to a temporary output file
    audioFile = tempfile.NamedTemporaryFile()
    dubbed.export(audioFile)
    audioFile.flush()

    # Add the new audio to the video and save it
    clip = VideoFileClip(movieFile)
    audio = AudioFileClip(audioFile.name)
    clip = clip.set_audio(audio)

    # Add transcripts, if supplied
    if srtPath:
        # Caption box: 75% of the frame width, bottom 20% of the height.
        width, height = clip.size[0] * 0.75, clip.size[1] * 0.20
        def generator(txt): return TextClip(txt, font='Georgia-Regular',
                                            size=[width, height], color='black', method="caption")
        subtitles = SubtitlesClip(
            srtPath, generator).set_pos(("center", "bottom"))
        clip = CompositeVideoClip([clip, subtitles])

    clip.write_videofile(outFile, codec='libx264', audio_codec='aac')
    audioFile.close()
Ejemplo n.º 14
0
def test_PR_1137_subtitles():
    """SubtitlesClip must accept path-like objects, not only str paths."""
    def make_textclip(txt):
        return TextClip(
            txt,
            font=FONT,
            font_size=24,
            color="white",
            stroke_color="black",
            stroke_width=0.5,
        )

    # Passing a pathlib.Path (rather than a str) must work end to end.
    clip = SubtitlesClip(Path("media/subtitles.srt"),
                         make_textclip=make_textclip)
    clip.close()
Ejemplo n.º 15
0
    def add_subtitle(self, subtitle_file, out_name):
        """Burn the subtitles in *subtitle_file* onto this clip and write the
        composited video to *out_name*.

        Uses moviepy's SubtitlesClip for the overlay; ImageMagick renders the
        text itself (font, size and colour are set in the generator below).
        Stores the composite on ``self.result`` as a side effect.
        """
        try:
            generator = lambda txt: TextClip(txt, font='Times', fontsize=16, color='white')
            subtitles = SubtitlesClip(subtitle_file, generator)

            self.result = CompositeVideoClip([self.clip, subtitles.set_pos(('center', 'bottom'))])

            self.result.write_videofile(out_name, fps=self.clip.fps)
        # BUG FIX: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt and hid the actual error; catch Exception and
        # report what failed.
        except Exception as err:
            print("Something went wrong with add_subtitle method")
            print(err)
Ejemplo n.º 16
0
    def add_subtitles(
        self,
        subtitles_path: Path,
        color: str = DEFAULT_SUBTITLE_COLOR,
        font: str = DEFAULT_SUBTITLE_FONT,
        fontsize: int = DEFAULT_SUBTITLE_FONTSIZE,
    ):
        """Currently unused.

        Composite the subtitles at *subtitles_path* onto self.video_file_clip.

        Args:
            subtitles_path: SRT file to render.
            color: text colour forwarded to each TextClip.
            font: font name forwarded to each TextClip.
            fontsize: point size forwarded to each TextClip.
        """
        def subtitle_text_clip_factory(text: str) -> TextClip:
            # BUG FIX: TextClip's positional parameters after `txt` are NOT
            # (font, fontsize, color), so the positional call sent each value
            # to the wrong parameter; pass them by keyword.
            return TextClip(text, font=font, fontsize=fontsize, color=color)

        subtitles_clip = SubtitlesClip(subtitles_path,
                                       subtitle_text_clip_factory)
        self.video_file_clip = CompositeVideoClip(
            [self.video_file_clip, subtitles_clip])
Ejemplo n.º 17
0
def stitch_audio(sentences,
                 audio_dir,
                 movie_file,
                 output,
                 srt_path=None,
                 overlay_gain=-30):
    """Combine sentences, generated audio clips, and a video file into the
    dubbed output.

    Args:
        sentences: list of dicts carrying a 'start_time' (seconds) per clip.
        audio_dir: directory of generated audio files named 0.wav, 1.wav, ...
        movie_file: path of the movie to dub.
        output: path the dubbed movie is written to.
        srt_path: optional transcript/SRT file to burn in as captions.
        overlay_gain: gain (dB) applied to the source audio under each dub.
    """
    audio_files = os.listdir(audio_dir)
    # Numeric sort on the filename stem so "10.wav" follows "2.wav".
    audio_files.sort(key=lambda x: int(x.split('.')[0]))

    segments = [
        AudioSegment.from_mp3(os.path.join(audio_dir, x)) for x in audio_files
    ]
    dubbed = AudioSegment.from_file(movie_file)

    for sentence, segment in zip(sentences, segments):
        # place each generated audio at the correct timestamp (pydub uses ms)
        dubbed = dubbed.overlay(segment,
                                position=sentence['start_time'] * 1000,
                                gain_during_overlay=overlay_gain)

    # write the final audio to a temp file
    audio_file = tempfile.NamedTemporaryFile()
    dubbed.export(audio_file)
    audio_file.flush()

    # add new audio to the video and save it
    clip = VideoFileClip(movie_file)
    audio = AudioFileClip(audio_file.name)
    clip = clip.set_audio(audio)

    # add transcripts if there is any
    if srt_path:
        # Caption box: 75% of the frame width, bottom 20% of the height.
        width, height = clip.size[0] * 0.75, clip.size[1] * 0.20

        def generator(txt):
            # BUG FIX: the keyword was misspelled `fond=`, which made
            # TextClip raise TypeError whenever srt_path was supplied.
            return TextClip(txt,
                            font='Georgia-Regular',
                            size=[width, height],
                            color='black',
                            method='caption')

        subtitles = SubtitlesClip(srt_path, generator).set_position(
            ('center', 'bottom'))
        clip = CompositeVideoClip([clip, subtitles])

    clip.write_videofile(output, codec='libx264', audio_codec='aac')
    audio_file.close()
Ejemplo n.º 18
0
def add_subtitle(video_path, subtitle_path):
    """Overlay *subtitle_path* onto *video_path* and write the result to out.mp4."""
    def make_caption(txt):
        return TextClip(txt,
                        font='./fonts/GothamMedium.ttf',
                        fontsize=45,
                        color='white',
                        bg_color='#00000066')

    captions = SubtitlesClip(subtitle_path, make_caption)
    # Transparent bottom margin keeps the caption off the frame edge.
    subtitle = margin(clip=captions.set_position(('center', 'bottom')),
                      bottom=35, opacity=0)
    video = VideoFileClip(video_path, audio=True)
    composed_video = CompositeVideoClip([video, subtitle])
    composed_video.write_videofile("out.mp4", threads=4, fps=video.fps)
Ejemplo n.º 19
0
def make_subtitlesclip(subtitles_map, hewn_size, left, right, srt_padding,
                       vpos):
    """Build a SubtitlesClip for the (left, right) window of the currently
    selected subtitle stream, or return None when there is no stream or the
    window contains no cues.

    The returned clip's ``blit_on`` is monkey-patched so each caption is
    centred horizontally and pinned to *vpos* ('top', otherwise bottom)
    based on its rendered size at every frame.
    """
    spu, spec = subtitles_map.current()
    if spu == -1:
        # No subtitle stream selected.
        return None

    _, srt = spec
    # Cut the SRT down to the requested time window (plus padding).
    subsrt_path = subsrt(srt, left, right, srt_padding)
    if subsrt_path is None:
        return None

    w, h = hewn_size
    # NOTE: Stick to the screen edge when using bg,
    # put some margins otherwise.
    size = (int(w * 0.8), None)
    margin_h = int(h * 0.05)

    def make_textclip(txt):
        # NOTE: imagemagick may fail to retrieve the font, 'ArialUnicode', configured here.
        # To make sure 'ArialUnicode' exists,
        # check '/usr/local/Cellar/imagemagick/7.0.10-28/etc/ImageMagick-7/type.xml'
        # and add a font declaration like:
        # <type
        #   format="ttf"
        #   name="ArialUnicode"
        #   fullname="Arial Unicode MS"
        #   family="Arial Unicode"
        #   glyphs="/System/Library/Fonts/Supplemental/Arial Unicode.ttf" />
        # To check if it's correctly configuared:
        # python -c 'from moviepy.editor import TextClip; print(TextClip.list("font"))'
        return TextClip(txt,
                        size=size,
                        method='caption',
                        align='center',
                        font='ArialUnicode',
                        fontsize=36,
                        color='white',
                        bg_color='rgba(0,0,0,0.6)')

    subtitlesclip = SubtitlesClip(subsrt_path, make_textclip)

    def blit_on(self, picture, t):
        # Monkey patch 'blit_on' to place subtitle relative to its dynamic size
        # The block below copied from moviepy.
        #################################################################################################
        hf, wf = framesize = picture.shape[:2]

        if self.ismask and picture.max():
            return np.minimum(1,
                              picture + self.blit_on(np.zeros(framesize), t))

        ct = t - self.start  # clip time

        # GET IMAGE AND MASK IF ANY

        img = self.get_frame(ct)
        mask = self.mask.get_frame(ct) if self.mask else None

        if mask is not None and ((img.shape[0] != mask.shape[0]) or
                                 (img.shape[1] != mask.shape[1])):
            img = self.fill_array(img, mask.shape)

        hi, wi = img.shape[:2]
        #################################################################################################
        # Position computed from the caption's actual rendered size.
        pos_x = (wf - wi) / 2  # center
        pos_y = margin_h if vpos == 'top' else (hf - hi - margin_h)
        pos = map(int, (pos_x, pos_y))
        from moviepy.video.tools.drawing import blit
        return blit(img, picture, pos, mask=mask, ismask=self.ismask)

    # Bind the patched method to this one instance only.
    subtitlesclip.blit_on = blit_on.__get__(subtitlesclip)
    return subtitlesclip
                # print("Sphinx thinks you said: " + r.recognize_sphinx(audio,language="de-DE"))
            except sr.UnknownValueError:
                print("Google could not understand audio")
            except sr.RequestError as e:
                #Need to build in an redo
                print("Goole error; {0}".format(e))

    print(subtitels_text)

# Optionally translate the recognised text (German -> English).
if translate:
    print('Translate')
    from googletrans import Translator
    t = Translator()
    # Keep each (timing, text) pair; only the text is translated.
    subtitels_text = [(x[0], t.translate(x[1], dest='en', src='de').text)
                      for x in subtitels_text]

if merge_subtitel:
    print('Subtitels')
    #check()

    # Wrap long lines at 100 characters so captions fit on screen.
    subtitels_text_ = [(x[0], '\n'.join(textwrap.wrap(x[1], 100)))
                       for x in subtitels_text]


    subtitles = SubtitlesClip(subtitels_text_)\
            .set_pos(('center','top'))

    # Only the first 59 seconds are rendered here.
    myvideo = video.subclip(0, 59)

    final = CompositeVideoClip([myvideo, subtitles])
    final.write_videofile("data/final.mp4")  #, fps=myvideo.fps)
Ejemplo n.º 21
0
# -*- coding: utf-8 -*-
"""Compose an intro clip with burned-in subtitles using moviepy.

Created on Thu Sep 26 23:46:36 2019

@author: Administrator
"""

from moviepy.editor import *
from moviepy.video.tools.subtitles import SubtitlesClip
from moviepy.video.fx import resize
from moviepy.video.tools.segmenting import findObjects

# Load the subtitle track (black SimHei captions for each cue).
generator = lambda txt: TextClip(
    txt, font='SimHei', fontsize=24, color='black')
sub = SubtitlesClip("welcome.srt", generator)
txt_clip = TextClip("good!", fontsize=70, color='white')
txt_clip = txt_clip.set_pos('center').set_duration(10)
# Intro: first 3 s of the phone footage, rotated upright and resized.
start_clip = VideoFileClip("VID_20190926_163202.mp4").subclip(
    0, 3.0).rotate(90).resize((1500, 750))
# Composite the subtitles over the intro.
start_clip = CompositeVideoClip([start_clip, sub])

print('finish video')
# NOTE(review): `final_clip` is never defined in this file, so this line
# raises NameError as written — presumably `start_clip` (or a later
# composition) was intended; confirm before running.
final_clip.write_videofile("my_stack.mp4", fps=10)
# start_clip = CompositeVideoClip([start_clip, txt_clip])
# Outro: a 2-second still image.
end_clip = ImageClip("end.jpg")
end_clip = end_clip.set_duration(2).resize((1500, 750))
Ejemplo n.º 22
0

def sub_config(txt):
    """Render one subtitle line as white text on a 50%-opaque backdrop."""
    caption = TextClip(txt,
                       font='Helvetica Neue',
                       fontsize=40,
                       color='white')
    # Pad the colour box 5% beyond the text so the backdrop has a border.
    backdrop_size = (int(caption.w * 1.05), int(caption.h * 1.05))
    return caption.on_color(size=backdrop_size, col_opacity=0.5)


# generator = lambda txt: TextClip(txt, font='Helvetica Neue', fontsize=40, color='white').on_color(col_opacity=0.5)

# Overlay the SRT captions on the main video, then prepend the intro.
subtitles = SubtitlesClip("The Power of Chlorophyll.srt", sub_config)
intro_video = VideoFileClip("Intro HanaPhuong 1.mp4")
video = VideoFileClip("The Power of Chlorophyll.mp4")

# Captions sit centred horizontally at 80% of the frame height.
result = CompositeVideoClip(
    [video, subtitles.set_position(('center', 0.8), relative=True)])
result = concatenate_videoclips([intro_video, result], method='compose')
result.write_videofile("The Power of Chlorophyll-yt.mp4",
                       fps=video.fps,
                       temp_audiofile="temp-audio.m4a",
                       remove_temp=True,
                       codec="libx264",
                       audio_codec="aac")

# srt file: remember to append one extra line at the end
# if the input file is not mp4, convert it to mp4 first
Ejemplo n.º 23
0
                              fontsize=fontsize,
                              color='white',
                              method='caption',
                              align=direction,
                              size=size)
            else:
                tc = TextClip(txt,
                              font='Nunito',
                              fontsize=fontsize,
                              color='white',
                              method='caption',
                              align='center',
                              size=(512, 25))
            return tc

        return generator

    sub_1 = SubtitlesClip(s_up, make_textclip=create_generator('North', 8))
    sub_2 = SubtitlesClip(s_down, make_textclip=create_generator('South', 24))

    sub_1.end = sub_2.end

    final = CompositeVideoClip([clip, sub_1, sub_2], size=size)
else:
    final = clip

final.write_videofile(outname,
                      codec='libx264',
                      audio_codec='aac',
                      fps=clip.fps)
Ejemplo n.º 24
0
    def func_edit(self):
        """Batch-edit every video in /home/master/unvideos: prepend a leader
        clip, overlay a logo, burn in the matching SRT subtitles when one
        exists, and write the result to /home/master/edited/.
        """
        import os
        import os.path

        from moviepy.video import VideoClip
        from moviepy.editor import VideoFileClip, vfx, concatenate_videoclips, CompositeVideoClip, \
            ImageClip, TextClip
        # from moviepy.video.compositing import CompositeVideoClip
        from moviepy.video.tools.subtitles import SubtitlesClip

        from googletrans import Translator

        def translat(text='no text is passed'):
            # Translate English text to simplified Chinese via googletrans.
            trans = Translator()
            result = trans.translate(text, dest='zh-CN', src='en').text
            # Translated(src=en, dest=zh-cn, text=你好, pronunciation=Nǐ hǎo, extra_data="{'translat...")
            # print(result.text)

            return result

        def translat_subtitle(file):
            # NOTE(review): file.readline() returns ONE line, so this loop
            # iterates the characters of that line, and the output file is
            # reopened (truncating it) on every iteration. This helper looks
            # broken; it is only referenced from commented-out code below.

            for i, line in enumerate(file.readline()):
                print(i, line)
                translated_sub = open(r'/home/master/subtitle/translated/{}.srt'.format(en_title), 'w',encoding='utf-8')

                if i % 4 == 2 or i == 2:
                    # doc=''
                    # doc=doc+str(line)
                    translated_line = translat(line)
                    translated_sub.write(translated_line)
                else:
                    translated_sub.write(line)

            return translated_sub

        for mp4 in os.listdir(r'/home/master/unvideos'):

            en_title = os.path.basename(mp4).split('.')[0]
            zh_title = translat(str(en_title))
            print(zh_title)

            main_clip = VideoFileClip(r'/home/master/unvideos/{}'.format(mp4))

            leader = VideoFileClip(r'./material/leader.mp4')
            # Match the main clip's resolution to the leader's.
            main_clip=main_clip.resize(leader.size)

            # leader.duration=3
            # clip1=clip.fx(vfx.mirror_x)
            # clip2=clip.fx(vfx.mirror_y)
            # clip2=clip.resize(0.5)

            concatenate = concatenate_videoclips([leader, main_clip])

            logo = ImageClip(r'./material/logo.png')
            logo.duration = main_clip.duration
            # NOTE(review): resize() returns a new clip; this result is
            # discarded, so the call is a no-op as written.
            logo.resize((350,150))
            # logo_end_gif=


            if os.path.exists(r'/home/master/subtitle/{}.srt'.format(en_title)):

                with open(r'/home/master/subtitle/{}.srt'.format(en_title), 'rb') as f:
                    pass
                    # print(f.read())
                    # en_sub=f.read()
                    # zh_sub=translat(en_sub)
                    # zh_srt=open(r'./subtitle/translated/{}.srt'.format(en_title),'wb')
                    # zh_srt.write(zh_sub)

                    # zh_srt=translat_subtitle(f)

                font = "ArialUnicode"
                color = 'white'
                generator = lambda txt: TextClip(txt, font=font, fontsize=40, color=color)
                # sub=SubtitlesClip(r'./subtitle/translated/{}.srt'.format(en_title),'rb')
                sub = SubtitlesClip(r'/home/master/subtitle/{}.srt'.format(en_title), generator)

                # final=clips_array([[clip1,clip2]])

                # Subtitles and logo start after the 3-second leader.
                final = CompositeVideoClip([concatenate,
                                            sub.set_start(3).set_pos('bottom'),
                                            logo.set_start(3).set_pos((1400,100)).crossfadein(2)])

                # final.write_videofile('add_subtitle.mp4',fps=clip.fps)

                final.write_videofile('/home/master/edited/{}.mp4'.format(en_title), fps=main_clip.fps)

            else:
                # No subtitle file: composite only the logo overlay.
                final = CompositeVideoClip([concatenate,
                                            logo.set_start(3).set_pos((1400,100)).crossfadein(2)])
                final.write_videofile('/home/master/edited/{}.mp4'.format(en_title), fps=main_clip.fps,audio=True,verbose=True)
Ejemplo n.º 25
0
from moviepy.editor import *
from moviepy.video.tools.subtitles import SubtitlesClip

# One caption style for every subtitle cue.
generator = lambda txt: TextClip(txt, font='Arial', fontsize=24, color='black')

# BUG FIX: use a context manager so the transcript file is always closed
# (the original open() handle was never released).
with open('enresult.txt', 'r', encoding='utf-8') as f:
    Lines = f.readlines()

# Each transcript line becomes a 10-second cue: ((start, end), text).
subs = []
count = 0
for line in Lines:
    subs.append(((count, count + 10), line))
    count += 10

subtitles = SubtitlesClip(subs, generator)

video = VideoFileClip("demo.mp4")
result = CompositeVideoClip([video, subtitles.set_pos(('center', 'bottom'))])

result.write_videofile("output.mp4")
Ejemplo n.º 26
0
        # The SRT is opened only to confirm readability; the translation
        # pipeline below remains disabled (commented out).
        with open(r'./subtitle/{}.srt'.format(en_title), 'rb') as f:
            pass
            # print(f.read())
            # en_sub=f.read()
            # zh_sub=translat(en_sub)
            # zh_srt=open(r'./subtitle/translated/{}.srt'.format(en_title),'wb')
            # zh_srt.write(zh_sub)

            # zh_srt=translat_subtitle(f)

        # White 40pt captions in a Unicode-capable font.
        font = "ArialUnicode"
        color = 'white'
        generator = lambda txt: TextClip(
            txt, font=font, fontsize=40, color=color)
        # sub=SubtitlesClip(r'./subtitle/translated/{}.srt'.format(en_title),'rb')
        sub = SubtitlesClip(r'./subtitle/{}.srt'.format(en_title), generator)

        # final=clips_array([[clip1,clip2]])

        # Subtitles and logo both start after the 3-second leader.
        final = CompositeVideoClip([
            concatenate,
            sub.set_start(3).set_pos('center'),
            # logo.set_start(3).set_pos('right','top').crossfadein(1)
            logo.set_start(3).set_pos((1000, 100)).crossfadein(2)
        ])

        # final.write_videofile('add_subtitle.mp4',fps=clip.fps)

        final.write_videofile('./videos/edited/{}.mp4'.format(en_title),
                              fps=main_clip.fps)
Ejemplo n.º 27
0
    "C:\Program Files\ImageMagick-7.0.10-Q16-HDRI\magick.exe"
})
screensize = (720, 460)
composite_durations = []
intro = (TextClip("Nos supers vacances au snowboard",
                  fontsize=20,
                  color='white',
                  bg_color='black').set_position(['center',
                                                  'center']).set_duration(10))
composite_durations.append(intro.duration)
subs = [
    ((10, 20), 'sub1'),
    ((50, 60), 'sub2'),
    ((116, 131), 'sub3'),
]
subtitles = SubtitlesClip(subs).set_position(['center', 'bottom'])
video1 = VideoFileClip("./snow.mp4").subclip(
    (0, 23), (1, 47)).set_position(['center', 'center'])
composite_durations.append(composite_durations[0] + video1.duration)
video2 = VideoFileClip("./snow.mp4").subclip(
    (2, 1), (2, 21)).set_position(['center', 'center'])
composite_durations.append(composite_durations[1] + video2.duration)
outro = (TextClip("Merci à tous", fontsize=20,
                  color='white').set_position(['center',
                                               'center']).set_duration(15))
composite_durations.append(composite_durations[2] + outro.duration)
result = CompositeVideoClip([
    intro,
    video1.set_start(composite_durations[0]),
    video2.set_start(composite_durations[1]),
    outro.set_start(composite_durations[2]), subtitles
def createVideo(originalClipName,
                subtitlesFileName,
                outputFileName,
                alternateAudioFileName,
                useOriginalAudio=True):
    """Assemble the final subtitled video.

    Loads ``originalClipName``, optionally replaces its audio track with
    ``alternateAudioFileName`` (when ``useOriginalAudio`` is False), burns the
    subtitles from ``subtitlesFileName`` into the video in chunks of ~30 cues
    (each chunk rendered to an intermediate ``clip_<t>.mp4``), then
    concatenates the chunk files into ``outputFileName``.

    Relies on the module-level ``annotate`` helper to composite one subtitle
    cue onto a sub-clip.
    """
    print("\n==> createVideo ")

    # Load the original clip.
    print("\t" + strftime("%H:%M:%S", gmtime()),
          "Reading video clip: " + originalClipName)
    clip = VideoFileClip(originalClipName)
    print("\t\t==> Original clip duration: " + str(clip.duration))

    if not useOriginalAudio:
        print(strftime("\t" + "%H:%M:%S", gmtime()),
              "Reading alternate audio track: " + alternateAudioFileName)
        audio = AudioFileClip(alternateAudioFileName)
        # moviepy clip methods return NEW clips -- the original code discarded
        # the set_duration() result, making it a no-op. Re-assign instead.
        audio = audio.subclip(0, clip.duration)
        audio = audio.set_duration(clip.duration)
        print("\t\t==> Audio duration: " + str(audio.duration))
        clip = clip.set_audio(audio)
    else:
        print(strftime("\t" + "%H:%M:%S", gmtime()),
              "Using original audio track...")

    # Factory used by SubtitlesClip to render each cue's text.
    generator = lambda txt: TextClip(
        txt, font='Arial-Bold', fontsize=24, color='white')

    # Read in the subtitle file.
    print("\t" + strftime("%H:%M:%S", gmtime()),
          "Reading subtitle file: " + subtitlesFileName)
    subs = SubtitlesClip(subtitlesFileName, generator)

    #print("\t\t==> Subtitles duration before: " + str(subs.duration))

    # Clamp subtitles just short of the video end so the last cue never runs
    # past the clip (re-assigned for the same immutability reason as above).
    subs = subs.subclip(0, clip.duration - .001)
    subs = subs.set_duration(clip.duration - .001)

    # Split the cues into chunks of 30 (approx. 120 s of video each) so every
    # intermediate render stays small.
    subsetTxts = []
    txts = []
    for cue_count, txt in enumerate(subs, start=1):
        txts.append(txt)
        if cue_count % 30 == 0:
            subsetTxts.append(txts[:])
            txts = []
    if txts:
        subsetTxts.append(txts[:])

    # Nudge the very last cue's end time back by 1 ms so its subclip stays
    # strictly inside the source clip's duration.
    subsetTxts[-1][-1][0][1] -= 0.001

    # Render each chunk to its own intermediate file (capped at 10 chunks).
    clipFileNames = []
    for chunk_index, subset in enumerate(subsetTxts, start=1):
        if chunk_index > 10:
            break
        fileName = 'clip_' + str(math.floor(subset[0][0][0])) + '.mp4'
        annotated_clips = [
            annotate(clip.subclip(from_t, to_t), txt)
            for (from_t, to_t), txt in subset
        ]
        clipFile = concatenate_videoclips(annotated_clips)
        clipFile.write_videofile(fileName)
        clipFileNames.append(fileName)
        gc.collect()  # release frame buffers between chunk renders

    # Stitch the intermediate chunk files into the final output.
    finalClips = [VideoFileClip(c) for c in clipFileNames]
    finalFile = concatenate_videoclips(finalClips)
    finalFile.write_videofile(outputFileName)
Ejemplo n.º 29
0
    # NOTE(review): this excerpt is a function body whose `def` line is not
    # visible here; `df` (with 'Image', 'time' and 'mp3s' columns, presumably
    # a pandas DataFrame) comes from the enclosing scope -- confirm in the
    # full source.
    # Turn each row into a still-image clip with its own audio track.
    clip_list = []
    for i in range(len(df)):
        dummy = ImageClip(df['Image'][i]).set_duration(df['time'][i])
        audioclip = AudioFileClip('Vid' + df['mp3s'][i])
        dummy = dummy.set_audio(audioclip)
        clip_list.append(dummy)

    video = clip_list[0]

    # Chain the clips one at a time into a single slideshow video.
    for z in range(1, len(clip_list)):
        video = concatenate([video, clip_list[z]], method="compose")

    # Intermediate render at 1 fps (still images only).
    video.write_videofile('test.mp4', fps=1)

    # Subtitles: factory used by SubtitlesClip to render each cue's text.
    generator = lambda txt: TextClip(
        txt, font='Arial', fontsize=60, color='black', bg_color='white')
    subtitles = SubtitlesClip("sub.srt", generator)

    # Re-open the intermediate render and burn the subtitles onto it.
    video = VideoFileClip("test.mp4")
    result = CompositeVideoClip(
        [video, subtitles.set_pos(('center', 'bottom'))])

    result.write_videofile("file.mp4",
                           fps=video.fps,
                           temp_audiofile="temp-audio.m4a",
                           remove_temp=True,
                           codec="libx264",
                           audio_codec="aac")