Example #1
def text_anim(text, title_number):
    txtClip = TextClip(convert_errors(text),
                       color='white',
                       font="Mangal.ttf",
                       kerning=5,
                       fontsize=100)
    cvc = CompositeVideoClip([txtClip.set_pos('center')], size=screensize)

    # WE USE THE PLUGIN findObjects TO LOCATE AND SEPARATE EACH LETTER

    letters = findObjects(cvc)  # a list of ImageClips

    # WE ANIMATE THE LETTERS

    clips = [
        CompositeVideoClip(moveLetters(letters, funcpos),
                           size=screensize).subclip(0, 3)  #video duration
        for funcpos in [vortex]
    ]

    # WE CONCATENATE EVERYTHING AND WRITE TO A FILE

    final_clip = concatenate_videoclips(clips)
    final_clip.write_videofile('top' + str(title_number) + '.mp4',
                               fps=25,
                               codec='mpeg4')
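# Example #1 relies on module-level helpers (screensize, convert_errors, moveLetters,
# vortex) that its source file defines elsewhere but that are not shown in the snippet.
# A minimal sketch of the two letter-animation helpers, mirroring Example #12 below:

import numpy as np

rotMatrix = lambda a: np.array([[np.cos(a), np.sin(a)],
                                [-np.sin(a), np.cos(a)]])

def vortex(screenpos, i, nletters):
    d = lambda t: 1.0 / (0.3 + t**8)  # damping: large right after t=0, decays quickly
    a = i * np.pi / nletters          # angle of the movement
    v = rotMatrix(a).dot([-1, 0])
    if i % 2:
        v[1] = -v[1]
    return lambda t: screenpos + 400 * d(t) * rotMatrix(0.5 * d(t) * a).dot(v)

def moveLetters(letters, funcpos):
    # give each letter a time-dependent position computed from its original screenpos
    return [letter.set_pos(funcpos(letter.screenpos, i, len(letters)))
            for i, letter in enumerate(letters)]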
Example #2
def AIA_GenerateBackground(
        TEMPLATE, FEATURE, DURATION,
        VIDEONAME):  #The template for video arrangement, EG: TEMPLATE_2x2.png

    im = ImageClip(TEMPLATE)
    regions = findObjects(im)
    vlist = Video_List()
    vlist = AIA_ArrangeByTemp(
        vlist
    )  #Video_List() will sort by wavelength, but we want to sort by temperature
    print("vlist: " + str(vlist))
    clips = [
        VideoFileClip(n) for n in [
            "misc/black.mp4",
            vlist[
                0],  #The order they appear here is the order they appear on the Thermometer (?)
            vlist[1],
            vlist[2],
            vlist[3],
            vlist[4],
            vlist[5],
            FEATURE,  #Second to last is our featured video
            "misc/black.mp4"
        ]
    ]

    comp_clips = [
        c.resize(r.size).set_mask(r.mask).set_pos(r.screenpos)
        for c, r in zip(clips, regions)
    ]  #We build our composite here.

    cc = CompositeVideoClip(comp_clips, im.size)

    cc.set_duration(DURATION).write_videofile(VIDEONAME, fps=24)
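# A hypothetical call, for illustration only -- the file names and values below are
# assumptions, not taken from the original source:
#
#   AIA_GenerateBackground("misc/TEMPLATE_2x2.png", "misc/feature.mp4",
#                          DURATION=30, VIDEONAME="aia_background.mp4")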
Example #3
def Compile_Final_Video(DAILY):
    im = ImageClip("misc/FROST_TEMPLATE.png")
    regions = findObjects(im)

    clips = [
        VideoFileClip(n) for n in [
            "misc/test/1.mp4",
            "misc/test/2.mp4",
            "misc/sidebar/solar_rain.mp4",
            DAILY,  #path to our feature video
        ]
    ]

    clips[1] = ImageClip("misc/sidebar/ContentBody.png")
    clips[0] = ImageClip("misc/sidebar/SDO_Spacecraft.png")

    comp_clips = [
        c.resize(r.size).set_mask(r.mask).set_pos(r.screenpos)
        for c, r in zip(clips, regions)
    ]  #We build our composite here.
    cc = CompositeVideoClip(comp_clips, im.size)

    cc.set_duration(clips[3].duration).write_videofile(
        str(DAILY).split(".")[0] + "_.mp4", fps=24, threads=4, audio=False)
    os.rename(str(DAILY).split(".")[0] + "_.mp4", DAILY)
Example #4
def simpleTextClip(textToShow, fsize, posi, subc, funcpos):
    txtClip = TextClip(textToShow, color='white', fontsize=fsize)
    cvc = CompositeVideoClip([txtClip.set_pos(posi)], size=screensize)
    letters = findObjects(cvc)  # a list of ImageClips
    # WE ANIMATE THE LETTERS
    txt_clip = CompositeVideoClip(ef_moveLetters(letters, funcpos),
                                  size=screensize).subclip(0, subc)
    return txt_clip
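# Hypothetical usage -- screensize and ef_moveLetters are module-level names in the
# original source, and cascade stands in for any letter-position function:
#
#   title = simpleTextClip("Hello", fsize=80, posi='center', subc=3, funcpos=cascade)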
Example #5
 def __init__(self):
     self.compositing_pattern = ImageClip('./cole-pattern-480.png')
     self.compositing_regions = findObjects(self.compositing_pattern)
     self.start_clip = VideoFileClip('./videos_480/01-start.mp4')
     self.cole_lean_clip = VideoFileClip('./videos_480/02-cole-lean.mp4')
     self.tv_3_clip = VideoFileClip('./videos_480/tv-extended.mp4')
     self.watching_tv_clip = VideoFileClip('./videos_480/04-watching.mp4')
     self.albert_clip = VideoFileClip('./videos_480/06-albert-glance.mp4')
     self.wth_clip = VideoFileClip('./videos_480/07-wth.mp4')
Example #6
    def step1(self, text, save_name):

        txtClip = TextClip('asdfasdfasdf', color='white', font=self.font,
                           kerning=5, fontsize=100)
        cvc = CompositeVideoClip([txtClip.set_pos('center')],
                                 size=self.screen)
        letters = findObjects(cvc)

        rotMatrix = lambda a: np.array([[np.cos(a), np.sin(a)],
                                        [-np.sin(a), np.cos(a)]])

        def vortex(screenpos, i, nletters):
            d = lambda t: 1.0 / (0.3 + t ** 8)  # damping
            a = i * np.pi / nletters  # angle of the movement
            v = rotMatrix(a).dot([-1, 0])
            if i % 2: v[1] = -v[1]
            return lambda t: screenpos + 400 * d(t) * rotMatrix(10.5 * d(t) * a).dot(v)

        def cascade(screenpos, i, nletters):
            v = np.array([0, -1])
            d = lambda t: 1 if t < 0 else abs(np.sinc(t) / (1 + t ** 4))
            return lambda t: screenpos + v * 400 * d(t - 0.15 * i)

        def arrive(screenpos, i, nletters):
            v = np.array([-1, 0])
            d = lambda t: max(0, 3 - 3 * t)
            return lambda t: screenpos - 400 * v * d(t - 0.2 * i)

        def vortexout(screenpos, i, nletters):
            d = lambda t: max(0, t)  # damping
            a = i * np.pi / nletters  # angle of the movement
            v = rotMatrix(a).dot([-1, 0])
            if i % 2: v[1] = -v[1]
            return lambda t: screenpos + 400 * d(t - 0.1 * i) * rotMatrix(-0.2 * d(t) * a).dot(v)

        def moveLetters(letters, funcpos):
            return [letter.set_pos(funcpos(letter.screenpos, i, len(letters)))
                    for i, letter in enumerate(letters)]

        clips = [CompositeVideoClip(moveLetters(letters, funcpos),
                                    size=self.screen).subclip(1, 2)
                 for funcpos in [vortex]]

        # WE CONCATENATE EVERYTHING AND WRITE TO A FILE

        final_clip = concatenate_videoclips(clips)

        video = self.twinkle(text[:9], frame_duration=0.4)  # CompositeVideoClip([step1, step1_text_video]).fadein(1)
        video2 = self.twinkle(text[9:12], frame_duration=0.4)  # CompositeVideoClip([step1, step1_text_video]).fadein(1)
        countdown = self.CountDown(text[12:])

        clip = concatenate_videoclips([video, final_clip, video2, countdown], method='compose')
        audio = AudioFileClip(self.step1_audio).subclip(0, clip.end)
        clip.audio = audio
        self.saveVideo(clip, save_name)

        return True
Example #7
 def __init__(self, text = None, screensize = (1280, 720), fontsize=50):        
     txtClip = TextClip(text, color='white', font="Amiri-Bold", kerning = 5, fontsize=fontsize)
     
     cvc = CompositeVideoClip( [txtClip.set_pos('center')],
                                 size=screensize)
     
     letters = findObjects(cvc) # a list of ImageClips
     
     self.clips = [CompositeVideoClip(self.moveLetters(letters, funcpos), 
                                 size = screensize).subclip(0,5) for funcpos in [self.vortex] ]
     
     self.clips = concatenate_videoclips(self.clips)
Example #8
 def __init__(self, phrase="TEST", 
              fontsize=45, 
              kerning=3, 
              text_color='white', 
              font_name='Arial',
              output_text_filename='output_text.mp4',
              videoname="video.mp4",
              logo_filename="/workspace/logo.mp4",
              fade_out_time=1,
              fade_in_time=1,
              time_between_videos=1,
              output_path = "/workspace/output/",
              output_fps=25,
              output_codec='libx264',
              output_video_width=1280,
              output_video_height=720,
              video_bitrate="5000k",
              input_path="/workspace/videos/"):
     
   self.logger = ProgressHandler()
   self.phrase = '\n'.join(phrase[i:i+37] for i in range(0, len(phrase), 37))
   self.fontsize = fontsize
   self.kerning = kerning
   self.text_color = text_color
   self.font_name = font_name
   self.output_text_filename = output_text_filename
   
   self.videoname=videoname
   self.logo_filename=logo_filename
   
   self.fade_out_time=fade_out_time
   self.fade_in_time=fade_in_time
   self.time_between_videos=time_between_videos
   
   self.output_path = output_path
   self.output_fps=output_fps
   self.output_codec=output_codec #libx264 #mpeg4 #png
   self.output_video_width=output_video_width
   self.output_video_height=output_video_height
   self.video_bitrate=video_bitrate
   self.text_video_size = (self.output_video_width, self.output_video_height)
   self.output_video_name= "output_video.mp4" #self.videoname.split(".")[0]+"_"+str(random.randrange(10,10000))+".mp4"
   
   self.input_path = input_path
 
   self.txtClip = TextClip(self.phrase, color=self.text_color, font=self.font_name, kerning=self.kerning, fontsize=self.fontsize)
   self.cvc = CompositeVideoClip( [self.txtClip.set_pos('center')], size=self.text_video_size)
   self.rotMatrix = lambda a: np.array( [[np.cos(a),np.sin(a)], [-np.sin(a),np.cos(a)]])
   self.letters = findObjects(self.cvc)
Example #9
def create_video_description(count):

    txtClip = TextClip('Number ' + str(count),
                       color='white',
                       font="Amiri-Bold",
                       kerning=5,
                       fontsize=100)
    cvc = CompositeVideoClip([txtClip.set_pos('center')])
    letters = findObjects(cvc)  # a list of ImageClips

    clips = [
        CompositeVideoClip(moveLetters(letters, funcpos)).subclip(0, 5)
        for funcpos in [vortex, cascade, arrive, vortexout]
    ]

    return concatenate_videoclips(clips)
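# As in Example #1, moveLetters and the position functions iterated over here
# (vortex, cascade, arrive, vortexout) are assumed to be module-level helpers,
# essentially the ones defined inline in Example #12 below.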
Example #10
def maker(video_path, text, effect, music_path):
    video_clip = VideoFileClip(video_path)
    audio_clip = AudioFileClip(music_path)

    # bg_music = audio_clip.subclip(0, video_clip.duration)
    video_clip = video_clip.set_audio(audio_clip)
    fps = video_clip.fps

    # WE CREATE THE TEXT THAT IS GOING TO MOVE, WE CENTER IT.
    w, h = video_clip.size
    screensize = video_clip.size
    txtClip = TextClip(text,
                       color='white',
                       font="Amiri-Bold",
                       kerning=5,
                       fontsize=100)
    cvc = CompositeVideoClip([txtClip.set_position('center')], size=screensize)

    # THE NEXT FOUR FUNCTIONS DEFINE FOUR WAYS OF MOVING THE LETTERS

    # WE USE THE PLUGIN findObjects TO LOCATE AND SEPARATE EACH LETTER

    letters = findObjects(cvc, rem_thr=0)  # a list of ImageClips

    # WE ANIMATE THE LETTERS

    def moveLetters(letters, funcpos):
        return [
            letter.set_pos(funcpos(letter.screenpos, i, len(letters)))
            for i, letter in enumerate(letters)
        ]

# clips = [ CompositeVideoClip( moveLetters(letters,funcpos),
#                               size = screensize).subclip(0,5)
#           for funcpos in [vortex, cascade, arrive, vortexout] ]

# WE CONCATENATE EVERYTHING AND WRITE TO A FILE

    effect_clip = CompositeVideoClip(moveLetters(letters, effect),
                                     size=screensize)
    effect_clip = effect_clip.set_duration(5)

    final_clip = CompositeVideoClip([video_clip, effect_clip])

    # final_clip = concatenate_videoclips(clips)
    final_clip.write_videofile(f'../MovieMaker/Videos/{text}.mp4')
Example #11
def arrive(screenpos, i, nletters):
    v = np.array([-1, 0])
    d = lambda t: max(0, 3 - 3 * t)
    return lambda t: screenpos - 400 * v * d(t - 0.2 * i)


def vortexout(screenpos, i, nletters):
    d = lambda t: max(0, t)  #damping
    a = i * np.pi / nletters  # angle of the movement
    v = rotMatrix(a).dot([-1, 0])
    if i % 2: v[1] = -v[1]
    return lambda t: screenpos + 400 * d(t - 0.1 * i) * rotMatrix(-0.2 * d(t) *
                                                                  a).dot(v)


# WE USE THE PLUGIN findObjects TO LOCATE AND SEPARATE EACH LETTER

letters = findObjects(cvc)  # a list of ImageClips

# WE ANIMATE THE LETTERS


def moveLetters(letters, funcpos):
    return [
        letter.set_pos(funcpos(letter.screenpos, i, len(letters)))
        for i, letter in enumerate(letters)
    ]


clips = [
    CompositeVideoClip(moveLetters(letters, funcpos),
                       size=screensize).subclip(0, 5)
    for funcpos in [vortex, cascade, arrive, vortexout]
]
Example #12
def create_video(request):
    screensize = (720,460)
    txtClip = TextClip('Cool effect', color='white', font="Amiri-Bold",
                       kerning=5, fontsize=100)
    cvc = CompositeVideoClip( [txtClip.set_pos('center')],
                              size=screensize)

    # THE NEXT FOUR FUNCTIONS DEFINE FOUR WAYS OF MOVING THE LETTERS

    # helper function
    rotMatrix = lambda a: np.array([[np.cos(a), np.sin(a)],
                                    [-np.sin(a), np.cos(a)]])

    def vortex(screenpos, i, nletters):
        d = lambda t: 1.0 / (0.3 + t**8)  # damping
        a = i * np.pi / nletters  # angle of the movement
        v = rotMatrix(a).dot([-1, 0])
        if i % 2: v[1] = -v[1]
        return lambda t: screenpos + 400 * d(t) * rotMatrix(0.5 * d(t) * a).dot(v)

    def cascade(screenpos, i, nletters):
        v = np.array([0, -1])
        d = lambda t: 1 if t < 0 else abs(np.sinc(t) / (1 + t**4))
        return lambda t: screenpos + v * 400 * d(t - 0.15 * i)

    def arrive(screenpos, i, nletters):
        v = np.array([-1, 0])
        d = lambda t: max(0, 3 - 3 * t)
        return lambda t: screenpos - 400 * v * d(t - 0.2 * i)

    def vortexout(screenpos, i, nletters):
        d = lambda t: max(0, t)  # damping
        a = i * np.pi / nletters  # angle of the movement
        v = rotMatrix(a).dot([-1, 0])
        if i % 2: v[1] = -v[1]
        return lambda t: screenpos + 400 * d(t - 0.1 * i) * rotMatrix(-0.2 * d(t) * a).dot(v)

    # WE USE THE PLUGIN findObjects TO LOCATE AND SEPARATE EACH LETTER

    letters = findObjects(cvc)  # a list of ImageClips

    # WE ANIMATE THE LETTERS

    def moveLetters(letters, funcpos):
        return [letter.set_pos(funcpos(letter.screenpos, i, len(letters)))
                for i, letter in enumerate(letters)]

    clips = [CompositeVideoClip(moveLetters(letters, funcpos),
                                size=screensize).subclip(0, 5)
             for funcpos in [vortex, cascade, arrive, vortexout]]

    # WE CONCATENATE EVERYTHING AND WRITE TO A FILE

    final_clip = concatenate_videoclips(clips)
    audio_clip = AudioFileClip("media/music.aac").subclip(0, final_clip.duration)
    final_clip = final_clip.set_audio(audio_clip).afx(afx.audio_fadeout, 1.0)

    #final_clip = vfx.resize(final_clip, (570, 570))

    final_clip.write_videofile('videos/coolTextEffects.mp4',
                               fps=23, codec='libx264',
                               audio_bitrate='1000k', bitrate='4000k')

    #final_clip.write_gif('videos/coolGif.gif', fps=23)

    html = "<html><body><div>Video successfully created<div><a href='http://localhost:8000'><button>Back</button></a></body></html>"
    return HttpResponse(html)
Example #13
def arrive(screenpos,i,nletters):
    v = np.array([-1,0])
    d = lambda t : max(0, 3-3*t)
    return lambda t: screenpos-400*v*d(t-0.2*i)

def vortexout(screenpos,i,nletters):
    d = lambda t : max(0,t) #damping
    a = i*np.pi/ nletters # angle of the movement
    v = rotMatrix(a).dot([-1,0])
    if i%2 : v[1] = -v[1]
    return lambda t: screenpos+400*d(t-0.1*i)*rotMatrix(-0.2*d(t)*a).dot(v)



# WE USE THE PLUGIN findObjects TO LOCATE AND SEPARATE EACH LETTER

letters = findObjects(cvc) # a list of ImageClips


# WE ANIMATE THE LETTERS

def moveLetters(letters, funcpos):
    return [ letter.set_pos(funcpos(letter.screenpos,i,len(letters)))
              for i,letter in enumerate(letters)]

clips = [ CompositeVideoClip( moveLetters(letters,funcpos),
                              size = screensize).subclip(0,5)
          for funcpos in [vortex, cascade, arrive, vortexout] ]

# WE CONCATENATE EVERYTHING AND WRITE TO A FILE

concatenate_videoclips(clips).write_videofile('coolTextEffects.avi',
                                              fps=25, codec='mpeg4')
Example #14
def createdoubledroptext(text, font, fontsize, fontcolour):
    #screensize = (720, 460)
    screensize = (1920, 1080)

    logging.debug('CreateDoubleDropText-01' + text)
    txtClipTop1 = TextClip(text,
                           color='white',
                           font=font,
                           kerning=5,
                           fontsize=int(fontsize))
    cvctop1 = CompositeVideoClip([txtClipTop1.set_pos('center')],
                                 size=screensize)

    txtClipBottom1 = TextClip(text,
                              color=fontcolour,
                              font=font,
                              kerning=5,
                              fontsize=int(fontsize))
    cvcbottom1 = CompositeVideoClip([txtClipBottom1.set_pos('center')],
                                    size=screensize)

    # THE NEXT FOUR FUNCTIONS DEFINE FOUR WAYS OF MOVING THE LETTERS

    # helper function
    rotMatrix = lambda a: np.array([[np.cos(a), np.sin(a)],
                                    [-np.sin(a), np.cos(a)]])

    def cascade(screenpos, i, nletters):
        print('cascade:screenpos' + str(screenpos))
        print('cascade:i:' + str(i))
        v = np.array([0, -1])
        d = lambda t: 1 if t < 0 else abs(np.sinc(t) / (1 + t**4))
        return lambda t: screenpos + v * 50 * d(t - 0.25 * i)

    def cascadeout(screenpos, i, nletters):
        print('cascadeout:screenpos' + str(screenpos))
        print('cascadeout:i:' + str(i))
        v = np.array([0, -1])
        d = lambda t: 1 if t < 0 else abs(np.sinc(t) / (1 + t**4))
        return lambda t: screenpos + v * -50 * d(t + 0.25 * i)

    def cascadeback(screenpos, i, nletters):
        print('cascadeback:screenpos' + str(screenpos))
        print('cascadeback:i:' + str(i))
        v = np.array([0, -1])
        d = lambda t: 1 if t < 0 else abs(np.sinc(t) / (1 + t**4))
        return lambda t: screenpos + v * 55 * d(t - 0.25 * i)

    def cascadeoutback(screenpos, i, nletters):
        v = np.array([0, -1])
        d = lambda t: 1 if t < 0 else abs(np.sinc(t) / (1 + t**4))
        return lambda t: screenpos + v * -55 * d(t + 0.25 * i)

    logging.debug('CreateDoubleDropText-02-Letters')
    # WE USE THE PLUGIN findObjects TO LOCATE AND SEPARATE EACH LETTER
    letterstop1 = findObjects(cvctop1)  # a list of ImageClips

    lettersbottom1 = findObjects(cvcbottom1)  # a list of ImageClips

    # WE ANIMATE THE LETTERS
    def moveLetters(letters, funcpos):
        return [
            letter.set_pos(funcpos(letter.screenpos, i, len(letters)))
            for i, letter in enumerate(letters)
        ]

    # clips = [CompositeVideoClip(moveLetters(letters, funcpos),
    #                             size=screensize).subclip(0, 5)
    #          for funcpos in [vortex, cascade, arrive, vortexout]]

    clipstop1 = [
        CompositeVideoClip(moveLetters(letterstop1, funcpos),
                           size=screensize).subclip(0, 2)
        for funcpos in [cascade]
    ]

    clipstop2 = [
        CompositeVideoClip(moveLetters(lettersbottom1, funcpos),
                           size=screensize).subclip(0, 2)
        for funcpos in [cascadeback]
    ]

    clips_bot = [
        CompositeVideoClip(moveLetters(letterstop1, funcpos),
                           size=screensize).subclip(0, 2)
        for funcpos in [cascadeout]
    ]

    clips_bot2 = [
        CompositeVideoClip(moveLetters(lettersbottom1, funcpos),
                           size=screensize).subclip(0, 2)
        for funcpos in [cascadeoutback]
    ]

    # WE CONCATENATE EVERYTHING AND RETURN THE CLIP
    logging.debug('CreateDoubleDropText-03-Clips')
    fclip1 = concatenate_videoclips(clipstop1)
    fclip2 = concatenate_videoclips(clipstop2)
    fclip3 = concatenate_videoclips(clips_bot2)
    fclip4 = concatenate_videoclips(clips_bot)
    logging.debug('CreateDoubleDropText-04-Final')
    final = CompositeVideoClip([fclip1, fclip2, fclip3, fclip4])

    return final
Example #15
from moviepy.editor import *
from moviepy.video.tools.segmenting import findObjects

# Load the image specifying the regions.
im = ImageClip("../../ultracompositing/motif.png")

# Locate the regions; returns a list of ImageClips
regions = findObjects(im)

# Load 7 clips from the US National Parks. Public Domain :D
clips = [
    VideoFileClip(n, audio=False).subclip(18, 22) for n in [
        "../../videos/romo_0004.mov", "../../videos/apis-0001.mov",
        "../../videos/romo_0001.mov", "../../videos/elma_s0003.mov",
        "../../videos/elma_s0002.mov", "../../videos/calo-0007.mov",
        "../../videos/grsm_0005.mov"
    ]
]

# fit each clip into its region
comp_clips = [
    c.resize(r.size).set_mask(r.mask).set_pos(r.screenpos)
    for c, r in zip(clips, regions)
]

cc = CompositeVideoClip(comp_clips, im.size)
cc.resize(0.6).write_videofile("../../composition.mp4")

# Note that this particular composition takes quite a long time to
# render (about 20 s on my computer for just 4 s of video).
Example #16
from moviepy.editor import *
from moviepy.video.tools.segmenting import findObjects

# Load the image specifying the regions.
im = ImageClip("../../ultracompositing/motif.png")

# Locate the regions; returns a list of ImageClips
regions = findObjects(im)


# Load 7 clips from the US National Parks. Public Domain :D
clips = [VideoFileClip(n, audio=False).subclip(18,22) for n in
     [ "../../videos/romo_0004.mov",
      "../../videos/apis-0001.mov",
      "../../videos/romo_0001.mov",
      "../../videos/elma_s0003.mov",
      "../../videos/elma_s0002.mov",
      "../../videos/calo-0007.mov",
      "../../videos/grsm_0005.mov"]]

# fit each clip into its region
comp_clips =  [c.resize(r.size)
                .set_mask(r.mask)
                .set_pos(r.screenpos)
               for c,r in zip(clips,regions)]

cc = CompositeVideoClip(comp_clips,im.size)
cc.resize(0.6).write_videofile("../../composition.mp4")

# Note that this particular composition takes quite a long time to
# render (about 20 s on my computer for just 4 s of video).
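# If the render is too slow, write_videofile also accepts a threads argument that
# lets ffmpeg encode on several threads; how much this helps depends on the codec.
# For example:
#
#   cc.resize(0.6).write_videofile("../../composition.mp4", threads=4)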
Example #17
def create_presentation_video(request):
    # trip introduction
    #screensize = (720, 460)
    screensize = (1024, 780)
    txt_intro = TextClip('Just Back From...',
                        color='white', font="Amiri-Bold",
                       kerning=2, fontsize=50).set_position((10, 80))
    txt_dest1 = TextClip('Seville, Spain',
                        color='white', font="Amiri-Bold",
                       kerning=2, fontsize=50).set_position((10, 120))
    txt_dest2 = TextClip('Costa Brava, Spain',
                        color='white', font="Amiri-Bold",
                       kerning=2, fontsize=50).set_position((10, 160))
    txt_dest3 = TextClip('Arles, France',
                        color='white', font="Amiri-Bold",
                       kerning=2, fontsize=50).set_position((10, 200))
    txt_dest4 = TextClip('Eze, France',
                        color='white', font="Amiri-Bold",
                       kerning=2, fontsize=50).set_position((10, 240))

    #title_clip = (TextClip("Just Back From...", fontsize=35,
    #                font="Century-Schoolbook-Roman", color="white", kerning=-2, interline=-1,
    #                bg_color='#e04400', method='caption', align='center', size=(image_clips.w, image_clips.h))
    #             .margin(top=5, opacity=0)
    #             .set_duration(3).fadein(.5).fadeout(.5)
    #             .set_position(("center", "top")))

    #txt = "\n".join([
    #"Just Back From...",
    #"Seville, Spain",
    #"Costa Brava, Spain",
    #"Arles, France",
    #"Eze, France"
    #])
    #txt_clip1 = TextClip(txt, color='white', font="Amiri-Bold",
    #                   kerning=2, fontsize=50).set_position((10, 80))

    #clip_txt = TextClip(txt,color='white', align='West',fontsize=25,
    #                font='Xolonium-Bold', method='label')

    #txt_clips = clips_array([[txt_clip1, txt_clip2]])
    #cvc = CompositeVideoClip([txt_clip1, txt_clip2, txt_clip3, txt_clip4, txt_clip5],
    cvc = CompositeVideoClip([txt_intro, txt_dest1],
                              size=screensize)

    # helper function
    rot_matrix = lambda a: np.array([[np.cos(a), np.sin(a)],
                                     [-np.sin(a), np.cos(a)]])

    def cascade(screenpos, i, nletters):
        v = np.array([0,-1])
        d = lambda t: 1 if t<0 else abs(np.sinc(t)/(1+t**4))
        return lambda t: screenpos+v*400*d(t-0.15*i)

    def vortexout(screenpos,i,nletters):
        d = lambda t : max(0,t) #damping
        a = i*np.pi/ nletters # angle of the movement
        v = rot_matrix(a).dot([-1,0])
        if i % 2: v[1] = -v[1]
        return lambda t: screenpos+400*d(t-0.1*i)*rot_matrix(-0.2*d(t)*a).dot(v)

    letters = findObjects(cvc) # a list of ImageClips

    def moveLetters(letters, funcpos):
        return [letter.set_pos(funcpos(letter.screenpos, i, len(letters)))
                  for i, letter in enumerate(letters)]

    clips = [CompositeVideoClip(moveLetters(letters, funcpos),
                                  size=screensize).subclip(0, 3)
              for funcpos in [cascade, vortexout]]

    final_clip = concatenate_videoclips(clips)

    final_clip.write_videofile('videos/presentationVideo.mp4',
                               fps=23, codec='libx264',
                               audio_bitrate='1000k', bitrate='4000k')

    html = "<html><body><div>Video successfully created<div><a href='http://localhost:8000'><button>Back</button></a></body></html>"
    return HttpResponse(html)
Example #18
    else:
        result.append(l)
    return result


def moveLetters(letters, funcpos):
    return [
        letter.set_pos(funcpos(letter.screenpos, i, len(letters)))
        for i, letter in enumerate(letters)
    ]


textIntro = TextClip(txt="MY SUPER TOOL",
                     fontsize=70,
                     color='white',
                     font="Amiri-Bold").set_duration(10).set_position("center")
m6bfd3e08 = CompositeVideoClip([textIntro.set_pos('center')], size=screensize)

letters = findObjects(m6bfd3e08)

textIntro = CompositeVideoClip(moveLetters(letters, cascade),
                               size=screensize).subclip(0, 5)

clip1 = VideoFileClip("resources/video/dj_rexma.mp4")
credit = createCredits("Bernard,Didier,Francois,Stephane,Loic,Guillaume",
                       300).subclip(0, 4)
result = concatenate_videoclips(flatten([textIntro, clip1, credit]))
result.write_videofile("resources/result_videos/scenarioExtension.webm",
                       fps=25,
                       threads=4)
Example #19
def createintro(projectid):
    status = 0
    starttime = datetime.now()

    logging.debug('createtemplate02 - Start:' + str(starttime))

    projectPath = cfg.VIDEOS_LOCATION + projectid
    logging.debug('createtemplate02 - Start:' + projectPath)

    try:
        status = 1

        if os.path.exists(projectPath + '/intro.mp4') == True:
            os.remove(projectPath + '/intro.mp4')
        if os.path.exists(projectPath + '/intro_comp.mp4') == True:
            os.remove(projectPath + '/intro_comp.mp4')

        fps = 50
        frames = 0

        logging.debug('01 - Get Template Config')
        # Get Template Config Details
        # from template.json
        tempdets = TemplateDetails()
        tempdets.readtemplate(projectid)
        logging.debug(tempdets.textLine01)

        logging.debug('02 - Set Opts')

        iBgTransp = 150
        iTxTransParency = 20

        logging.debug('03 - Setup Paths')
        if not os.path.isdir(str(projectid) + 'imgts'):
            print('new directory has been created')
            os.system('mkdir ' + str(projectid) + 'imgts')
        else:
            shutil.rmtree(str(projectid) + 'imgts')
            os.system('mkdir ' + str(projectid) + 'imgts')

        # FIRST LINE OF TEXT
        logging.debug('04 - Setup Fonts')
        logging.debug('FontSize:' + tempdets.textLine01FontSize)
        logging.debug('Font:' + tempdets.textLine01Font)



        logging.debug('05 - Fonts Size 1')
        # Get the width/height in pixels of the text so we can centre
        # on the screen
        # Set the font up for writing out the text
        fnt = ImageFont.truetype(tempdets.textLine01Font, int(tempdets.textLine01FontSize))
        iFontImageSize = fnt.getsize(tempdets.textLine01)
        iFW1 = iFontImageSize[0]
        iFH1 = iFontImageSize[1]

        logging.debug('05 - Fonts Size 2')
        fnt = ImageFont.truetype(tempdets.textLine02Font, int(tempdets.textLine02FontSize))
        iFontImageSize = fnt.getsize(tempdets.textLine02)

        iFW2 = iFontImageSize[0]
        iFH2 = iFontImageSize[1]

        print('iFW2:'+str(iFW2))
        print('iFH2:' + str(iFH2))

        logging.debug('05 - Fonts Size 3')
        fnt = ImageFont.truetype(tempdets.textLine03Font, int(tempdets.textLine03FontSize))
        iFontImageSize = fnt.getsize(tempdets.textLine03)

        iFW3 = iFontImageSize[0]
        iFH3 = iFontImageSize[1]

        logging.debug('05 - Fonts Colors 1 ')
        print(str(tempdets.textLine01FontColorR))
        print(str(tempdets.textLine01FontColorG))
        print(str(tempdets.textLine01FontColorB))

        if tempdets.templateBGColorR == 0:
            tempdets.templateBGColorR = 10
        if tempdets.templateBGColorG == 0:
            tempdets.templateBGColorG = 10
        if tempdets.templateBGColorB == 0:
            tempdets.templateBGColorB = 10

        # Big Dirty Hack... if any of the colours are 0 then set to 10 as it doesn't like them JIMMY!!!
        if tempdets.textLine01FontColorR == 0:
            tempdets.textLine01FontColorR = 10
        if tempdets.textLine01FontColorG == 0:
            tempdets.textLine01FontColorG = 10
        if tempdets.textLine01FontColorB == 0:
            tempdets.textLine01FontColorB = 10
        if tempdets.textLine02FontColorR == 0:
            tempdets.textLine02FontColorR = 10
        if tempdets.textLine02FontColorG == 0:
            tempdets.textLine02FontColorG = 10
        if tempdets.textLine02FontColorB == 0:
            tempdets.textLine02FontColorB = 10
        if tempdets.textLine03FontColorR == 0:
            tempdets.textLine03FontColorR = 10
        if tempdets.textLine03FontColorG == 0:
            tempdets.textLine03FontColorG = 10
        if tempdets.textLine03FontColorB == 0:
            tempdets.textLine03FontColorB = 10

        textline01HEX = webcolors.rgb_to_hex((tempdets.textLine01FontColorR, tempdets.textLine01FontColorG,
                                              tempdets.textLine01FontColorB))
        print(str(textline01HEX))
        logging.debug('05 - Fonts Colors 2')
        textline02HEX = webcolors.rgb_to_hex((tempdets.textLine02FontColorR, tempdets.textLine02FontColorG,
                                              tempdets.textLine02FontColorB))
        print(str(textline02HEX))
        logging.debug('05 - Fonts Colors 3 ')
        textline03HEX = webcolors.rgb_to_hex((tempdets.textLine03FontColorR, tempdets.textLine03FontColorG,
                                              tempdets.textLine03FontColorB))
        print(str(textline03HEX))
        screensize = (1920, 1080)

        def arrive(screenpos, i, nletters):
            logging.debug('07 - Split and move letters 3')
            print('screenpos:'+str(screenpos))
            print('i:'+str(i))
            v = np.array([-1, 0])
            d = lambda t: max(0, 3 - 3 * t)
            return lambda t: screenpos - 1020 * v * d(t - 0.1 * i)

        def arriveleft(screenpos, i, nletters):
            v = np.array([-1, 0])
            d = lambda t: max(0, 3 - 3 * t)
            return lambda t: screenpos + 1020 * v * d(t - 0.1 * i)

        def moveLetters(letters, funcpos):
            logging.debug('07 - move letters 3.1')
            return [letter.set_pos(funcpos(letter.screenpos, i, len(letters)))
                    for i, letter in enumerate(letters)]


        print('iFW:'+str(iFW1))


        def makeslidebox1(t):
            surface = gizeh.Surface(width=1920, height=1080)

            if t == 0:
                iW1 = 10
            else:
                iW1 = iFW1

            rect = gizeh.rectangle(lx=(iW1/2 * (t + 1)), ly=(iFH1 + 100),
                                   xy=((1920 * t), ((1080 - (iFH1 + 100)) / 2) + ((iFH1 + 50) / 2)),
                                   fill=(0, 0,
                                          tempdets.templateBGColorB),
                                   angle=0)
            rect.draw(surface)
            return surface.get_npimage(transparent=True)

        def makeslidebox2(t):
            surface2 = gizeh.Surface(width=1920, height=1080)

            if t == 0:
                iW2 = 50
            else:
                iW2 = iFW2
            print('t'+str(t))
            #print('x' + str(1920 - (100* t)))
            rect2 = gizeh.rectangle(lx=(iW2 * (t + 1)), ly=(iFH2 + 100),
                                    xy=((1920 - (750* t)), ((1080 - (iFH2 + 100)) / 2) + ((iFH2 + 50) / 2)),
                                    fill=(0, 0,
                                          tempdets.templateBGColorB),
                                    angle=0)
            rect2.draw(surface2)
            return surface2.get_npimage(transparent=True)

        def makeslidebox3(t):
            surface = gizeh.Surface(width=1920, height=1080)

            if t == 0:
                iW1 = 10
            else:
                iW1 = iFW3

            rect = gizeh.rectangle(lx=(iW1/2 * (t + 1)), ly=(iFH3 + 100),
                                   xy=((1920 * t), ((1080 - (iFH3 + 100)) / 2) + ((iFH3 + 50) / 2)),
                                   fill=(0, 0,
                                          tempdets.templateBGColorB),
                                   angle=0)
            rect.draw(surface)
            return surface.get_npimage(transparent=True)

        logging.debug('06 - Mask')

        # Line 1
        graphics_clip_mask = VideoClip(lambda t: makeslidebox1(t)[:, :, 3] / 255.0, duration=2, ismask=True)
        graphics_clip = VideoClip(lambda t: makeslidebox1(t)[:, :, :3], duration=2).set_mask(graphics_clip_mask)
        #
        logging.debug('06 - TextClip')
        logging.debug(tempdets.textLine01)
        logging.debug(textline01HEX)
        logging.debug(tempdets.textLine01Font)
        logging.debug(tempdets.textLine01FontSize)
        #

        bgclip1 = ColorClip((1920, 1080), col=(tempdets.templateBGColorR, tempdets.templateBGColorG,
                                          tempdets.templateBGColorB))
        bgclip1 = bgclip1.set_duration(3)

        txtClip = TextClip(tempdets.textLine01, color=textline01HEX, font=tempdets.textLine01Font,
                           kerning=5, fontsize=int(tempdets.textLine01FontSize))

        cvc = CompositeVideoClip([txtClip.set_pos('center')], size=screensize)
        #
        logging.debug('07 - Split and move letters')
        letters = findObjects(cvc)
        logging.debug('07 - Split and move letters 2')
        #
        textClip1 = [CompositeVideoClip(moveLetters(letters, funcpos),
                                        size=screensize).subclip(0, 2)
                 for funcpos in [arrive]]
        #
        #
        logging.debug('07 - Split and move letters 2')
        #
        txtClip1 = concatenate_videoclips(textClip1)
        #
        clips1 = CompositeVideoClip([bgclip1, txtClip1, graphics_clip], size=(1920, 1080))


        # Line 2
        graphics_clip_mask2 = VideoClip(lambda t: makeslidebox2(t)[:, :, 3] / 255.0, duration=3, ismask=True)
        graphics_clip2 = VideoClip(lambda t: makeslidebox2(t)[:, :, :3], duration=3).set_mask(graphics_clip_mask2)

        logging.debug('06 - TextClip')

        txtClip2 = TextClip(tempdets.textLine02, color=textline02HEX, font=tempdets.textLine02Font,
                           kerning=5, fontsize=int(tempdets.textLine02FontSize))

        cvc2 = CompositeVideoClip([txtClip2.set_pos('center')], size=screensize)

        logging.debug('07 - Split and move letters')
        letters2 = findObjects(cvc2)

        textClip2 = [CompositeVideoClip(moveLetters(letters2, funcpos2),
                                        size=screensize).subclip(0, 3)
                     for funcpos2 in [arriveleft]]

        logging.debug('07 - Split and move letters 2')

        txtClip2 = concatenate_videoclips(textClip2)

        clips2 = CompositeVideoClip([bgclip1, txtClip2, graphics_clip2], size=(1920, 1080))

        # Line 3
        graphics_clip_mask = VideoClip(lambda t: makeslidebox3(t)[:, :, 3] / 255.0, duration=3, ismask=True)
        graphics_clip = VideoClip(lambda t: makeslidebox3(t)[:, :, :3], duration=3).set_mask(graphics_clip_mask)
        #
        logging.debug('06 - TextClip')
        #
        txtClip = TextClip(tempdets.textLine03, color=textline03HEX, font=tempdets.textLine03Font,
                           kerning=5, fontsize=int(tempdets.textLine03FontSize))

        cvc = CompositeVideoClip([txtClip.set_pos('center')], size=screensize)
        #
        logging.debug('07 - Split and move letters')
        letters = findObjects(cvc)
        #
        textClip3 = [CompositeVideoClip(moveLetters(letters, funcpos),
                                        size=screensize).subclip(0, 2)
                     for funcpos in [arrive]]
        #
        #
        logging.debug('07 - Split and move letters 2')
        #
        txtClip3 = concatenate_videoclips(textClip3)
        #
        clips3 = CompositeVideoClip([bgclip1, txtClip3, graphics_clip], size=(1920, 1080))

        # Put them all together
        clips = concatenate_videoclips([clips1, clips2, clips3])

        logging.debug("Intro has been successfully rendered, returning it to main render")
        return clips
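        # NOTE: the function returns above, so everything from here down to the
        # except clause (including the write_videofile call) is unreachable dead code.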

        logging.debug('99-VideoFileClip')

        logging.debug('99-WriteFinal')
        finaldestination = projectPath + '/intro.mp4'
        logging.debug('99-File: ' + finaldestination)

        #clips.write_videofile(finaldestination, threads=4, audio=False, codec='libx264', preset='ultrafast')

        #firstclipLoc = projectPath + '/' + tempdets.firstclip

        # if os.path.exists(firstclipLoc) == True:
        #     clip = VideoFileClip(firstclipLoc, audio=False).subclip(0, 3)
        #     #
        #     logging.debug('99-Composit')
        #     final = CompositeVideoClip([clip, clips])
        #     #
        #     final.write_videofile(finaldestination, threads=4, audio=False, codec='libx264', preset='ultrafast', fps=50)
        # else:
        #     logging.debug('99-No Overlay Clip')

        clips.write_videofile(finaldestination, threads=4, audio=False, codec='libx264', preset='ultrafast', fps=50)
        #
        # finaldestination = projectPath + 'intro_comp.mp4'
        # logging.debug('99-File: ' + finaldestination)
        # final.write_videofile(finaldestination, threads=4, audio=False, codec='libx264')

        # clips.close()
        # final.write_videofile(finaldestination, threads=4, audio=False, codec='libx264', progress_bar=false)
    except Exception as e:
        status = str(e)
        logging.debug(cfg.SERVICENAME + '10 - End... start:' + status)

    endtime = datetime.now()

    logging.debug(cfg.SERVICENAME + '10 - End... start:' + str(starttime) + ' end:' + str(endtime))

    return "Status : " + str(status)
Example #20
def create_text_clips(request):
    trip_stats = process_user_stats()

    # trip introduction
    #screensize = (720, 460)
    screensize = (1024, 780)
    txt_intro = TextClip('{0} Just Back From...'.format(trip_stats["username"]),
                        color='white', font="Amiri-Bold",
                       kerning=2, fontsize=50).set_position((10, 80))

    for idx, d in enumerate(trip_stats["destinations"]):
        txt_clip = TextClip(d,
                        color='white', font="Amiri-Bold",
                       kerning=2, fontsize=50).set_position((10, 120+idx*20))
        if idx == 0:
            txt_dest1 = txt_clip
        elif idx == 1:
            txt_dest2 = txt_clip
        elif idx == 2:
            txt_dest3 = txt_clip
        else:
            txt_dest4 = txt_clip

    txt_published_on = TextClip(trip_stats['published_date'],
                    color='white', font="Amiri-Bold",
                   kerning=2, fontsize=50).set_position((10, 220))

    # final trip stats
    txt_trip_stats = TextClip('TRIP STATS',
                    color='white', font="Amiri-Bold",
                   kerning=2, fontsize=50).set_position((10, 120))

    if trip_stats['via']:
        txt_via = TextClip('Via {0}'.format(trip_stats['via']),
                        color='white', font="Amiri-Bold",
                       kerning=2, fontsize=50).set_position((40, 80))

    if trip_stats['miles']:
        txt_miles = TextClip('{0} Round-trip miles'.format(trip_stats['miles']),
                        color='white', font="Amiri-Bold",
                       kerning=2, fontsize=50).set_position((40, 100))

    txt_is_international_trip = TextClip('International trip' if trip_stats['international'] else 'Domestic trip',
                    color='white', font="Amiri-Bold",
                   kerning=2, fontsize=50).set_position((40, 120))

    if trip_stats['cities_qty']:
        txt_cities_qty = TextClip('{0} cities'.format(trip_stats['cities_qty']),
                        color='white', font="Amiri-Bold",
                       kerning=2, fontsize=50).set_position((40, 140))

    if trip_stats['states_qty']:
        txt_states_qty = TextClip('{0} U.S. state'.format(trip_stats['states_qty']),
                        color='white', font="Amiri-Bold",
                       kerning=2, fontsize=50).set_position((40, 160))

    if trip_stats['foreign_countries_qty']:
        txt_foreign_countries_qty = TextClip('{0} foreign country'.format(trip_stats['foreign_countries_qty']),
                        color='white', font="Amiri-Bold",
                       kerning=2, fontsize=50).set_position((40, 180))

    if trip_stats['natpark']:
        txt_foreign_countries_qty = TextClip('{0} National parks'.format(trip_stats['natpark']),
                        color='white', font="Amiri-Bold",
                       kerning=2, fontsize=50).set_position((40, 200))

    if trip_stats['events_qty']:
        txt_foreign_countries_qty = TextClip('{0} events'.format(trip_stats['events_qty']),
                        color='white', font="Amiri-Bold",
                       kerning=2, fontsize=50).set_position((40, 220))

    #todo Last screen!
    #Boastable
    #Your life in travel
    #See Santi's whole trip

    # add the clips that exist, check!!
    cvc = CompositeVideoClip([txt_intro, txt_dest1, txt_dest2, txt_dest3, txt_dest4, txt_published_on],
                              size=screensize)

    # helper function
    rot_matrix = lambda a: np.array([[np.cos(a), np.sin(a)],
                                     [-np.sin(a), np.cos(a)]])

    def cascade(screenpos, i, nletters):
        v = np.array([0,-1])
        d = lambda t: 1 if t<0 else abs(np.sinc(t)/(1+t**4))
        return lambda t: screenpos+v*400*d(t-0.15*i)

    def vortexout(screenpos,i,nletters):
        d = lambda t : max(0,t) #damping
        a = i*np.pi/ nletters # angle of the movement
        v = rot_matrix(a).dot([-1,0])
        if i % 2: v[1] = -v[1]
        return lambda t: screenpos+400*d(t-0.1*i)*rot_matrix(-0.2*d(t)*a).dot(v)

    letters = findObjects(cvc) # a list of ImageClips

    def moveLetters(letters, funcpos):
        return [letter.set_pos(funcpos(letter.screenpos, i, len(letters)))
                  for i, letter in enumerate(letters)]

    clips = [CompositeVideoClip(moveLetters(letters, funcpos),
                                  size=screensize).subclip(0, 3)
              for funcpos in [cascade, vortexout]]

    final_clip = concatenate_videoclips(clips)

    final_clip.write_videofile('videos/presentationVideo.mp4',
                               fps=23, codec='libx264',
                               audio_bitrate='1000k', bitrate='4000k')

    html = "<html><body><div>Video successfully created<div><a href='http://localhost:8000'><button>Back</button></a></body></html>"
    return HttpResponse(html)
Example #21
                result.append(l)
        return result

def moveLetters(letters, funcpos):
    return [ letter.set_pos(funcpos(letter.screenpos,i,len(letters)))
              for i,letter in enumerate(letters)]

backgroundintroClip= ColorClip(size=(1920,1080), color=(0, 0, 0)).set_duration(5)
subs0 =[((0, 5), 'Intro Title', 'center', 'white'),
((5, backgroundintroClip.duration), ' ', 'bottom', 'white')]
introClip = [annotate(backgroundintroClip.subclip(from_t, to_t), txt, position, color) for (from_t, to_t), txt, position, color in subs0]
textIntro = TextClip(txt="MA VIDéO DE DSL",fontsize=70,color='white',font="Amiri-Bold").set_duration(5).set_position("center")
m11151fbd = CompositeVideoClip( [textIntro.set_pos('center')],
                        size=screensize)

letters = findObjects(m11151fbd)

textIntro =  CompositeVideoClip( moveLetters(letters, cascade), size = screensize).subclip(0,5)


clip1 = VideoFileClip("target/video/Alice's cast work.webm")
clip1a = clip1.subclip(23,43)
subs1 =[((0, 5), "Le premier clip, le travail d'Alice", 'bottom', 'white'),
((5, 7), ' ', 'bottom', 'white'),
((7, 13), 'un travail très intéréssant !!', 'bottom', 'white'),
((13, clip1a.duration - 5), ' ', 'bottom', 'white'),
((clip1a.duration - 5, clip1a.duration), 'on va changer de clip', 'bottom', 'white')]
clip1a = [annotate(clip1a.subclip(from_t, to_t), txt, position, color) for (from_t, to_t), txt, position, color in subs1]
clip2 = VideoFileClip("target/video/Alice&BobHolidaysPart1.webm")
subs2 =[((0, 5), 'on va changer de clip', 'bottom', 'white'),
((5, clip2.duration), ' ', 'bottom', 'white')]
Example #22
from moviepy.editor import *
from moviepy.video.tools.segmenting import findObjects

text_clip = TextClip('Hello, World!', color='white', kerning=5, fontsize=100)
composite_video_clip = CompositeVideoClip([text_clip.set_pos('center')], size=(1000, 1000))


def in_effect(pos):
    d = lambda t: 1.0 / (0.3 + t ** 8)
    return lambda t: pos + 400 * d(t)


def out_effect(pos):
    d = lambda t: max(0, t)
    return lambda t: pos + 400 * d(t)


def move(letters, func_pos):
    return [i.set_pos(func_pos(i.screenpos)) for i in letters]


clips = [CompositeVideoClip(move(findObjects(composite_video_clip), func_pos), size=(1000, 1000)).subclip(0, 5)
         for func_pos in [in_effect, out_effect]]

video_clip = concatenate_videoclips(clips)
video_clip.write_videofile('hello_world.avi', fps=60, codec='mpeg4')