Exemplo n.º 1
0
def subscribers_video():
    """Build a 'thanks to our subscribers' clip.

    Each subscriber name pops onto a background image at a staggered
    (logarithmically spaced) time and random position, over the audio of
    the gunfire clip.  NOTE: `duration` is a free name resolved at module
    scope.
    """
    random.seed()
    with open(join(settings.VIDEO_ASSETS, "subscribers.txt")) as handle:
        names = handle.read().split("\n")[:-1]  # drop trailing empty entry

    background = ImageClip(
        join(settings.IMAGE_ASSETS, "subscribers_bg.png")).set_duration(duration)
    title = TextClip(
        txt="SPECIAL THANKS TO OUR SUBSCRIBERS!",
        color='white',
        font='gill_sans.txt',
        fontsize=60).set_duration(duration).set_pos((350, 10))

    name_clips = []
    elapsed = 1
    for name in names:
        elapsed += 0.2
        appear_at = math.log(elapsed)
        name_clip = TextClip(
            txt=name, color='white', font='gill_sans.txt',
            fontsize=40).set_duration(duration)
        pos_x = random.random() * 1700
        pos_y = random.random() * 940 + 100
        # Park the name off-screen (y = -200) until its appearance time.
        name_clip = name_clip.set_pos(
            lambda t, t0=appear_at, x=pos_x, y=pos_y:
                (x, -200) if t < t0 else (x, y))
        name_clips.append(name_clip)

    audioclip = VideoFileClip(join(settings.VIDEO_ASSETS, "gunfire.mp4"))
    return CompositeVideoClip(
        [audioclip, background, title] + name_clips,
        size=(1920, 1080)).fadeout(0.2)
Exemplo n.º 2
0
def label_clip(video_path, label, start_second, end_second):
    """Overlay `label` (white on red, bottom-center) on the video at
    `video_path` between `start_second` and `end_second`."""
    base = VideoFileClip(video_path)
    caption = (TextClip(label, fontsize=40, color='white', bg_color='red')
               .set_pos(('center', 'bottom'))
               .set_start(start_second)
               .set_duration(end_second - start_second))
    return CompositeVideoClip([base, caption])
Exemplo n.º 3
0
def create_video_of_list_of_clips(clips, output):
    """Concatenate downloaded clips, each captioned with its channel and
    title for 8 seconds, and render the result to `output` at 720p/30fps."""
    print('Rendering video to location  %s' % (output))
    rendered = []

    for clip in clips:
        path = (constants.DOWNLOAD_LOCATION + clip.channel.slug + '/' +
                clip.slug + '.mp4')
        print(path)

        source = VideoFileClip(path)
        caption = TextClip(txt=clip.channel.name + ': ' + clip.title,
                           font='Amiri-regular',
                           color='white',
                           fontsize=55).set_duration(8)
        positioned = caption.set_pos((0.05, 0.8), relative=True)

        # Compose caption over the video, normalized to 1280x720.
        composite = CompositeVideoClip([source, positioned]).resize((1280, 720))
        rendered.append(composite)

        # Drop references early to keep memory bounded across iterations.
        del caption, source, composite

    # Stitch everything together and render.
    finished = concatenate_videoclips(rendered, method='compose')
    finished.write_videofile(output, fps=30)
Exemplo n.º 4
0
 def Gen_Video(self, beat_times, mp3path, uuid):
     """Render a beat-synced lyric video (320x640).

     Reads one lyric line per beat interval from `<uuid>.txt`, centers each
     line on screen between consecutive beats, and muxes in the mp3 audio.
     """
     FONT_URL = '../font/heimi.TTF'
     with open(uuid + '.txt', 'r', encoding='utf-8') as f:
         lines = f.read().split('\n')
     captions = []
     for i, start in enumerate(beat_times[:-1]):
         if i >= len(lines):
             break  # more beats than lyric lines
         print(f'{i + 1}/{len(beat_times)}——{lines[i]}')
         caption = TextClip(lines[i],
                            fontsize=320 // 8,
                            color='white',
                            size=(320, 640),
                            method='caption',
                            font=FONT_URL)
         caption = caption.set_start(start).set_end(beat_times[i + 1])
         captions.append(caption.set_pos('center'))
     video = CompositeVideoClip(captions).set_audio(AudioFileClip(mp3path))
     video.write_videofile(str(uuid) + '.mp4',
                           fps=30,
                           codec='mpeg4',
                           preset='ultrafast',
                           audio_codec="libmp3lame",
                           threads=4)
Exemplo n.º 5
0
 def prepend_intertitle(
     self,
     size: Optional[Size] = None,
     color: str = DEFAULT_INTERTITLE_COLOR,
     font: str = DEFAULT_INTERTITLE_FONT,
     fontsize: int = DEFAULT_INTERTITLE_FONTSIZE,
     position: str = DEFAULT_INTERTITLE_POSITION,
     duration: int = DEFAULT_INTERTITLE_DURATION,
 ):
     """Prepend an intertitle card showing self.meta.text to the clip.

     '|' in the text is rendered as a line break.  When the clip has no
     intertitle text this is a no-op (a warning is logged).  The card is
     sized to the clip (unless `size` is given) and shown for `duration`
     seconds before the original footage.
     """
     if not self.meta.text:
         # FIX: the '%s' placeholder previously had no argument, so the
         # warning printed the literal format string.
         logger.warning('%s: Missing intertitle text', self.meta.path)
         return
     logger.info('%s: Intertitle "%s"', self.meta.path, self.meta.text)
     if not size:
         size = Size(width=self.video_file_clip.w,
                     height=self.video_file_clip.h)
     text_clip = TextClip(
         self.meta.text.replace('|', '\n'),
         size=(size.width * INTERTITLE_TEXT_WIDTH_FACTOR, None),
         color=color,
         font=font,
         fontsize=fontsize,
         method='caption',
         align='center',
     )
     composite_clip = CompositeVideoClip([text_clip.set_pos(position)],
                                         (size.width, size.height))
     intertitle_clip = composite_clip.subclip(0, duration)
     self.video_file_clip = concatenate_videoclips(
         [intertitle_clip, self.video_file_clip], method='compose')
Exemplo n.º 6
0
def annotate(clip, txt, speaker, txt_color='white', fontsize=30, font='Arial'):
    """Write a speaker-colored subtitle at the bottom of the clip.

    The color is picked by `speaker` index from a fixed palette; `txt_color`
    is accepted but unused (kept for interface compatibility).
    """
    palette = ['red', 'black', 'white', 'blue', 'green']
    subtitle = TextClip(txt,
                        fontsize=fontsize,
                        font=font,
                        color=palette[speaker])
    composed = CompositeVideoClip([clip, subtitle.set_pos(('center', 'bottom'))])
    return composed.set_duration(clip.duration)
Exemplo n.º 7
0
 def __init__(self, *args, **kwargs):
     """Build the image-sequence clip; when the 'timestamp' keyword is
     truthy, overlay a centered 5-second caption on top of it."""
     self.add_timestamp = kwargs.pop("timestamp", False)
     self.timelapse = ImageSequenceClip(*args, **kwargs)
     self.clips = [self.timelapse]
     if self.add_timestamp:
         caption = TextClip("MoviePy ROCKS", fontsize=50, color="white")
         self.txt_clip = caption.set_pos("center").set_duration(5)
         self.clips.append(self.txt_clip)
     super(TimestampedImageSequenceClip, self).__init__(self.clips)
Exemplo n.º 8
0
 def __init__(self, *args, **kwargs):
     """Construct the timelapse clip and, if requested via the 'timestamp'
     keyword, composite a 5-second centered text overlay onto it."""
     self.add_timestamp = kwargs.pop('timestamp', False)
     self.timelapse = ImageSequenceClip(*args, **kwargs)
     self.clips = [self.timelapse]
     if self.add_timestamp:
         overlay = TextClip("MoviePy ROCKS", fontsize=50, color='white')
         self.txt_clip = overlay.set_pos('center').set_duration(5)
         self.clips.append(self.txt_clip)
     super(TimestampedImageSequenceClip, self).__init__(self.clips)
Exemplo n.º 9
0
def make(name):
    """Compose a 3-second card: ./images/<name>.jpg with the name sliding
    across on a translucent black banner."""
    image = ImageClip("./images/{}.jpg".format(name))
    # NOTE(review): passing bytes to TextClip looks like Python-2-era code;
    # on Python 3 a plain str would normally be expected — confirm.
    banner = TextClip(name.encode("utf-8"),
                      font="SimSun",
                      color='white',
                      fontsize=96)
    banner = banner.on_color(size=(image.w, banner.h + 10),
                             color=(0, 0, 0),
                             pos=(6, "center"),
                             col_opacity=0.6)
    # Slide right-to-left horizontally while drifting down, clamped so the
    # banner settles at (w/7, 3h/4).
    banner = banner.set_pos(
        lambda t: (max(image.w / 7, int(image.w - 1 * image.w * t)),
                   max(3 * image.h / 4, int(100 * t))))
    return CompositeVideoClip([image, banner]).set_duration(3)
Exemplo n.º 10
0
    def _generate_text(text):
        """Render `text` twice — a thick black stroke behind a thin white
        face — to fake an outlined caption on a 720x1280 canvas."""
        shared_kwargs = dict(
            txt=text,
            size=(600, 100),
            method="caption",
            font="Impact",
            fontsize=30,
            align="South",
        )

        face = TextClip(color="white",
                        stroke_color="white",
                        stroke_width=1,
                        **shared_kwargs).set_pos((20, 250))

        outline = TextClip(color="black",
                           stroke_color="black",
                           stroke_width=5,
                           **shared_kwargs).set_pos((20, 250))

        # Outline first so the white face draws on top of it.
        return CompositeVideoClip([outline, face], size=(720, 1280))
Exemplo n.º 11
0
 def process_package(source: str, parameters: AnnotationParameters):
     """Overlay `parameters.text` (centered, `parameters.duration` seconds,
     in `parameters.color`) on the video at `source`; return the composite."""
     log.info(''.join(["SOURCE: ", source]))
     main_clip = VideoFileClip(source)
     log.info(''.join([
         "Parameters: Font size: ",
         str(parameters.font_size), " Text: ", parameters.text,
         " duration: ",
         str(parameters.duration), " color:", parameters.color
     ]))
     overlay = TextClip(parameters.text,
                        fontsize=parameters.font_size,
                        color=parameters.color)
     overlay = overlay.set_pos('center').set_duration(parameters.duration)
     annotated = CompositeVideoClip([main_clip, overlay])
     log.info(''.join(["Video processed correctly: "]))
     return annotated
Exemplo n.º 12
0
def main(width, height, text, music, word_split, output):
    """Build a beat-synced word video.

    Reads `text`; when `word_split` is truthy, segments it into words with
    jieba and drops punctuation tokens, otherwise uses whole lines.  Beats
    are detected in `music` with librosa, and one word/line is shown,
    centered, per beat interval over the music.
    """
    with open(text, 'r', encoding='utf-8') as f:
        text_str = f.read()
    if word_split:
        # Tokenize into words, then filter out CJK and ASCII punctuation.
        seg_list = jieba.lcut(text_str)
        punct = set(''':!),.:;?]}¢'"、。〉》」』】〕〗〞︰︱︳﹐、﹒
		﹔﹕﹖﹗﹚﹜﹞!),.:;?|}︴︶︸︺︼︾﹀﹂﹄﹏、~¢
		々‖•·ˇˉ―--′’”([{£¥'"‵〈《「『【〔〖([{£¥〝︵︷︹︻
		︽︿﹁﹃﹙﹛﹝({“‘-—_…/\\''')
        word_list = list(filter(lambda x: x not in punct, seg_list))
    else:
        word_list = text_str.split('\n')

    # Beat tracking; a sentinel one second past the last beat closes the
    # final display interval.
    y, sr = librosa.load(music)
    tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
    beat_times = list(librosa.frames_to_time(beats, sr=sr))
    beat_times.append(beat_times[-1] + 1)

    clips = []
    for index, beat_time in enumerate(beat_times[:-1]):
        if index >= len(word_list):
            break  # more beats than words
        print(f'{index + 1}/{len(beat_times)}——{word_list[index]}')
        text_clip = TextClip(
         word_list[index],
         fontsize=width // 8,
         color='white',
         size=(width, height),
         method='caption',
         font='msyhbd.ttc')\
         .set_start(beat_time)\
         .set_end(beat_times[index + 1])
        text_clip = text_clip.set_pos('center')
        clips.append(text_clip)

    # Compose all captions, attach the soundtrack, and render.
    final_clip = CompositeVideoClip(clips)
    audio_clip = AudioFileClip(music)
    final_video = final_clip.set_audio(audio_clip)
    final_video.write_videofile(output,
                                fps=30,
                                codec='mpeg4',
                                preset='ultrafast',
                                audio_codec="libmp3lame",
                                threads=4)
Exemplo n.º 13
0
def main(width, height, text, music, output):
    """Generate a lyric video: each line of `text` is displayed for a span
    of the music proportional to its (filtered) character count."""
    with open(text, 'r', encoding='utf-8') as f:
        raw_lines = f.readlines()
    total_chars = len(filter_text("".join(raw_lines)))

    # Screen time allotted to a single character.
    seconds_per_char = librosa.get_duration(filename=music) / total_chars

    # One centered caption per line, back to back on the timeline.
    clips = []
    cursor = 0
    for raw_line in raw_lines:
        cleaned = filter_text(raw_line)
        begin = cursor
        cursor = begin + seconds_per_char * len(cleaned)
        caption = TextClip(
            cleaned,
            fontsize=width // 12,
            color='white',
            size=(width, height),
            method='caption',
            font='msyhbd.ttc').set_start(begin).set_end(cursor)
        clips.append(caption.set_pos('center'))

    # Compose, attach the soundtrack, and render the final file.
    video = CompositeVideoClip(clips).set_audio(AudioFileClip(music))
    video.write_videofile(output,
                          fps=30,
                          codec='mpeg4',
                          preset='ultrafast',
                          audio_codec="libmp3lame",
                          threads=4)
Exemplo n.º 14
0
def proccess(video_path=None):
    """Compress a video to WebM with a short title overlay.

    FIX: the original body was placeholder pseudo-code (``video_brute =
    VIDEO PATH`` is a syntax error and ``video_a_traiter`` was undefined),
    so the input path is now an explicit parameter.  A thumbnail PNG is
    saved from the 5-second mark, a 7-second title is overlaid at the top,
    and the result is written to ``compress.webm``.

    :param video_path: filesystem path of the video to process (required).
    :raises ValueError: when no path is supplied.
    """
    if video_path is None:
        raise ValueError("video_path is required")
    # Original file name and its extension (mp4, mov, ...).
    nom_brute = os.path.basename(video_path)
    extension = nom_brute.split(".")[-1]
    # Random name used for the generated thumbnail.
    nom1 = get_random_string(10, 'abcd012opqrs3456jklmn789efghi')
    video_traitee = VideoFileClip(video_path)
    # Thumbnail grabbed at t=5s (the original wrote to a placeholder name).
    video_traitee.save_frame(nom1 + ".png", t=5)
    # Width, height and duration of the source video.
    largeur = video_traitee.w
    hauteur = video_traitee.h
    duree = video_traitee.duration
    # Title text shown at the top for the first 7 seconds.
    texte = TextClip("HYACINTHE KOUADIO", fontsize=20, color='white')
    texte = texte.set_pos('top').set_duration(7)
    # Composite the text over the video and render to WebM.
    video_compressee = CompositeVideoClip([video_traitee, texte])
    video_compressee.write_videofile("compress.webm")
Exemplo n.º 15
0
def write_audio_clips_to_disk(variations, sample_freq):
    """Write the variation waveforms to disk as 16-bit WAV data.

    FIX: ``variations.keys()[:]`` only works on Python 2 — dict views are
    not sliceable on Python 3 — so the mapping is iterated directly.
    NOTE(review): every iteration overwrites the same 'sound.wav', so only
    the last variation survives; this looks unintended — confirm before
    changing the filename scheme.
    """
    for p in variations:
        wavfile.write('sound.wav', sample_freq, variations[p].astype(int16))

if __name__ == '__main__':
    screensize = (854, 480)
    clips = []
    # intro screen
    intro_txt = TextClip("""119 Variations\non a theme by Samsung\n'The whistling ringtone'""",
                         color='white', 
                         font='Baskerville Old Face Normal',
                         kerning=5, 
                         fontsize=35)
    
    intro_txt = intro_txt.set_pos('center').set_duration(5)
    # NOTE(review): `transparent` is not a CompositeVideoClip keyword in
    # modern moviepy — presumably written against an old release; confirm.
    intro_cvc = CompositeVideoClip( [intro_txt],
                            size=screensize, transparent=True)
                            
    clips.append(intro_cvc)
    
    # load sound file
    from scipy.io import wavfile
    sample_freq, whistle = wavfile.read("samsung_ringtone.wav")
    # Time axis in seconds; `arange`/`float32` presumably come from a
    # star-import (pylab/numpy) earlier in the original file — confirm.
    t = arange(whistle.shape[0], dtype=float32) / sample_freq
    
    # segment it
    chunk_times = [0., 0.22, 0.38, 0.5, 0.92, 1.2]
    from scipy.signal import get_window    
    chunks = []
    # NOTE(review): source is truncated here — the loop body is missing.
    for start, end in zip(chunk_times[:-1], chunk_times[1:]):
Exemplo n.º 16
0
# Load the source video and the standalone audio track.
# NOTE(review): `org_video_path`, `audio_path` and (below) `watermark` are
# free names defined elsewhere in the original script — confirm upstream.
video_clip = VideoFileClip(org_video_path)
audio_clip = AudioFileClip(audio_path)
# 15-second excerpt (25s..40s) intended as the final soundtrack.
final_audio = audio_clip.subclip(25, 40)

w, h = video_clip.size
fps = video_clip.fps

# 5-second full-frame intro card.
intro_duration = 5
intro_text = TextClip("Hello world!",
                      fontsize=70,
                      color='white',
                      size=video_clip.size)
intro_text = intro_text.set_duration(intro_duration)
intro_text = intro_text.set_fps(fps)
intro_text = intro_text.set_pos("center")

# to add audio to your intro:

intro_music = audio_clip.subclip(25, 30)
intro_text = intro_text.set_audio(intro_music)

# Right-aligned watermark strip the full width of the frame, shown for the
# whole source duration.
watermark_size = 50
watermark_text = TextClip(watermark,
                          fontsize=watermark_size,
                          color='black',
                          align='East',
                          size=(w, watermark_size))
watermark_text = watermark_text.set_fps(fps)
watermark_text = watermark_text.set_duration(video_clip.reader.duration)
watermark_text = watermark_text.margin(left=10, right=10, bottom=2, opacity=0)
Exemplo n.º 17
0
#title frontend
title = input("Enter Title Screen Text, leave blank and hit enter to skip: ")

#endscreen
endtext = input("Enter Endscreen Text, leave blank and hit enter to skip: ")
if endtext != "":
    # Append a 5-second end card to the clip list.
    # NOTE(review): `video_files` is defined earlier in the original script.
    video_files.append(
        TextClip(endtext, fontsize=50, color='white').set_duration(5))

#combine video clips
combined_clip = concatenate_videoclips(video_files, method="compose")

#title backend
if title != "":
    # Overlay the title at the bottom for the first 5 seconds.
    txt_clip = TextClip(title, fontsize=70, color='white')
    txt_clip = txt_clip.set_pos('bottom').set_duration(5)
    video = CompositeVideoClip([combined_clip, txt_clip])
else:
    video = combined_clip

#audio
# Collect every .mp3/.wav file name in the working directory...
audio_files = []

for i in os.listdir():
    if i.endswith(".mp3") or i.endswith(".wav"):
        audio_files.append(i)

print("Audio files loaded are: " + str(audio_files))

# ...then replace each filename with its loaded AudioFileClip.
for i, clip in enumerate(audio_files):
    audio_files[i] = AudioFileClip(clip)
Exemplo n.º 18
0
def create_video(request):
    """Animate the letters of a title four ways (vortex, cascade, arrive,
    vortex-out), concatenate the animations with music, render to MP4, and
    return a small HTML confirmation page.

    FIX: the original body mixed tabs and spaces, which raises TabError on
    Python 3; indentation is now uniform 4-space.  Logic is unchanged.
    """
    screensize = (720, 460)
    txtClip = TextClip('Cool effect', color='white', font="Amiri-Bold",
                       kerning=5, fontsize=100)
    cvc = CompositeVideoClip([txtClip.set_pos('center')], size=screensize)

    # Helper: 2-D rotation matrix used by the letter-movement functions.
    rotMatrix = lambda a: np.array([[np.cos(a), np.sin(a)],
                                    [-np.sin(a), np.cos(a)]])

    def vortex(screenpos, i, nletters):
        d = lambda t: 1.0 / (0.3 + t ** 8)  # damping
        a = i * np.pi / nletters  # angle of the movement
        v = rotMatrix(a).dot([-1, 0])
        if i % 2:
            v[1] = -v[1]
        return lambda t: screenpos + 400 * d(t) * rotMatrix(0.5 * d(t) * a).dot(v)

    def cascade(screenpos, i, nletters):
        v = np.array([0, -1])
        d = lambda t: 1 if t < 0 else abs(np.sinc(t) / (1 + t ** 4))
        return lambda t: screenpos + v * 400 * d(t - 0.15 * i)

    def arrive(screenpos, i, nletters):
        v = np.array([-1, 0])
        d = lambda t: max(0, 3 - 3 * t)
        return lambda t: screenpos - 400 * v * d(t - 0.2 * i)

    def vortexout(screenpos, i, nletters):
        d = lambda t: max(0, t)  # damping
        a = i * np.pi / nletters  # angle of the movement
        v = rotMatrix(a).dot([-1, 0])
        if i % 2:
            v[1] = -v[1]
        return lambda t: screenpos + 400 * d(t - 0.1 * i) * rotMatrix(-0.2 * d(t) * a).dot(v)

    # The findObjects plugin locates and separates each letter.
    letters = findObjects(cvc)  # a list of ImageClips

    # Animate each letter with the given position function.
    def moveLetters(letters, funcpos):
        return [letter.set_pos(funcpos(letter.screenpos, i, len(letters)))
                for i, letter in enumerate(letters)]

    clips = [CompositeVideoClip(moveLetters(letters, funcpos),
                                size=screensize).subclip(0, 5)
             for funcpos in [vortex, cascade, arrive, vortexout]]

    # Concatenate everything, add music, and write to a file.
    final_clip = concatenate_videoclips(clips)
    audio_clip = AudioFileClip("media/music.aac").subclip(0, final_clip.duration)
    final_clip = final_clip.set_audio(audio_clip).afx(afx.audio_fadeout, 1.0)

    #final_clip = vfx.resize(final_clip, (570, 570))

    final_clip.write_videofile('videos/coolTextEffects.mp4',
                               fps=23, codec='libx264',
                               audio_bitrate='1000k', bitrate='4000k')

    #final_clip.write_gif('videos/coolGif.gif', fps=23)

    html = "<html><body><div>Video successfully created<div><a href='http://localhost:8000'><button>Back</button></a></body></html>"
    return HttpResponse(html)
Exemplo n.º 19
0
import numpy as np
from moviepy.editor import CompositeVideoClip, TextClip, concatenate_videoclips
from moviepy.video.tools.segmenting import findObjects

# WE CREATE THE TEXT THAT IS GOING TO MOVE, WE CENTER IT.
screensize = (720, 460)
txtClip = TextClip('so hard...',
                   color='white',
                   font="FangSong",
                   kerning=5,
                   fontsize=100)
# Composite of the still text; the movement functions below operate on the
# letters extracted from it.
cvc = CompositeVideoClip([txtClip.set_pos('center')], size=screensize)
# cvc.preview()

# THE NEXT FOUR FUNCTIONS DEFINE FOUR WAYS OF MOVING THE LETTERS

# helper function
# 2-D rotation matrix for angle `a`, shared by the movement functions.
rotMatrix = lambda a: np.array([[np.cos(a), np.sin(a)],
                                [-np.sin(a), np.cos(a)]])


def vortex(screenpos, i, nletters):
    """Return a position function spiralling the i-th letter in toward
    `screenpos`, alternating spin direction for odd letters."""
    damping = lambda t: 1.0 / (0.3 + t**8)
    angle = i * np.pi / nletters
    direction = rotMatrix(angle).dot([-1, 0])
    if i % 2:
        direction[1] = -direction[1]
    return lambda t: (screenpos +
                      400 * damping(t) * rotMatrix(0.5 * damping(t) * angle).dot(direction))


def cascade(screenpos, i, nletters):
    v = np.array([0, -1])
Exemplo n.º 20
0
def build_start_title(vid, title_end, texts, text_i):
    """Build an animated title card over the ~4 seconds before `title_end`.

    The frame is masked into left/right halves: the event name slides in
    from the left, the player names from the right, while the club graphic
    and logo zoom in rotating at the center.  Returns a tuple of
    (composited clip, next text index).
    """
    dirname = os.path.dirname(__file__) # Relative path to the folder
    # Images
    maila = "Karttu.png"
    logo = "logo.png"
    # Fonts
    font = "Ebrima-Bold"
    # font = "Fixedsys Regular"
    fontsize = 150
    color = "#e94b3cff"
    stroke_color = "#1d1b1b"

    # Take the last 4s before title_end; if the video starts later than
    # that, stretch the available footage to fill 4 seconds instead.
    if float(title_end) - 4.0 > 0.0:
        clip = vid.subclip(float(title_end) - 4.0, title_end)
    else: # if start is shorter than 4 s
        rate = title_end / 4
        clip = vid.subclip(0, title_end).speedx(rate)
    t = texts[text_i]
    t = t.replace('\\n', '\n')
    t = t.split(";")    # separate players and event. Example: player1 vs. player2; Event 2020
    if len(t) == 2:
        players = t[0]
        event = t[1]
    else:
        event = t[0]
        players = ""

    # Club graphic at 70% of frame height, logo centered above it; the
    # margin shifts the graphic so the pair is horizontally centered.
    maila_img = ImageClip(os.path.join(dirname ,maila), duration = 4)\
                .resize(height = int(clip.size[1] * 0.7))
    logo_img = ImageClip(os.path.join(dirname ,logo), duration = 4)\
                .resize(width = maila_img.size[0] + 100)\
                .set_position(("center", "top"))
    maila_img = maila_img.margin(top = logo_img.size[1],\
                                 left = int((logo_img.size[0] - maila_img.size[0]) / 2),\
                                 right = int((logo_img.size[0] - maila_img.size[0]) / 2),\
                                 opacity = 0)
    images = CompositeVideoClip([maila_img,logo_img])
    # Zoom from 20% scale to full size while spinning to rest (tanh easing).
    images_rot = images.set_position("center")\
                .resize(lambda t: min(0.2 + t*1.5 , 1))\
                .rotate(lambda t: 500 * (tanh(t*4 + 0.5) * -5 + 5), resample = "nearest")
    event_txt = TextClip(event,
                         stroke_color = stroke_color,\
                         stroke_width = 4, color=color,\
                         font= font,\
                         kerning = 5,\
                         fontsize=fontsize)\
                .set_duration(4)
    event_txt = event_txt.resize(width = clip.size[0] / 2 - 300)
    # Event text slides in from the left edge; player text from the right.
    event_txt = event_txt.set_pos(lambda t:(min(clip.size[0]/2 + 100,-800 + t * 1500),clip.size[1]/2))
    player_txt = TextClip(players,\
                          stroke_color = stroke_color,\
                          stroke_width = 4, color=color,\
                          font=font,\
                          kerning = 5,\
                          fontsize=fontsize)\
                .set_duration(4)
    player_txt = player_txt.set_pos(lambda t:(max(clip.size[0]/2 - player_txt.size[0]- 100, clip.size[0] + 600 + t * -1800),clip.size[1]/2 - player_txt.size[1]/2))
    # Full-frame RGBA masks selecting the left and right halves.
    # NOTE(review): the stray `B =` alias below looks like leftover debug code.
    mask_left = np.zeros((clip.size[1],clip.size[0], 4))
    mask_right = B = np.copy(mask_left)
    mask_left = cv2.rectangle(mask_left, (0, 0), (int(clip.size[0]/2), clip.size[1]), (255,255,255,255), -1)
    mask_right = cv2.rectangle(mask_right, (int(clip.size[0]/2), 0), (clip.size[0], clip.size[1]),  (255,255,255,255), -1)
    mask_left = ImageClip(mask_left, duration=2, ismask=True)
    mask_right = ImageClip(mask_right, duration=2, ismask=True)
    # cv2.imwrite("kala.png",mask_right)
    # Layer the half-masked copies so each text shows on its own side.
    comp_clip = CompositeVideoClip([clip, event_txt, clip.set_mask(mask_left)])
    comp_clip = CompositeVideoClip([comp_clip, player_txt, comp_clip.set_mask(mask_right), images_rot])
    return comp_clip, text_i + 1
Exemplo n.º 21
0
 def process_by_script(main_clip):
     """Return the first 10 seconds of the clip, in black & white with a
     painting effect, captioned "Test" in the center."""
     styled = main_clip.fx(vfx.blackwhite).fx(vfx.painting).subclip(0, 10)
     caption = TextClip("Test", fontsize=16, color='white')
     caption = caption.set_pos('center').set_duration(10)
     return CompositeVideoClip([styled, caption])
songClip = AudioFileClip("./Dekhte_Dekhte.mp3") # extract audio clip
lrcFile = open('./Dekhte_Dekhte.lrc', 'r')
lines = lrcFile.readlines() # read the lyrics

bgImageClip = ImageClip('./img/background.jpg') # background image for the video

# Timestamps are sliced as "MM:SS.ss" out of the bracketed LRC tags; the
# first five lines of the .lrc file are presumed to be metadata headers.
beginTimeStamp = '00:00.00'
firstLyricTimeStamp = lines[5][1:9]
songDuration = lines[-1][1:9]
screensize = (1920, 1080)
lastLineFlag = 0

fillerText = TextClip('Dekthe Dekthe',color='white', font="Amiri-Bold", kerning = 5, fontsize=100) # Text to dispaly before lyrics start
fillerDuration = timeDiff(firstLyricTimeStamp, beginTimeStamp) # time before lyrics start
fillerVideoClip = CompositeVideoClip( [bgImageClip.set_pos('center').set_duration(fillerDuration), fillerText.set_pos('center').set_duration(fillerDuration)], size=screensize) # video clip where filler text is displayed on background image
videoClips = [fillerVideoClip] # add the video clip to a list

# Each lyric line is "[MM:SS.ss]original!!translation"; both languages are
# stacked into one caption per timestamped line.
for i in range(5, len(lines)): #start reading lyrics
    lyricStrip = lines[i][10:]
    currentLineTimeStamp = lines[i][:10][1:-1]
    print(currentLineTimeStamp)
    origLyric = lyricStrip.split('!!')[0].lstrip().rstrip() # lyric strip in original language
    engLyric = lyricStrip.split('!!')[1].lstrip().rstrip() # lyric strip in translated language
    disaplayText = origLyric+'\n'+'\n'+'*******************'+'\n'+engLyric # display text at each time step
    LyricClip = TextClip(disaplayText, color='white', font="Amiri-Bold", kerning = 5, fontsize=60) # text clip for combined lyrics
    if (lines[i+1] == '\n'):
        duration = timeDiff(songDuration, currentLineTimeStamp) # calculate time period to display lyric
        lastLineFlag = 1 # if last lyric is reached
    else:
        # NOTE(review): source is truncated here — the rest of this branch
        # and of the loop body is missing from the snippet.
        nextLineTimeStep = lines[i+1][:10][1:-1]
Exemplo n.º 23
0
# vim: fenc=utf-8
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
#
"""
File name: video.py
Author: dhilipsiva <*****@*****.**>
Date created: 2017-02-11
"""

# Import everything needed to edit video clips
from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip

# Load the source clip and lower its audio volume by 20%.
clip = VideoFileClip("baby-cry.webm").volumex(0.8)

# Caption shown centered for the first 3 seconds. Font, color, etc. are
# customizable.
txt_clip = TextClip("Baby Cry", fontsize=70, color='white')
txt_clip = txt_clip.set_pos('center').set_duration(3)

# Overlay the caption on the video and render the result (many options
# available!).
video = CompositeVideoClip([clip, txt_clip])
video.write_videofile("baby-cry-text.webm")