def test_fadein():
    """fadein replaces the first second with a fade from the initial color."""
    palette = {
        "I": (0, 0, 0),
        "R": (255, 0, 0),
        "G": (0, 255, 0),
        "B": (0, 0, 255),
        "W": (255, 255, 255),
    }
    clip = BitmapClip([["R"], ["G"], ["B"]], color_dict=palette, fps=1)

    # Default initial color (black).
    faded_default = fadein(clip, 1)
    expected_default = BitmapClip([["I"], ["G"], ["B"]], color_dict=palette, fps=1)
    assert faded_default == expected_default

    # Explicit white initial color.
    faded_white = fadein(clip, 1, initial_color=(255, 255, 255))
    expected_white = BitmapClip([["W"], ["G"], ["B"]], color_dict=palette, fps=1)
    assert faded_white == expected_white
def process_clip(clip, sound):
    """Overlay TTS-narrated title and top-comment cards on *clip*.

    The title caption fades in at t=0 and lasts as long as the title TTS;
    the comment caption is scheduled after the title (but early enough to
    fit inside the clip) and lasts as long as the comment TTS.  The base
    clip is looped to outlast the narration, which becomes the audio track.
    """
    video = resize.resize(clip, width=640, height=480)

    # Title: TTS audio at t=0 plus a centred, semi-transparent caption.
    title_tts = mp.AudioFileClip(
        sound.dest + "/title" + sound.filename + ".mp3").set_start(0)
    title = mp.TextClip(sound.title, fontsize=50, color="white",
                        font="garamond", method="caption",
                        size=(video.size[0], None))
    title = (title
             .on_color(size=(title.size[0] + 10, title.size[1] + 10),
                       col_opacity=0.5)
             .set_duration(title_tts.duration)
             .set_position("center"))
    title = fadein.fadein(title, 0.2, (255, 255, 255))

    # Comment: start after the title ends, but no later than necessary to
    # keep the whole comment inside the clip.
    comment_tts = mp.AudioFileClip(
        sound.dest + "/comment" + sound.filename + ".mp3")
    comment_time = max(
        min(video.duration - comment_tts.duration, video.duration / 2),
        title_tts.duration + 1)
    comment = mp.TextClip(sound.top_comment, fontsize=20, color="white",
                          method="caption",
                          size=(video.size[0], None)).on_color(col_opacity=0.5)
    comment = (comment
               .set_duration(comment_tts.duration)
               .set_start(comment_time)
               .set_position("bottom"))
    comment = fadein.fadein(comment, 0.2, (255, 255, 255))
    comment_tts = comment_tts.set_start(comment_time)

    narration = mp.CompositeAudioClip([title_tts, comment_tts])
    # Loop the base video so it is at least one second longer than the audio.
    video = loop.loop(video, duration=max(video.duration, narration.duration + 1))
    composed = mp.CompositeVideoClip([video, title, comment])
    return composed.set_audio(narration)
def make_video(video_paths):
    """Concatenate the given videos, with fades and black transitions,
    into ``result.mp4``."""
    videos = []
    for path in video_paths:
        clip = VideoFileClip(path, target_resolution=(1080, 1920))
        # One-second video and audio fades at both ends of every clip.
        clip = fadeout(clip, 1)
        clip = fadein(clip, 1)
        clip = audio_fadeout(clip, 1)
        clip = audio_fadein(clip, 1)
        videos.append(clip)
    # Half a second of black between consecutive clips.
    black_image = ColorClip((1920, 1080), color=[0, 0, 0], duration=0.5)
    result_clip = concat(videos, transition=black_image, method="compose")
    result_clip.write_videofile("result.mp4", fps=60, preset="ultrafast")
def getIntroVid(vidTitle, origCropFrac, sampleHeight, origVidDir, username):
    """Build the intro: three title cards (title, spacer, author) overlaid
    on consecutive thirds of the intro video, with 1 s fade in/out.

    Parameters
    ----------
    vidTitle : str     -- main title text.
    origCropFrac : float -- fraction cropped from top AND bottom.
    sampleHeight : int -- reference frame height used for the crop.
    origVidDir : str   -- path of the intro source video.
    username : str     -- author name; masked as "******" when empty.

    Returns the composed, faded moviepy clip.
    """
    if not username:
        username = "******"
    titleParts = [vidTitle, " ", "by \n " + username]
    sizes = [50, 30, 80]

    introVid = VideoFileClip(origVidDir)
    (w, h) = introVid.size
    length = introVid.duration
    # Split the intro video into thirds, one per title card.
    times = [[0, int(length / 3)],
             [int(length / 3), int(length * 2 / 3)],
             [int(length * 2 / 3), int(length)]]

    # Crop origCropFrac off the top and bottom, keeping the centre.
    introVid = introVid.crop(
        height=int(round((1 - origCropFrac * 2) * sampleHeight, 0)),
        width=w, x_center=w / 2, y_center=h / 2)

    clips = []
    for title, size, (start, end) in zip(titleParts, sizes, times):
        # Integer rounding of `times` can overshoot the clip's end.
        end = min(end, introVid.duration)
        vidClip = introVid.subclip(start, end)
        text = TextClip(title, font="Amiri-Bold", fontsize=size,
                        color="white", align='center').set_position(
                            ("center", 0.3), relative=True)
        text_clip = mpe.CompositeVideoClip([vidClip, text]).set_duration(
            vidClip.duration)
        clips.append(text_clip)

    final_clip = concatenate_videoclips(clips, method='compose')
    final_clip = fadeout(final_clip, 1, final_color=None)
    return fadein(final_clip, 1, initial_color=None)
def test_fadein():
    """fadein over the first second of a 5 s subclip renders without error."""
    source = VideoFileClip("media/big_buck_bunny_0_30.webm")
    with source.subclip(0, 5) as clip:
        faded = fadein(clip, 1)
        faded.write_videofile(os.path.join(TMP_DIR, "fadein1.webm"))
def test_fadein():
    """fadein on the shared test video renders without error."""
    clip = get_test_video()
    faded = fadein(clip, 0.5)
    faded.write_videofile(os.path.join(TMP_DIR, "fadein1.webm"))
    # Release every clip still referenced in this frame.
    close_all_clips(locals())
def test_fadein():
    """fadein writes a faded 5 s subclip cleanly to the temp directory."""
    target = os.path.join(TMP_DIR, "fadein1.webm")
    with VideoFileClip("media/big_buck_bunny_0_30.webm").subclip(0, 5) as clip:
        fadein(clip, 1).write_videofile(target)
def genPMVs(PMV, dot, sampleHeight, sampleWidth, pythonDir):
    """Download music + source videos, cut the videos at the music's loud
    sections, and render the final PMV to ``PMV.outDir``.

    Parameters
    ----------
    PMV : settings object with URLs, directories and tuning attributes
        (musicURL, videoURLs, granularity, sd_scale, ... — see accesses below).
    dot : str -- extension separator, normally ``"."``.
    sampleHeight, sampleWidth : int -- target frame dimensions.
    pythonDir : str -- python executable used to run ``downloadVid.py``.

    Fixes vs. the previous revision:
    * ``np.fromstring`` (removed in NumPy 2.0) -> ``np.frombuffer``.
    * ``if isfile:`` tested the *function object* (always truthy); now
      actually checks each directory entry.
    * ``volumex(0)`` returned a new clip that was discarded; now assigned.
    * The reader-cleanup loop deleted list items while indexing forward,
      skipping (and leaking) every other reader; now closes all of them.
    """
    # --- download the music track (metadata first, then the file itself) ---
    ydl_opts = {
        'outtmpl': PMV.musicDir + '%(title)s' + '.mp4',
        'format': 'best',
        'playlist': 'no'
    }
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.cache.remove()
        info_dict = ydl.extract_info(PMV.musicURL, download=False)
        print('downloading?', PMV.musicURL)
        ydl.download([PMV.musicURL])
    musicName = info_dict.get('title', None)

    # --- download each source video via a helper subprocess ---
    for vid in PMV.videoURLs:
        print(vid)
        subprocess.call([pythonDir, r"downloadVid.py", PMV.vidDir, vid])
    vidDir = PMV.vidDir

    # Only an mp4 music download doubles as an extra source video.
    originalVidBool = PMV.musicType == 'mp4'

    print(AudioSegment.ffmpeg)
    mp3_dir = PMV.musicDir + musicName + dot + 'mp4'  # PMV.musicType

    if not PMV.vidName:
        PMV.vidName = PMV.userName + ' PMV - ' + musicName
    elif len(PMV.userName) > 0:
        PMV.vidName = PMV.vidName + ' - ' + PMV.userName
    file_out = PMV.outDir + PMV.vidName + dot + 'mp4'  # filetypeout

    print(mp3_dir)
    audioclip = AudioFileClip(mp3_dir)
    print('error1')
    sound = AudioSegment.from_file(mp3_dir, 'mp4')
    print('error2')
    if PMV.trimSong:
        # pydub slices in milliseconds; moviepy in seconds.
        sound = sound[PMV.songStart * 1000:PMV.songEnd * 1000]
        audioclip = audioclip.subclip(PMV.songStart, PMV.songEnd)
    print('error3')

    # --- estimate the sample rate from the first 10 seconds of raw audio ---
    tenSecs = 10 * 1000
    first_10_seconds = sound[:tenSecs]
    first_ten_data = np.frombuffer(first_10_seconds._data, dtype=np.int16)
    new_ten_data = np.absolute(first_ten_data)
    bitrate = len(new_ten_data) / 10  # raw samples per second
    print(bitrate)
    ratio = int(round(bitrate * PMV.granularity, 0))

    # --- loudness analysis over the whole song ---
    first_data = np.frombuffer(sound._data, dtype=np.int16)
    new_data = np.absolute(first_data)
    reshaped_data = reshapeData(new_data, ratio)
    diff_data = getElementDiff(reshaped_data)
    result = getHighValues2(reshaped_data, diff_data, PMV.sd_scale,
                            PMV.nSplits, PMV.granularity, PMV.min_length)
    print('List of Indices of maximum element :', len(result))
    print(result)
    print(vidDir)
    result.append(len(first_data) / ratio)  # sentinel: end of the song

    # --- collect up to 120 .mp4 source files from vidDir ---
    videosIn = []
    for f in listdir(vidDir):
        if isfile(vidDir + f) and f.endswith(".mp4") and len(videosIn) < 120:
            videosIn.append(f)

    videoData = []
    for name in videosIn:
        videoData.append(Video(name=name, customStart=0, customEnd=0,
                               directory=vidDir))
        print(name)
    nInVids = len(videosIn)

    origVidName = musicName + dot + PMV.musicType
    if originalVidBool:
        # Weight the music's own video by repeating it relative to the
        # number of other source clips.
        for _ in range(int(nInVids * PMV.origVidScale) + 1):
            videoData.append(Video(name=origVidName, customStart=0,
                                   customEnd=0, directory=PMV.musicDir))

    # --- open every clip and record its usable [customStart, customEnd] ---
    videos = [None] * len(videoData)
    for i in range(len(videoData)):
        if PMV.resize:
            vidTemp = VideoFileClip(
                videoData[i].directory + videoData[i].name).resize(
                    width=sampleWidth)
            if PMV.flipBool and videoData[i].name != origVidName:
                videos[i] = mirror_x(vidTemp)
            else:
                videos[i] = vidTemp
        else:
            videos[i] = VideoFileClip(vidDir + videoData[i].name)
        print('name', 'duration', 'customStart', 'customEnd')
        if originalVidBool and videoData[i].name == origVidName:
            # The music's own video plays in sync with the (trimmed) song.
            if PMV.trimSong:
                videoData[i].customEnd = PMV.songEnd
                videoData[i].customStart = PMV.songStart
            else:
                videoData[i].customEnd = videos[i].duration
                videoData[i].customStart = 0
        else:
            videoData[i].customStart = PMV.startTime
            # Skip the last 40 s of each source clip (outros, credits).
            videoData[i].customEnd = videos[i].duration - 40
            print(videoData[i].name, videos[i].duration,
                  videoData[i].customStart, videoData[i].customEnd)

    clips = videoSplits(result, videos, videoData, first_data, bitrate,
                        PMV.granularity, PMV.randomise, origVidName)

    # --- assemble and render; retry up to 3 times on transient OSErrors ---
    for attempt in range(3):
        try:
            print('stage 1')
            finalVideo = concatenate_videoclips(clips, method='compose')
            if PMV.originalCrop:
                (w, h) = finalVideo.size
                print(PMV.origCropFrac, int(sampleHeight * PMV.origCropFrac),
                      int((1 - PMV.origCropFrac) * sampleHeight), w, h)
                finalVideo = finalVideo.crop(
                    height=int(round((1 - PMV.origCropFrac * 2) * sampleHeight, 0)),
                    width=w, x_center=w / 2, y_center=h / 2)
            print('stage 2')
            # Mute the concatenated source audio before adding the music.
            finalVideo = finalVideo.volumex(0)
            print('stage 3')
            finalVideo2 = finalVideo.set_audio(audioclip)
            print('stage 4')
            finalVideo2 = fadeout(finalVideo2, 1, final_color=None)
            finalVideo2 = fadein(finalVideo2, 1, initial_color=None)
            if PMV.addIntro:
                introVideo = getIntroVid(PMV.vidName, PMV.origCropFrac,
                                         sampleHeight, PMV.introVidDir,
                                         PMV.userName)
                finalVideo3 = concatenate_videoclips(
                    [introVideo, finalVideo2], method='compose')
            else:
                finalVideo3 = finalVideo2
            print('stage 5')
            finalVideo3.write_videofile(file_out, threads=4, fps=25)
        except OSError as OSErrorMessage:
            print("OSError retrying - Attempt: ", attempt)
            print(OSErrorMessage)
        else:
            break
    print('stage 6')

    # --- release every reader (previously every other one was leaked) ---
    for v in videos:
        v.reader.close()
        del v.reader
    videos.clear()
    del audioclip.reader
    del audioclip
    print('Finished!')