def create_videoclip(self, post_number, is_comment=False, comment_number=0):
    """Create a video for a post (or one of its comments) by pairing the
    rendered screenshot with its narration audio.

    Args:
        post_number: identifier used in the asset filenames.
        is_comment: when True, build the clip for a comment of the post.
        comment_number: index of the comment (only used when is_comment).
    """
    # Comment assets carry a "_comment_<n>" suffix; post assets use the bare
    # post id.  Build the stem once instead of assigning each path twice.
    if is_comment:
        stem = f"post_{post_number}_comment_{comment_number}"
    else:
        stem = f"post_{post_number}"
    aud_path = f"audios/{stem}.mp3"
    img_path = f"images/{stem}.png"
    out_path = f"videos/{stem}.mp4"
    print(
        f"Creating video {out_path} from audio {aud_path} and image {img_path}\n"
    )
    aud_clip = AudioFileClip(aud_path)
    vid_clip = ImageClip(img_path)
    try:
        # Still image shown for exactly the narration's duration.
        vid_clip = vid_clip.set_audio(aud_clip).set_duration(aud_clip.duration)
        vid_clip.write_videofile(out_path,
                                 preset="medium",
                                 temp_audiofile='temp-audio.m4a',
                                 remove_temp=True,
                                 codec="mpeg4",
                                 audio_codec="aac",
                                 fps=24)
    finally:
        # Close the clips so their ffmpeg reader processes / file handles are
        # released even if rendering fails (the original leaked them).
        aud_clip.close()
        vid_clip.close()
def video_creation():
    """Render a still-image video whose audio loops until a minimum length.

    Command line arguments: [main.py, image, audio, min_length, movie_name,
    (optional) location].  ``min_length`` uses the format Minutes.Seconds.
    """
    # Four arguments are mandatory; bail out early if any are missing.
    if len(sys.argv) < 5:
        arg_error('{0} args supplied but {1} args required.'.format(
            len(sys.argv) - 1, 4))

    location = ''  # default is no location, i.e., save video in working directory
    if len(sys.argv) > 5:
        if len(sys.argv[5]) > 0:  # allow empty path for future compatibility
            location = sys.argv[5]
            # make sure the path ends with a separator of either flavour
            if location[-1] != '/' and location[-1] != '\\':
                location += '/'
            try:
                # create directory if needed (works recursively)
                pathlib.Path(location).mkdir(parents=True, exist_ok=True)
            except Exception as e:
                arg_error(
                    'problem with location \'{0}\'. Either it could not be found or it could not be created.'
                    .format(location), e)

    image_src = sys.argv[1]
    audio_src = sys.argv[2]
    try:
        # format: Minutes.Seconds -> total seconds
        pieces = str(sys.argv[3]).split('.')
        min_length = int(pieces[0]) * 60 + int(pieces[1])
    except Exception as e:
        arg_error(
            '[min_length] not parsable. \'{0}\' was supplied when the correct format is \'numbers.numbers\', e.g., \'123.59\'.'
            .format(sys.argv[3]), e)
    if min_length < 1:
        min_length = 1  # keep min_length no smaller than one second
    movie_name = sys.argv[4] + '.mp4'

    # beginning of moviepy functions
    still = ImageClip(image_src)        # the single frame shown throughout
    soundtrack = AudioFileClip(audio_src)
    # how many complete audio loops are needed to meet the min_length?
    nloops = int(min_length / soundtrack.duration + 1)
    soundtrack = audio_loop.audio_loop(soundtrack, nloops=nloops)
    # attach the looped audio and stretch the frame to match it
    still = still.set_audio(soundtrack)
    still.fps = 24
    still.duration = soundtrack.duration
    # render!
    still.write_videofile(location + movie_name,
                          preset='ultrafast',
                          threads=multiprocessing.cpu_count())
def outro():
    """Render the 30-second outro clip: a static image with the outro
    voice-over mixed over background jazz."""
    # moviepy 1.x expects plain string filenames (parts of its pipeline call
    # str-only methods on the path), so convert every Path explicitly before
    # handing it to the clip constructors / writer.
    outro_img = pathlib.Path(RESOURCES + "/images/outputMoment.jpg")
    audio = AudioFileClip(str(pathlib.Path(RESOURCES + "/sounds/outroaud.wav")))
    music = AudioFileClip(str(pathlib.Path(RESOURCES + "/sounds/jazz_lounge.mp3")))
    # Voice-over and music play simultaneously.
    final_audio = CompositeAudioClip([audio, music])
    # Use a local name that does not shadow this function.
    clip = ImageClip(str(outro_img))
    clip = clip.set_fps(24)
    clip = clip.set_audio(final_audio)
    clip = clip.set_duration(30)
    clip.write_videofile(str(pathlib.Path(RESOURCES + "/vids/outro.mp4")))
def add_static_image_to_audio(image_path, audio_path, output_path):
    """Combine the static image at `image_path` with the audio track at
    `audio_path` and write the resulting video to `output_path`.

    The video lasts exactly as long as the audio; 1 fps is enough because the
    frame never changes.
    """
    soundtrack = AudioFileClip(audio_path)
    frame = ImageClip(image_path)
    # Attach the audio to the still frame.
    video = frame.set_audio(soundtrack)
    # Clip length follows the audio; minimal frame rate for a static image.
    video.duration = soundtrack.duration
    video.fps = 1
    # Render the combined clip.
    video.write_videofile(output_path)
def write_output(audio_clip, output_filename, background=None, verbose=False):
    """Store `audio_clip` to disk: as an audio file, or — when a background
    image is given — as a 1 fps video with that image as the frame."""
    if verbose:
        print("Storing clip {} to {} (background={})".format(
            audio_clip, output_filename, background))
    if background:
        # Still-image video carrying the audio track.
        video = ImageClip(background, duration=audio_clip.duration).set_audio(audio_clip)
        video.write_videofile(output_filename,
                              fps=1,
                              audio_fps=16000,
                              audio_nbytes=2,
                              audio_bitrate='16k',
                              verbose=verbose)
    else:
        # Audio-only output at 16 kHz / 16-bit.
        audio_clip.write_audiofile(output_filename,
                                   fps=16000,
                                   nbytes=2,
                                   bitrate='16k',
                                   verbose=verbose)
def write_output(audio_clip, output_filename, background=None, verbose=False):
    """Persist `audio_clip`: plain audio by default, or a still-image video
    when a `background` image path is supplied."""
    if not background:
        # Audio-only path: 16 kHz, 16-bit samples.
        audio_clip.write_audiofile(
            output_filename,
            fps=16000,
            nbytes=2,
            bitrate='16k',
            verbose=verbose
        )
        return
    # Video path: one static frame for the whole duration of the audio.
    still = ImageClip(background, duration=audio_clip.duration)
    still = still.set_audio(audio_clip)
    still.write_videofile(
        output_filename,
        fps=1,
        audio_fps=16000,
        audio_nbytes=2,
        audio_bitrate='16k',
        verbose=verbose
    )
def temp_audio_file(open_data=True, duration=2, suffix='.mp3', delete=True):
    """Yield a temporary generated media fixture: a sine-tone .mp3, or a .mp4
    pairing that tone with a random still image.

    Args:
        open_data: when True yield the file's raw bytes, otherwise its path.
        duration: clip length in seconds.
        suffix: '.mp3' for audio only, '.mp4' for audio + image video.
        delete: forwarded to utils.temp_file to control cleanup.
    """
    assert suffix in ['.mp3', '.mp4'], 'Invalid suffix type:%s' % suffix
    # logic taken from https://zulko.github.io/moviepy/ref/AudioClip.html?highlight=sin
    with utils.temp_file(suffix=suffix, delete=delete) as temp_file:
        # 404 Hz sine tone duplicated onto two channels (stereo).
        audio_frames = lambda t: 2 * [np.sin(404 * 2 * np.pi * t)]
        audioclip = AudioClip(audio_frames, duration=duration)
        if suffix == '.mp3':
            audioclip.write_audiofile(temp_file, verbose=False)
        else:
            # Random 30x30 RGB frame as the video's single image.
            image = ImageClip(np.random.rand(30, 30, 3) * 255)
            videoclip = image.set_audio(audioclip)
            videoclip.duration = duration
            videoclip.fps = 24
            videoclip.write_videofile(temp_file, verbose=False)
        if not open_data:
            yield temp_file
        else:
            # BUG FIX: mp3/mp4 payloads are binary — the original opened the
            # file in text mode ('r'), which raises UnicodeDecodeError on
            # arbitrary media bytes.  Read in binary mode and yield bytes.
            with open(temp_file, 'rb') as f:
                yield f.read()
# Build every ordering (permutation) of the five audio chunks, render each as
# a titled clip with its audio, and export the concatenation of all of them.
variations = OrderedDict()
for p in permutations((0, 1, 2, 3, 4)):
    out = []
    for elem in p:
        if len(out) == 0:
            out = chunks[elem].copy()
        else:
            out = concatenate((out, chunks[elem].copy()))
    variations[str(p)] = out.copy()

cnt = 0
# BUG FIX: this chunk used Python-2-only syntax (`print cnt,` and slicing
# dict.keys() directly) while the rest of the file is Python 3 (f-strings);
# ported to Python 3 equivalents with identical output.
for p in list(variations.keys()):
    cnt += 1
    print(cnt, end=' ')
    p = str(p)
    # title clip shown for 2 seconds before each variation
    title_clip = TextClip(p, color='white', fontsize=30).set_pos('center').set_duration(2)
    clips.append(CompositeVideoClip([title_clip], size=screensize))
    # generate output files
    display_audio_clip(variations[p], sample_freq, p)
    wavfile.write('sound.wav', sample_freq, variations[p].astype(int16))
    # load them with MoviePy
    aud_clip = AudioFileClip('sound.wav', fps=sample_freq)
    im_clip = ImageClip("tmp.png")
    im_clip = im_clip.set_audio(aud_clip)
    im_clip = im_clip.set_duration(aud_clip.duration)
    clips.append(CompositeVideoClip([im_clip], size=screensize))

video = concat_clips(clips)
# NOTE(review): modern moviepy renamed to_videofile -> write_videofile;
# confirm the installed moviepy version still provides to_videofile.
video.to_videofile("SamsungVariations.avi", codec='mpeg4')
from moviepy.editor import VideoFileClip, concatenate_videoclips, clips_array, CompositeVideoClip, ImageClip, AudioFileClip
import sys
import pathlib

# Overlay a rotated, narrated photo on top of a base video and export it.
# argv: [script, video id, audio basename (without .mp3), photo path]
baseURL = pathlib.Path(__file__).parent.absolute()
video_id = sys.argv[1]  # renamed from `id`, which shadowed the builtin
audio = sys.argv[2]
foto = sys.argv[3]

# Base video
clip1 = VideoFileClip(f"{baseURL}/video.mp4")

# Narration audio
audioclip = AudioFileClip(f"{baseURL}/{audio}.mp3")

# Photo overlay: shown for 13 seconds, alpha channel kept
fotoClip = ImageClip(foto, duration=13, transparent=True)
fotoClip = fotoClip.set_audio(audioclip)
# NOTE(review): moviepy's resize uses only one of width/height when both are
# supplied — confirm the intended final size of the overlay.
fotoClip = fotoClip.resize(width=100, height=100)
fotoClip = fotoClip.set_position((110, 150))
fotoClip = fotoClip.add_mask().rotate(40)
fotoClip = fotoClip.set_start(65)  # overlay appears 65 s into the base video

final_clip = CompositeVideoClip([clip1, fotoClip])
final_clip.write_videofile(f"{baseURL}/../storage/app/videos/video_{video_id}.mp4")