def test_audiofileclip_concat():
    """Audio from an mp3 subclip and a video file concatenate and render."""
    crunch = AudioFileClip("media/crunching.mp3").subclip(1, 4)
    # Checks it works with videos as well
    bunny = AudioFileClip("media/big_buck_bunny_432_433.webm")
    joined = concatenate_audioclips((crunch, bunny))
    joined.write_audiofile(os.path.join(TMP_DIR, "concat_audio_file.mp3"))
def trim_original_audio_to_audio_subclips(input_video_file_name, matched_time_section, output_file_name):
    """Save edited audio where my bias appeared in video.

    Loads the audio track of ``input_video_file_name``, keeps only the
    ``(start, end)`` second ranges listed in ``matched_time_section`` and
    writes their concatenation to ``output_file_name``.
    """
    source_audio = AudioFileClip(input_video_file_name)
    kept_segments = []
    for start, end in matched_time_section:
        kept_segments.append(source_audio.subclip(start, end))
    concatenate_audioclips(kept_segments).write_audiofile(output_file_name)
def test_concatenate_audiofileclips():
    """Two file-based clips concatenate; duration is the sum of both."""
    first = AudioFileClip("media/crunching.mp3").subclip(1, 4)
    # Checks it works with videos as well
    second = AudioFileClip("media/big_buck_bunny_432_433.webm")
    result = concatenate_audioclips((first, second))
    result.write_audiofile(os.path.join(TMP_DIR, "concat_audio_file.mp3"))
    assert result.duration == first.duration + second.duration
    close_all_clips(locals())
def test_audiofileclip_concat():
    """File-based audio clips concatenate and render (skipped on Windows)."""
    if sys.platform.startswith("win"):
        pytest.skip("Temporarily skipping on windows because otherwise test suite fails with Invalid Handle Error")
    sound = AudioFileClip("media/crunching.mp3").subclip(1, 4)
    # Checks it works with videos as well
    sound2 = AudioFileClip("media/big_buck_bunny_432_433.webm")
    combined = concatenate_audioclips((sound, sound2))
    combined.write_audiofile(os.path.join(TMP_DIR, "concat_audio_file.mp3"))
def audio_loop(clip, n_loops=None, duration=None):
    """Loops over an audio clip.

    Returns an audio clip that plays the given clip either
    `n_loops` times, or during `duration` seconds.

    Examples
    ========

    >>> from moviepy import *
    >>> videoclip = VideoFileClip('myvideo.mp4')
    >>> music = AudioFileClip('music.ogg')
    >>> audio = afx.audio_loop( music, duration=videoclip.duration)
    >>> videoclip.with_audio(audio)

    """
    if duration is None:
        return concatenate_audioclips(n_loops * [clip])
    # Enough whole repetitions to cover ``duration``, then trim exactly to it.
    repetitions = int(duration / clip.duration) + 1
    return concatenate_audioclips([clip] * repetitions).with_duration(duration)
def test_audioclip_with_file_concat():
    """A synthesized AudioClip concatenates with a file-based clip."""
    tone_440 = AudioClip(lambda t: [sin(440 * 2 * pi * t)], duration=1, fps=44100)
    from_file = AudioFileClip("media/crunching.mp3")
    concat_clip = concatenate_audioclips((tone_440, from_file))
    return
    # Fails with strange error
    # "ValueError: operands could not be broadcast together with
    # shapes (1993,2) (1993,1993)1
    concat_clip.write_audiofile(os.path.join(TMP_DIR, "concat_clip_with_file_audio.mp3"))
def test_concatenate_audioclips_render(util, mono_wave):
    """Concatenated AudioClips through ``concatenate_audioclips`` should
    return a clip that can be rendered to a file.
    """
    out_path = os.path.join(util.TMP_DIR, "concatenate_audioclips.mp3")
    clip_440 = AudioClip(mono_wave(440), duration=0.01, fps=44100)
    clip_880 = AudioClip(mono_wave(880), duration=0.000001, fps=22050)
    rendered = concatenate_audioclips((clip_440, clip_880))
    rendered.write_audiofile(out_path, logger=None)
    assert rendered.duration == clip_440.duration + clip_880.duration
def test_concatenate_audioclip_with_audiofileclip():
    """A stereo synthesized clip concatenates with a file-based clip."""
    # stereo A note
    def stereo_note(t):
        return np.array(
            [np.sin(440 * 2 * np.pi * t), np.sin(880 * 2 * np.pi * t)]).T

    synth = AudioClip(stereo_note, duration=1, fps=44100)
    from_file = AudioFileClip("media/crunching.mp3")
    merged = concatenate_audioclips((synth, from_file))
    merged.write_audiofile(
        os.path.join(TMP_DIR, "concat_clip_with_file_audio.mp3"))
    assert merged.duration == synth.duration + from_file.duration
def test_audiofileclip_concat():
    """File-based audio clips concatenate and render (skipped on Windows)."""
    if sys.platform.startswith("win"):
        pytest.skip(
            "Temporarily skipping on windows because otherwise test suite fails with Invalid Handle Error"
        )
    crunch = AudioFileClip("media/crunching.mp3").subclip(1, 4)
    # Checks it works with videos as well
    bunny_audio = AudioFileClip("media/big_buck_bunny_432_433.webm")
    result = concatenate_audioclips((crunch, bunny_audio))
    result.write_audiofile(os.path.join(TMP_DIR, "concat_audio_file.mp3"))
def test_concatenate_audioclip_with_audiofileclip(util, stereo_wave):
    """A stereo synthesized clip concatenates with a file-based clip."""
    synth = AudioClip(
        stereo_wave(left_freq=440, right_freq=880),
        duration=1,
        fps=44100,
    )
    from_file = AudioFileClip("media/crunching.mp3")
    merged = concatenate_audioclips((synth, from_file))
    merged.write_audiofile(
        os.path.join(util.TMP_DIR, "concat_clip_with_file_audio.mp3"),
        logger=None,
    )
    assert merged.duration == synth.duration + from_file.duration
def test_audioclip_with_file_concat():
    """A synthesized clip concatenates with a file clip (skipped on Windows)."""
    if sys.platform.startswith("win"):
        pytest.skip("Temporarily skipping on windows because otherwise test suite fails with Invalid Handle Error")
    generated = AudioClip(lambda t: [sin(440 * 2 * pi * t)], duration=1, fps=44100)
    from_file = AudioFileClip("media/crunching.mp3")
    concat_clip = concatenate_audioclips((generated, from_file))
    return
    # Fails with strange error
    # "ValueError: operands could not be broadcast together with
    # shapes (1993,2) (1993,1993)1
    concat_clip.write_audiofile(os.path.join(TMP_DIR, "concat_clip_with_file_audio.mp3"))
def test_concatenate_audioclips_render():
    """Concatenated AudioClips through ``concatenate_audioclips`` should
    return a clip that can be rendered to a file.
    """
    clip_440 = AudioClip(lambda t: [np.sin(440 * 2 * np.pi * t)],
                         duration=0.01, fps=44100)
    clip_880 = AudioClip(lambda t: [np.sin(880 * 2 * np.pi * t)],
                         duration=0.000001, fps=22050)
    rendered = concatenate_audioclips((clip_440, clip_880))
    rendered.write_audiofile(
        os.path.join(TMP_DIR, "concatenate_audioclips.mp3"))
    assert rendered.duration == clip_440.duration + clip_880.duration
    close_all_clips(locals())
def test_audioclip_concat():
    """Concatenation of synthesized clips keeps the greatest fps."""
    clip_a = AudioClip(lambda t: [sin(440 * 2 * pi * t)], duration=1, fps=44100)
    clip_b = AudioClip(lambda t: [sin(880 * 2 * pi * t)], duration=2, fps=22050)
    joined = concatenate_audioclips((clip_a, clip_b))
    # concatenate_audioclips should return a clip with an fps of the greatest
    # fps passed into it
    assert joined.fps == 44100
    return
    # Does run without errors, but the length of the audio is way to long,
    # so it takes ages to run.
    joined.write_audiofile(os.path.join(TMP_DIR, "concat_audioclip.mp3"))
def test_audioclip_concat():
    """Concatenation keeps the greatest fps (skipped on Windows)."""
    if sys.platform.startswith("win"):
        pytest.skip("Temporarily skipping on windows because otherwise test suite fails with Invalid Handle Error")
    clip_a = AudioClip(lambda t: [sin(440 * 2 * pi * t)], duration=1, fps=44100)
    clip_b = AudioClip(lambda t: [sin(880 * 2 * pi * t)], duration=2, fps=22050)
    joined = concatenate_audioclips((clip_a, clip_b))
    # concatenate_audioclips should return a clip with an fps of the greatest
    # fps passed into it
    assert joined.fps == 44100
    return
    # Does run without errors, but the length of the audio is way to long,
    # so it takes ages to run.
    joined.write_audiofile(os.path.join(TMP_DIR, "concat_audioclip.mp3"))
def test_audioclip_with_file_concat():
    """A synthesized clip concatenates with a file clip (skipped on Windows)."""
    if sys.platform.startswith("win"):
        pytest.skip(
            "Temporarily skipping on windows because otherwise test suite fails with Invalid Handle Error"
        )
    generated = AudioClip(lambda t: [sin(440 * 2 * pi * t)], duration=1, fps=44100)
    from_file = AudioFileClip("media/crunching.mp3")
    concat_clip = concatenate_audioclips((generated, from_file))
    return
    # Fails with strange error
    # "ValueError: operands could not be broadcast together with
    # shapes (1993,2) (1993,1993)1
    concat_clip.write_audiofile(
        os.path.join(TMP_DIR, "concat_clip_with_file_audio.mp3"))
def test_concatenate_audioclips_CompositeAudioClip():
    """Concatenated AudioClips through ``concatenate_audioclips`` should return a
    CompositeAudioClip whose attributes should be consistent:

    - Returns CompositeAudioClip.
    - Their fps is taken from the maximum of their audios.
    - Audios are placed one after other:
      - Duration is the sum of their durations.
      - Ends are the accumulated sum of their durations.
      - Starts are the accumulated sum of their durations, but first start is 0
        and lastest is ignored.
    - Channels are the max channels of their clips.
    """
    frequencies = [440, 880, 1760]
    durations = [2, 5, 1]
    fpss = [44100, 22050, 11025]

    # Bind ``frequency`` as a default argument: a plain closure would be
    # late-bound and every clip would synthesize the last frequency (1760).
    clips = [
        AudioClip(
            lambda t, frequency=frequency: [np.sin(frequency * 2 * np.pi * t)],
            duration=duration,
            fps=fps,
        )
        for frequency, duration, fps in zip(frequencies, durations, fpss)
    ]

    concat_clip = concatenate_audioclips(clips)

    # should return a CompositeAudioClip
    assert isinstance(concat_clip, CompositeAudioClip)

    # fps of the greatest fps passed into it
    assert concat_clip.fps == 44100

    # audios placed on after other
    assert concat_clip.duration == sum(durations)
    assert list(concat_clip.ends) == list(np.cumsum(durations))
    # Fixed: the original used a comma instead of ``==``, turning the assert
    # into an always-truthy tuple that never checked the starts.
    assert list(concat_clip.starts) == list(np.cumsum([0, *durations[:-1]]))

    # channels are maximum number of channels of the clips
    assert concat_clip.nchannels == max(clip.nchannels for clip in clips)
    close_all_clips(locals())
def test_audioclip_concat():
    """Concatenation keeps the greatest fps (skipped on Windows)."""
    if sys.platform.startswith("win"):
        pytest.skip(
            "Temporarily skipping on windows because otherwise test suite fails with Invalid Handle Error"
        )
    clip_a = AudioClip(lambda t: [sin(440 * 2 * pi * t)], duration=1, fps=44100)
    clip_b = AudioClip(lambda t: [sin(880 * 2 * pi * t)], duration=2, fps=22050)
    joined = concatenate_audioclips((clip_a, clip_b))
    # concatenate_audioclips should return a clip with an fps of the greatest
    # fps passed into it
    assert joined.fps == 44100
    return
    # Does run without errors, but the length of the audio is way to long,
    # so it takes ages to run.
    joined.write_audiofile(os.path.join(TMP_DIR, "concat_audioclip.mp3"))
def audio_concatenate(audio_or_files, **kwargs):
    """
    Concatenates sounds. Met bout à bout des sons.

    @param      audio_or_files  list of sounds or filenames
    @param      kwargs          additional parameters for
                                `concatenate_audioclips
                                <https://github.com/Zulko/moviepy/blob/master/moviepy/audio/AudioClip.py#L308>`_
    @return                     :epkg:`AudioClip`

    Example:

    ::

        from code_beatrix.art.video import audio_concatenate
        son = audio_concatenate(['son1.mp3', 'son2.mp3'])
    """
    # NOTE(review): AudioContext.__exit__ is called without arguments here, as
    # in the original code — presumably it declares default parameters; confirm.
    contexts = [AudioContext(item).__enter__() for item in audio_or_files]
    try:
        # Build the concatenation while every source context is still open.
        return concatenate_audioclips(
            [get_wrapped(context) for context in contexts], **kwargs)
    finally:
        # Fixed: the original leaked the contexts when get_wrapped or
        # concatenate_audioclips raised; always close what was opened.
        for context in contexts:
            context.__exit__()
import os

import moviepy.editor as mp
from moviepy.audio.AudioClip import concatenate_audioclips

# Background tracks, played back to back under the rendered video.
music = ["friends.mp3", "laugh1.mp3", "normal.mp3", "men1.mp3", "beyond.mp3"]

vclip = mp.VideoFileClip("gource.mp4")

# Load every track from the ``music`` directory and join them into one clip.
audio = concatenate_audioclips(
    [mp.AudioFileClip(os.path.join("music", file_name)) for file_name in music]
)

vclip = vclip.set_audio(audio)
vclip.write_videofile('pyecharts.mp4')
def cut_audio_silence(audio_clip, chunk_duration, threshold):
    """Drop silence: keep only the loud sections of ``audio_clip``.

    Uses ``get_loud_timings`` to find (start, end) ranges above ``threshold``
    and returns their concatenation as a single clip.
    """
    kept_sections = []
    for start, end in get_loud_timings(audio_clip, chunk_duration, threshold):
        kept_sections.append(audio_clip.subclip(start, end))
    return concatenate_audioclips(kept_sections)
# sources
video = VideoFileClip("D:\\upload\\video.mp4").subclip(0, 6).add_mask()
video = video.resize(size)

foto1 = ImageClip("D:\\upload\\foto1.png").set_duration(3).resize(size)
foto2 = ImageClip("D:\\upload\\foto2.png").set_duration(3).resize(size)
foto3 = ImageClip("D:\\upload\\fotoend.jpg").set_duration(2).resize(size)

# Timeline offsets: the video starts after foto1, the remaining stills follow.
num = video.duration + 3
numfin = num + 3

# creating the video
videofinal = CompositeVideoClip([
    foto1,  # starts at t=0
    video.set_start(3),
    foto2.set_start(num),
    foto3.set_start(numfin)
])

# Three seconds of background music, then the video's own audio track.
background_audio_clip = AudioFileClip("D:\\upload\\audio.mp3").subclip(0, 3)
bg_music = concatenate_audioclips([background_audio_clip, video.audio])

videofinal = videofinal.set_audio(bg_music)
videofinal.write_videofile("D:\\upload\\final.mp4")
def process(self):
    """Run the full matting pipeline: segment people with Detectron2
    PointRend, composite the target clips over the background clips (with
    optional blur / TikTok / clone effects), and export the result as mp4.

    Emits ``self.finish_process`` when the file has been written.
    """
    my_logger = MyBarLogger(self.message, self.progress)
    my_logger(message="Detectron2 - Initializing the predictor")
    # Detectron2 default configuration
    cfg = get_cfg()
    # PointRend configuration
    add_pointrend_config(cfg)
    # Read the PointRend parameter settings from this file
    cfg.merge_from_file(
        "projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml"
    )
    # Score threshold; a threshold that is too low makes inference very slow
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    # Load the pretrained model weights
    cfg.MODEL.WEIGHTS = "datasets/model_final_3c3198.pkl"
    if not torch.cuda.is_available():
        cfg.MODEL.DEVICE = 'cpu'
    predictor = DefaultPredictor(cfg)

    # Per-frame: drop detections of unwanted classes, preparing the data for
    # the to_mask conversion below.
    def custom_frame(frame):
        _frame = frame.copy()
        output = predictor(_frame)
        instances = output['instances'].to('cpu')
        data = {
            'classes': instances.pred_classes.numpy(),
            'boxes': instances.pred_boxes.tensor.numpy(),
            'masks': instances.pred_masks.numpy(),
            'scores': instances.scores.numpy()
        }
        # Keep only the "person" class.
        # NOTE(review): this ``process`` resolves to a module-level helper,
        # not this method — confirm there is no accidental shadowing.
        data = process(data, target_class=[class_names.index('person')])
        result = custom_show(_frame, data['masks'])
        return result

    # Use the final frame height as the reference so that every target clip
    # shares the same frame height; with concatenate's "compose" mode this
    # avoids distortion from stretching.
    for i in range(len(self.targets)):
        self.targets[i] = self.targets[i].fx(vfx.resize, height=self.height)
    for i in range(len(self.backgrounds)):
        self.backgrounds[i] = self.backgrounds[i].fx(
            vfx.resize, (self.width, self.height))
    # Simple concatenation of the source clips
    target = concatenate_videoclips(self.targets, method="compose").without_audio()
    background = concatenate_videoclips(self.backgrounds).without_audio()
    # Compute the total duration; if audio tracks were given, concatenate
    # them and let the shortest stream bound the duration.
    audio = None
    duration = min(target.duration, background.duration)
    if self.audios:
        audio = concatenate_audioclips(self.audios)
        duration = min(target.duration, background.duration, audio.duration)
    # Convert the recognition result — an ndarray of size (n, w, h) — into a
    # mask telling which parts of the target clip are visible over the
    # background.
    mask_clip = target.fl_image(custom_frame).to_mask()
    # Gaussian blur on the target and/or the background
    if self.gauss_target:
        target = target.fl_image(blur)
    if self.gauss_background:
        background = background.fl_image(blur)
    # Apply the TikTok effect on the target
    if self.tiktok:
        target = target.fl_image(tiktok_effect)
    # Clone ("triple") effect
    if self.triple:
        temp = self.triple_effect(target, mask_clip, width=self.width, height=self.height)
        temp.insert(0, background)
    else:
        # set_mask makes the parts recognized as True visible on the background
        target = target.set_mask(mask_clip).set_position(
            "center", "center")
        temp = [background, target]
    # Composite all the clips together
    final_clip = CompositeVideoClip(temp).set_audio(audio). \
        set_duration(duration) if audio else CompositeVideoClip(temp).set_duration(duration)
    # Export to a file
    final_clip.write_videofile(
        f'./output/{time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())}.mp4',
        fps=30,
        codec='mpeg4',
        bitrate="8000k",
        audio_codec="libmp3lame",
        threads=4,
        logger=my_logger)
    self.finish_process.emit()