def test_audio_normalize_muted():
    """``audio_normalize`` on pure silence must leave the samples untouched
    (there is no peak to scale against)."""
    silence = np.array([0.0])
    clip = AudioClip(lambda t: silence, duration=1, fps=44100)
    clip = audio_normalize(clip)
    assert np.array_equal(clip.to_soundarray(), silence)
    close_all_clips(locals())
def test_loop(util, video):
    """Exercise the ``loop`` FX on bitmap video, real video files and audio."""
    clip = BitmapClip([["R"], ["G"], ["B"]], fps=1)

    # Finite number of repetitions.
    clip1 = loop(clip, n=2)
    target1 = BitmapClip([["R"], ["G"], ["B"], ["R"], ["G"], ["B"]], fps=1)
    assert clip1 == target1

    # Loop until a target duration is reached.
    clip2 = loop(clip, duration=8)
    target2 = BitmapClip(
        [["R"], ["G"], ["B"], ["R"], ["G"], ["B"], ["R"], ["G"]], fps=1
    )
    assert clip2 == target2

    # Infinite loop, clamped afterwards with ``with_duration``.
    clip3 = loop(clip).with_duration(5)
    target3 = BitmapClip([["R"], ["G"], ["B"], ["R"], ["G"]], fps=1)
    assert clip3 == target3

    # Smoke-test writing looped real video to disk.
    clip = video(start_time=0.2, end_time=0.3)  # 0.1 seconds long
    clip1 = loop(clip).with_duration(0.5)  # infinite looping, clamped to 0.5 s
    clip1.write_videofile(os.path.join(util.TMP_DIR, "loop1.webm"))
    clip2 = loop(clip, duration=0.5)  # loop to a total of 0.5 seconds
    clip2.write_videofile(os.path.join(util.TMP_DIR, "loop2.webm"))
    clip3 = loop(clip, n=3)  # loop 3 times
    clip3.write_videofile(os.path.join(util.TMP_DIR, "loop3.webm"))

    # Audio looping.
    # NOTE(review): the looped audio clip is never asserted against anything
    # here — presumably because AudioClip.__eq__ is unreliable; confirm.
    clip = AudioClip(
        lambda t: np.sin(440 * 2 * np.pi * t) * (t % 1) + 0.5,
        duration=2.5,
        fps=44100,
    )
    clip1 = clip.loop(2)
def test_loop():
    """Exercise the ``loop`` FX on bitmap video, a real video file and audio."""
    # NOTE(review): another ``test_loop`` is defined earlier in this file;
    # this later definition shadows it at import time, so only this one is
    # collected by pytest — consider renaming one of them.
    clip = BitmapClip([["R"], ["G"], ["B"]], fps=1)

    # Finite number of repetitions.
    clip1 = loop(clip, n=2)
    target1 = BitmapClip([["R"], ["G"], ["B"], ["R"], ["G"], ["B"]], fps=1)
    assert clip1 == target1

    # Loop until a target duration is reached.
    clip2 = loop(clip, duration=8)
    target2 = BitmapClip(
        [["R"], ["G"], ["B"], ["R"], ["G"], ["B"], ["R"], ["G"]], fps=1
    )
    assert clip2 == target2

    # Infinite loop, clamped afterwards with ``with_duration``.
    clip3 = loop(clip).with_duration(5)
    target3 = BitmapClip([["R"], ["G"], ["B"], ["R"], ["G"]], fps=1)
    assert clip3 == target3

    # Smoke-test writing looped real video to disk.
    clip = get_test_video().subclip(0.2, 0.3)  # 0.1 seconds long
    clip1 = loop(clip).with_duration(0.5)  # infinite looping, clamped to 0.5 s
    clip1.write_videofile(os.path.join(TMP_DIR, "loop1.webm"))
    clip2 = loop(clip, duration=0.5)  # loop to a total of 0.5 seconds
    clip2.write_videofile(os.path.join(TMP_DIR, "loop2.webm"))
    clip3 = loop(clip, n=3)  # loop 3 times
    clip3.write_videofile(os.path.join(TMP_DIR, "loop3.webm"))

    # Audio looping.
    clip = AudioClip(
        lambda t: np.sin(440 * 2 * np.pi * t) * (t % 1) + 0.5,
        duration=2.5,
        fps=44100,
    )
    clip1 = clip.loop(2)
    # TODO fix AudioClip.__eq__()
    # assert concatenate_audioclips([clip, clip]) == clip1
    close_all_clips(objects=locals())
def test_multiply_volume():
    """``multiply_volume`` must scale every sample by the given factor for
    both stereo and mono clips: factor 0 mutes, factor 2 doubles."""
    clip = AudioFileClip("media/crunching.mp3")
    clip_array = clip.to_soundarray()

    # stereo mute
    clip_muted = multiply_volume(clip, 0)
    # Render the muted sound array once instead of once per channel
    # (the original called to_soundarray() twice here).
    clip_muted_array = clip_muted.to_soundarray()
    left_channel_muted = clip_muted_array[:, 0]
    right_channel_muted = clip_muted_array[:, 1]
    z_channel = np.zeros(len(left_channel_muted))
    assert np.array_equal(left_channel_muted, z_channel)
    assert np.array_equal(right_channel_muted, z_channel)

    # stereo level doubled
    clip_doubled = multiply_volume(clip, 2)
    clip_doubled_array = clip_doubled.to_soundarray()
    left_channel_doubled = clip_doubled_array[:, 0]
    right_channel_doubled = clip_doubled_array[:, 1]
    expected_left_channel_doubled = clip_array[:, 0] * 2
    expected_right_channel_doubled = clip_array[:, 1] * 2
    assert np.array_equal(left_channel_doubled, expected_left_channel_doubled)
    assert np.array_equal(right_channel_doubled, expected_right_channel_doubled)

    # mono muted
    sinus_wave = lambda t: [np.sin(440 * 2 * np.pi * t)]
    mono_clip = AudioClip(sinus_wave, duration=1, fps=22050)
    muted_mono_clip = multiply_volume(mono_clip, 0)
    mono_channel_muted = muted_mono_clip.to_soundarray()
    z_channel = np.zeros(len(mono_channel_muted))
    assert np.array_equal(mono_channel_muted, z_channel)

    # mono doubled
    mono_clip = AudioClip(sinus_wave, duration=1, fps=22050)
    doubled_mono_clip = multiply_volume(mono_clip, 2)
    mono_channel_doubled = doubled_mono_clip.to_soundarray()
    d_channel = mono_clip.to_soundarray() * 2
    assert np.array_equal(mono_channel_doubled, d_channel)

    close_all_clips(locals())
def test_multiply_stereo_volume():
    """``multiply_stereo_volume`` must scale each stereo channel
    independently, and also accept mono clips through either keyword."""
    clip = AudioFileClip("media/crunching.mp3")

    # Mute one channel at a time.
    clip_left_channel_muted = multiply_stereo_volume(clip, left=0)
    clip_right_channel_muted = multiply_stereo_volume(clip, right=0, left=2)
    left_channel_muted = clip_left_channel_muted.to_soundarray()[:, 0]
    right_channel_muted = clip_right_channel_muted.to_soundarray()[:, 1]
    silence = np.zeros(len(left_channel_muted))
    assert np.array_equal(left_channel_muted, silence)
    assert np.array_equal(right_channel_muted, silence)

    # The surviving left channel of the second clip was doubled (left=2).
    left_channel_doubled = clip_right_channel_muted.to_soundarray()[:, 0]
    doubled_reference = clip.to_soundarray()[:, 0] * 2
    assert np.array_equal(left_channel_doubled, doubled_reference)

    # Mono muted via ``left``.
    sinus_wave = lambda t: [np.sin(440 * 2 * np.pi * t)]
    mono_clip = AudioClip(sinus_wave, duration=2, fps=22050)
    muted_mono_clip = multiply_stereo_volume(mono_clip, left=0)
    muted_samples = muted_mono_clip.to_soundarray()
    assert np.array_equal(muted_samples, np.zeros(len(muted_samples)))

    # Mono doubled via ``right``.
    mono_clip = AudioClip(sinus_wave, duration=2, fps=22050)
    doubled_mono_clip = multiply_stereo_volume(mono_clip, left=None, right=2)
    doubled_samples = doubled_mono_clip.to_soundarray()
    assert np.array_equal(doubled_samples, mono_clip.to_soundarray() * 2)

    close_all_clips(locals())
def test_audio_fadein(sound_type, fps, clip_duration, fadein_duration):
    """``audio_fadein`` must ramp the volume linearly from silence to full
    level over ``fadein_duration`` and leave the rest of the clip untouched."""
    if sound_type == "stereo":
        make_frame = lambda t: np.array(
            [np.sin(440 * 2 * np.pi * t), np.sin(160 * 2 * np.pi * t)]
        ).T.copy(order="C")
    else:
        make_frame = lambda t: np.sin(440 * 2 * np.pi * t)

    clip = AudioClip(make_frame, duration=clip_duration, fps=fps)
    new_clip = audio_fadein(clip, fadein_duration)

    # The very first frame must be completely silent.
    first_frame = new_clip.get_frame(0)
    if sound_type == "stereo":
        assert len(first_frame) > 1
        assert all(sample == 0.0 for sample in first_frame)
    else:
        assert first_frame == 0.0

    fadein_duration = convert_to_seconds(fadein_duration)

    n_parts = 10

    # Inside the faded region: each tenth of the fade should peak at
    # (i + 1) / n_parts, allowing a small rounding slack.
    time_foreach_part = fadein_duration / n_parts
    fade_starts = np.arange(0, fadein_duration, time_foreach_part)
    for i, start_time in enumerate(fade_starts):
        end_time = start_time + time_foreach_part
        subclip_max_volume = new_clip.subclip(start_time, end_time).max_volume()
        possible_value = (i + 1) / n_parts
        assert round(subclip_max_volume, 2) in [
            possible_value,
            round(possible_value - 0.01, 5),
        ]

    # After the fade: the max volume must stay at (almost exactly) 1.
    time_foreach_part = (clip_duration - fadein_duration) / n_parts
    for start_time in np.arange(fadein_duration, clip_duration, time_foreach_part):
        end_time = start_time + time_foreach_part
        subclip_max_volume = new_clip.subclip(start_time, end_time).max_volume()
        assert round(subclip_max_volume, 4) == 1
def test_audio_fadeout(
    mono_wave, stereo_wave, sound_type, fps, clip_duration, fadeout_duration
):
    """``audio_fadeout`` must ramp the volume linearly down to silence over
    the last ``fadeout_duration`` seconds, leaving the beginning untouched."""
    if sound_type == "stereo":
        make_frame = stereo_wave(left_freq=440, right_freq=160)
    else:
        make_frame = mono_wave(440)

    clip = AudioClip(make_frame, duration=clip_duration, fps=fps)
    new_clip = audio_fadeout(clip, fadeout_duration)

    fadeout_duration = convert_to_seconds(fadeout_duration)

    n_parts = 10

    # Inside the faded region: the i-th tenth of the fade should peak at
    # 1 - i * 0.1, allowing a small rounding slack.
    time_foreach_part = fadeout_duration / n_parts
    fade_starts = np.arange(
        clip_duration - fadeout_duration,
        clip_duration,
        time_foreach_part,
    )
    for i, start_time in enumerate(fade_starts):
        end_time = start_time + time_foreach_part
        subclip_max_volume = new_clip.subclip(start_time, end_time).max_volume()
        possible_value = 1 - i * 0.1
        assert round(subclip_max_volume, 2) in [
            round(possible_value, 2),
            round(possible_value - 0.01, 5),
        ]

    # Before the fade: the max volume must stay at (almost exactly) 1.
    time_foreach_part = (clip_duration - fadeout_duration) / n_parts
    plain_starts = np.arange(0, clip_duration - fadeout_duration, time_foreach_part)
    for start_time in plain_starts:
        end_time = start_time + time_foreach_part
        subclip_max_volume = new_clip.subclip(start_time, end_time).max_volume()
        assert round(subclip_max_volume, 4) == 1
def test_audio_delay(stereo_wave, duration, offset, n_repeats, decay):
    """Check that creating a short pulse of audio, the delay converts to
    a sound with the volume level in the form ``-_-_-_-_-``, being ``-``
    pulses expressed by ``duration`` argument and ``_`` being chunks of
    muted audio.

    Keep in mind that this way of test the FX only works if
    ``duration <= offset``, but as does not make sense create a delay with
    ``duration > offset``, this is enough for our purposes.

    Note that decayment values are not tested here, but are created using
    ``multiply_volume``, should be OK.
    """
    # limits of this test
    assert n_repeats > 0  # some repetition, if not does not make sense
    assert duration <= offset  # avoid wave distortion
    assert not offset * 1000000 % 2  # odd offset -> no accurate muted chunk size

    # stereo audio clip
    clip = AudioClip(
        make_frame=stereo_wave(left_freq=440, right_freq=880),
        duration=duration,
        fps=44100,
    )
    clip_array = clip.to_soundarray()

    # stereo delayed clip
    delayed_clip = audio_delay(clip, offset=offset, n_repeats=n_repeats, decay=decay)
    delayed_clip_array = delayed_clip.to_soundarray()

    # size of chunks with audios (number of sample frames in one pulse)
    sound_chunk_size = clip_array.shape[0]
    # muted chunks size: total frames per offset period minus the pulse itself
    muted_chunk_size = int(
        sound_chunk_size * offset / duration
    ) - sound_chunk_size
    # reference block of silence to compare muted regions against
    zeros_expected_chunk_as_muted = np.zeros((muted_chunk_size, 2))

    # per-repetition volume factors, from 1 down to max(0, decay)
    decayments = np.linspace(1, max(0, decay), n_repeats)

    # first clip, is not part of the repeated ones
    for i in range(n_repeats + 1):
        if i == n_repeats:
            # the delay ends in sound, so last muted chunk does not exists
            break

        # sound chunk: i-th pulse starts after i pulses and i muted gaps
        sound_start_at = i * sound_chunk_size + i * muted_chunk_size
        sound_ends_at = sound_start_at + sound_chunk_size

        # first sound chunk
        # NOTE(review): only the i == 0 pulse is compared sample-by-sample;
        # later pulses are presumably covered by multiply_volume's own tests.
        if i == 0:
            assert np.array_equal(
                delayed_clip_array[:, :][sound_start_at:sound_ends_at],
                multiply_volume(clip, decayments[i]).to_soundarray(),
            )

        # muted chunk
        mute_starts_at = sound_ends_at + 1
        mute_ends_at = mute_starts_at + muted_chunk_size
        assert np.array_equal(
            delayed_clip_array[:, :][mute_starts_at:mute_ends_at],
            zeros_expected_chunk_as_muted,
        )
        # check muted bounds: one frame before/after the gap must not be silent
        assert not np.array_equal(
            delayed_clip_array[:, :][mute_starts_at - 1:mute_ends_at],
            zeros_expected_chunk_as_muted,
        )
        assert not np.array_equal(
            delayed_clip_array[:, :][mute_starts_at:mute_ends_at + 1],
            zeros_expected_chunk_as_muted,
        )
def test_multiply_volume_audioclip(
    sound_type,
    factor,
    duration,
    start_time,
    end_time,
):
    """``multiply_volume`` must scale samples by ``factor`` — over the whole
    clip when no window is given, otherwise only for samples whose time lies
    in ``start_time``..``end_time`` — for both stereo and mono clips."""
    if sound_type == "stereo":
        make_frame = lambda t: np.array(
            [
                np.sin(440 * 2 * np.pi * t),
                np.sin(160 * 2 * np.pi * t),
            ]
        ).T.copy(order="C")
    else:
        make_frame = lambda t: [np.sin(440 * 2 * np.pi * t)]

    clip = AudioClip(
        make_frame,
        duration=duration if duration else 0.1,
        fps=22050,
    )
    clip_array = clip.to_soundarray()

    clip_transformed = multiply_volume(
        clip,
        factor,
        start_time=start_time,
        end_time=end_time,
    )
    clip_transformed_array = clip_transformed.to_soundarray()
    assert len(clip_transformed_array)

    if hasattr(clip_array, "shape") and len(clip_array.shape) > 1:
        # stereo clip
        left_channel_transformed = clip_transformed_array[:, 0]
        right_channel_transformed = clip_transformed_array[:, 1]

        if start_time is None and end_time is None:
            expected_left_channel_transformed = clip_array[:, 0] * factor
            expected_right_channel_transformed = clip_array[:, 1] * factor
        else:
            start_time = convert_to_seconds(start_time) if start_time else clip.start
            end_time = convert_to_seconds(end_time) if end_time else clip.end

            # Accumulate in Python lists and convert once: the original
            # used np.append inside the loop, which reallocates the whole
            # array every iteration (quadratic in the number of samples).
            expected_left = []
            expected_right = []
            for i, frame in enumerate(clip_array):
                t = i / clip.fps
                transformed_frame = frame * (
                    factor if start_time <= t <= end_time else 1
                )
                expected_left.append(transformed_frame[0])
                expected_right.append(transformed_frame[1])
            expected_left_channel_transformed = np.array(expected_left)
            expected_right_channel_transformed = np.array(expected_right)

        assert len(left_channel_transformed)
        assert len(expected_left_channel_transformed)
        assert np.array_equal(
            left_channel_transformed,
            expected_left_channel_transformed,
        )

        assert len(right_channel_transformed)
        assert len(expected_right_channel_transformed)
        assert np.array_equal(
            right_channel_transformed,
            expected_right_channel_transformed,
        )
    else:
        # mono clip
        if start_time is None and end_time is None:
            expected_clip_transformed_array = clip_array * factor
        else:
            start_time = convert_to_seconds(start_time) if start_time else clip.start
            end_time = convert_to_seconds(end_time) if end_time else clip.end

            # NOTE(review): mono sound arrays are indexed here as
            # ``clip_array[0]`` and re-wrapped below with a leading axis —
            # keep both in sync if the mono array layout ever changes.
            expected_samples = []
            for i, frame in enumerate(clip_array[0]):
                t = i / clip.fps
                expected_samples.append(
                    frame * (factor if start_time <= t <= end_time else 1)
                )
            expected_clip_transformed_array = np.array([expected_samples])

        assert len(expected_clip_transformed_array)
        assert np.array_equal(
            expected_clip_transformed_array,
            clip_transformed_array,
        )