def test_multiply_volume_audioclip(
    sound_type,
    factor,
    duration,
    start_time,
    end_time,
):
    if sound_type == "stereo":
        make_frame = lambda t: np.array(
            [
                np.sin(440 * 2 * np.pi * t),
                np.sin(160 * 2 * np.pi * t),
            ]
        ).T.copy(order="C")
    else:
        make_frame = lambda t: [np.sin(440 * 2 * np.pi * t)]

    clip = AudioClip(
        make_frame,
        duration=duration if duration else 0.1,
        fps=22050,
    )
    clip_array = clip.to_soundarray()

    clip_transformed = multiply_volume(
        clip,
        factor,
        start_time=start_time,
        end_time=end_time,
    )
    clip_transformed_array = clip_transformed.to_soundarray()

    assert len(clip_transformed_array)

    if hasattr(clip_array, "shape") and len(clip_array.shape) > 1:
        # stereo clip
        left_channel_transformed = clip_transformed_array[:, 0]
        right_channel_transformed = clip_transformed_array[:, 1]

        if start_time is None and end_time is None:
            expected_left_channel_transformed = clip_array[:, 0] * factor
            expected_right_channel_transformed = clip_array[:, 1] * factor
        else:
            start_time = convert_to_seconds(start_time) if start_time else clip.start
            end_time = convert_to_seconds(end_time) if end_time else clip.end

            expected_left_channel_transformed = np.array([])
            expected_right_channel_transformed = np.array([])
            for i, frame in enumerate(clip_array):
                t = i / clip.fps
                transformed_frame = frame * (
                    factor if start_time <= t <= end_time else 1
                )
                expected_left_channel_transformed = np.append(
                    expected_left_channel_transformed,
                    transformed_frame[0],
                )
                expected_right_channel_transformed = np.append(
                    expected_right_channel_transformed,
                    transformed_frame[1],
                )

        assert len(left_channel_transformed)
        assert len(expected_left_channel_transformed)
        assert np.array_equal(
            left_channel_transformed,
            expected_left_channel_transformed,
        )

        assert len(right_channel_transformed)
        assert len(expected_right_channel_transformed)
        assert np.array_equal(
            right_channel_transformed,
            expected_right_channel_transformed,
        )
    else:
        # mono clip
        if start_time is None and end_time is None:
            expected_clip_transformed_array = clip_array * factor
        else:
            start_time = convert_to_seconds(start_time) if start_time else clip.start
            end_time = convert_to_seconds(end_time) if end_time else clip.end

            expected_clip_transformed_array = np.array([])
            for i, frame in enumerate(clip_array[0]):
                t = i / clip.fps
                transformed_frame = frame * (
                    factor if start_time <= t <= end_time else 1
                )
                expected_clip_transformed_array = np.append(
                    expected_clip_transformed_array,
                    transformed_frame,
                )
            expected_clip_transformed_array = np.array(
                [expected_clip_transformed_array]
            )

        assert len(expected_clip_transformed_array)
        assert np.array_equal(
            expected_clip_transformed_array,
            clip_transformed_array,
        )
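

# A minimal, self-contained sanity check of the untimed case (an illustrative
# sketch with a hypothetical test name, relying only on the same `np`,
# `AudioClip` and `multiply_volume` names used above): with no `start_time`
# or `end_time` window, `multiply_volume` should scale every sample of both
# channels by the given factor.
def test_multiply_volume_whole_clip_sketch():
    make_frame = lambda t: np.array(
        [
            np.sin(440 * 2 * np.pi * t),
            np.sin(160 * 2 * np.pi * t),
        ]
    ).T.copy(order="C")
    clip = AudioClip(make_frame, duration=0.1, fps=22050)
    doubled = multiply_volume(clip, 2)
    # doubling the volume should multiply every sample by exactly 2
    assert np.array_equal(doubled.to_soundarray(), clip.to_soundarray() * 2)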


def test_audio_delay(stereo_wave, duration, offset, n_repeats, decay):
    """Check that, starting from a short pulse of audio, the delay FX produces
    a sound whose volume level has the form `-_-_-_-_-`, where `-` are pulses
    whose length is given by the `duration` argument and `_` are chunks of
    muted audio. Keep in mind that this way of testing the FX only works if
    `duration <= offset`, but as it does not make sense to create a delay with
    `duration > offset`, this is enough for our purposes.

    Note that decay values are not tested here, but since they are produced
    with `multiply_volume`, they should be OK.
    """
    # limits of this test
    assert n_repeats > 0  # some repetition, otherwise the delay makes no sense
    assert duration <= offset  # avoid wave distortion
    assert not offset * 1000000 % 2  # odd offset -> no accurate muted chunk size

    # stereo audio clip
    clip = AudioClip(
        make_frame=stereo_wave(left_freq=440, right_freq=880),
        duration=duration,
        fps=44100,
    )
    clip_array = clip.to_soundarray()

    # stereo delayed clip
    delayed_clip = audio_delay(clip, offset=offset, n_repeats=n_repeats, decay=decay)
    delayed_clip_array = delayed_clip.to_soundarray()

    # size of the chunks that contain sound
    sound_chunk_size = clip_array.shape[0]
    # size of the muted chunks
    muted_chunk_size = int(sound_chunk_size * offset / duration) - sound_chunk_size
    zeros_expected_chunk_as_muted = np.zeros((muted_chunk_size, 2))

    decayments = np.linspace(1, max(0, decay), n_repeats)

    for i in range(n_repeats + 1):  # the first clip is not part of the repeated ones
        if i == n_repeats:
            # the delay ends in sound, so the last muted chunk does not exist
            break

        # sound chunk
        sound_start_at = i * sound_chunk_size + i * muted_chunk_size
        sound_ends_at = sound_start_at + sound_chunk_size

        # first sound chunk
        if i == 0:
            assert np.array_equal(
                delayed_clip_array[:, :][sound_start_at:sound_ends_at],
                multiply_volume(clip, decayments[i]).to_soundarray(),
            )

        # muted chunk
        mute_starts_at = sound_ends_at + 1
        mute_ends_at = mute_starts_at + muted_chunk_size
        assert np.array_equal(
            delayed_clip_array[:, :][mute_starts_at:mute_ends_at],
            zeros_expected_chunk_as_muted,
        )

        # check muted bounds
        assert not np.array_equal(
            delayed_clip_array[:, :][mute_starts_at - 1:mute_ends_at],
            zeros_expected_chunk_as_muted,
        )
        assert not np.array_equal(
            delayed_clip_array[:, :][mute_starts_at:mute_ends_at + 1],
            zeros_expected_chunk_as_muted,
        )
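

# Worked example of the chunk bookkeeping above (illustrative numbers, not
# taken from the test's parameters): with duration=0.01 and offset=0.02 at
# fps=44100, each sound chunk spans 0.01 * 44100 = 441 samples and each muted
# chunk spans int(441 * 0.02 / 0.01) - 441 = 441 samples, so n_repeats=3 is
# expected to produce the layout sound(441) zeros(441) sound(441) zeros(441)
# sound(441), with no trailing muted chunk.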


def test_audio_normalize_muted():
    z_array = np.array([0.0])
    make_frame = lambda t: z_array
    clip = AudioClip(make_frame, duration=1, fps=44100)
    clip = audio_normalize(clip)

    assert np.array_equal(clip.to_soundarray(), z_array)
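

# A minimal sketch of the non-muted counterpart (hypothetical test name and
# illustrative values; it assumes `audio_normalize` rescales a clip so that
# its peak amplitude becomes 1, which is the behaviour the muted test above
# protects from dividing by a zero maximum volume).
def test_audio_normalize_peak_sketch():
    make_frame = lambda t: np.array(
        [
            0.25 * np.sin(440 * 2 * np.pi * t),
            0.25 * np.sin(880 * 2 * np.pi * t),
        ]
    ).T.copy(order="C")
    clip = AudioClip(make_frame, duration=0.1, fps=44100)
    normalized = audio_normalize(clip)
    # after normalization the loudest sample should sit (numerically) at 1
    assert abs(normalized.max_volume() - 1) < 1e-6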