Example 1
# Assumed imports for all examples below (MoviePy 2.0-dev API; module
# paths may differ in other MoviePy versions):
import numpy as np
from moviepy.audio.AudioClip import AudioClip
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.audio.fx.multiply_volume import multiply_volume
from moviepy.audio.fx.audio_delay import audio_delay
from moviepy.tools import convert_to_seconds
from moviepy.utils import close_all_clips


def test_multiply_volume():
    clip = AudioFileClip("media/crunching.mp3")
    clip_array = clip.to_soundarray()

    # stereo mute
    clip_muted = multiply_volume(clip, 0)

    clip_muted_array = clip_muted.to_soundarray()
    left_channel_muted = clip_muted_array[:, 0]
    right_channel_muted = clip_muted_array[:, 1]

    z_channel = np.zeros(len(left_channel_muted))

    assert np.array_equal(left_channel_muted, z_channel)
    assert np.array_equal(right_channel_muted, z_channel)

    # stereo level doubled
    clip_doubled = multiply_volume(clip, 2)
    clip_doubled_array = clip_doubled.to_soundarray()
    left_channel_doubled = clip_doubled_array[:, 0]
    right_channel_doubled = clip_doubled_array[:, 1]
    expected_left_channel_doubled = clip_array[:, 0] * 2
    expected_right_channel_doubled = clip_array[:, 1] * 2

    assert np.array_equal(left_channel_doubled, expected_left_channel_doubled)
    assert np.array_equal(right_channel_doubled,
                          expected_right_channel_doubled)

    # mono muted
    sinus_wave = lambda t: [np.sin(440 * 2 * np.pi * t)]
    mono_clip = AudioClip(sinus_wave, duration=1, fps=22050)
    muted_mono_clip = multiply_volume(mono_clip, 0)
    mono_channel_muted = muted_mono_clip.to_soundarray()

    z_channel = np.zeros(len(mono_channel_muted))
    assert np.array_equal(mono_channel_muted, z_channel)

    mono_clip = AudioClip(sinus_wave, duration=1, fps=22050)
    doubled_mono_clip = multiply_volume(mono_clip, 2)
    mono_channel_doubled = doubled_mono_clip.to_soundarray()
    d_channel = mono_clip.to_soundarray() * 2
    assert np.array_equal(mono_channel_doubled, d_channel)

    close_all_clips(locals())
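
A minimal usage sketch of the same FX outside the test harness (assuming the
imports above; the tone and the 0.5 factor are illustrative):

tone = AudioClip(lambda t: [np.sin(440 * 2 * np.pi * t)], duration=1, fps=22050)
half = multiply_volume(tone, 0.5)  # scales every sample by 0.5
assert np.allclose(half.to_soundarray(), tone.to_soundarray() * 0.5)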
Example 2
def test_multiply_volume_videoclip():
    start_time, end_time = (0.1, 0.2)

    clip = multiply_volume(
        VideoFileClip("media/chaplin.mp4").subclip(0, 0.3),
        0,
        start_time=start_time,
        end_time=end_time,
    )
    clip_soundarray = clip.audio.to_soundarray()

    assert len(clip_soundarray)

    expected_silence = np.zeros(clip_soundarray.shape[1])

    for i, frame in enumerate(clip_soundarray):
        t = i / clip.audio.fps
        if start_time <= t <= end_time:
            assert np.array_equal(frame, expected_silence)
        else:
            assert not np.array_equal(frame, expected_silence)
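
The same time-windowed form works on a bare AudioClip as well; a short sketch
(assuming the imports above, with illustrative times):

tone = AudioClip(lambda t: [np.sin(440 * 2 * np.pi * t)], duration=0.3, fps=22050)
# Only samples with 0.1 <= t <= 0.2 are scaled (here muted); the rest pass
# through unchanged.
ducked = multiply_volume(tone, 0, start_time=0.1, end_time=0.2)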
Example 3
def test_audio_delay(stereo_wave, duration, offset, n_repeats, decay):
    """Check that creating a short pulse of audio, the delay converts to a sound
    with the volume level in the form `-_-_-_-_-`, being `-` pulses expressed by
    `duration` argument and `_` being chunks of muted audio. Keep in mind that this
    way of test the FX only works if `duration <= offset`, but as does not make sense
    create a delay with `duration > offset`, this is enough for our purposes.

    Note that decayment values are not tested here, but are created using
    `multiply_volume`, should be OK.
    """
    # limits of this test
    assert n_repeats > 0  # at least one repetition, otherwise the FX makes no sense
    assert duration <= offset  # avoid wave distortion
    assert not offset * 1000000 % 2  # an odd offset makes the muted chunk size inaccurate

    # stereo audio clip
    clip = AudioClip(
        make_frame=stereo_wave(left_freq=440, right_freq=880),
        duration=duration,
        fps=44100,
    )
    clip_array = clip.to_soundarray()

    # stereo delayed clip
    delayed_clip = audio_delay(clip, offset=offset, n_repeats=n_repeats, decay=decay)
    delayed_clip_array = delayed_clip.to_soundarray()

    # size of the chunks that contain sound
    sound_chunk_size = clip_array.shape[0]
    # size of the muted chunks
    muted_chunk_size = int(sound_chunk_size * offset / duration) - sound_chunk_size

    zeros_expected_chunk_as_muted = np.zeros((muted_chunk_size, 2))

    decayments = np.linspace(1, max(0, decay), n_repeats)

    for i in range(n_repeats + 1):  # the first clip is not one of the repeats
        if i == n_repeats:
            # the delay ends in sound, so the last muted chunk does not exist
            break

        # sound chunk
        sound_start_at = i * sound_chunk_size + i * muted_chunk_size
        sound_ends_at = sound_start_at + sound_chunk_size

        # first sound chunk
        if i == 0:
            assert np.array_equal(
                delayed_clip_array[:, :][sound_start_at:sound_ends_at],
                multiply_volume(clip, decayments[i]).to_soundarray(),
            )

        # muted chunk
        mute_starts_at = sound_ends_at + 1
        mute_ends_at = mute_starts_at + muted_chunk_size

        assert np.array_equal(
            delayed_clip_array[:, :][mute_starts_at:mute_ends_at],
            zeros_expected_chunk_as_muted,
        )

        # check muted bounds
        assert not np.array_equal(
            delayed_clip_array[:, :][mute_starts_at - 1:mute_ends_at],
            zeros_expected_chunk_as_muted,
        )

        assert not np.array_equal(
            delayed_clip_array[:, :][mute_starts_at:mute_ends_at + 1],
            zeros_expected_chunk_as_muted,
        )
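
The `-_-_-` pattern described in the docstring can be reproduced directly; a
minimal sketch (assuming the imports above, with illustrative parameters):

pulse = AudioClip(lambda t: [np.sin(880 * 2 * np.pi * t)], duration=0.1, fps=44100)
# Each repeat starts 0.3 s after the previous one and is attenuated towards
# `decay`, so the result sounds like: pulse, silence, quieter pulse, ...
echoed = audio_delay(pulse, offset=0.3, n_repeats=3, decay=0.5)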
Example 4
def test_multiply_volume_audioclip(
    sound_type,
    factor,
    duration,
    start_time,
    end_time,
):
    if sound_type == "stereo":
        make_frame = lambda t: np.array([
            np.sin(440 * 2 * np.pi * t),
            np.sin(160 * 2 * np.pi * t),
        ]).T.copy(order="C")
    else:
        make_frame = lambda t: [np.sin(440 * 2 * np.pi * t)]

    clip = AudioClip(
        make_frame,
        duration=duration if duration else 0.1,
        fps=22050,
    )
    clip_array = clip.to_soundarray()

    clip_transformed = multiply_volume(
        clip,
        factor,
        start_time=start_time,
        end_time=end_time,
    )
    clip_transformed_array = clip_transformed.to_soundarray()

    assert len(clip_transformed_array)

    if hasattr(clip_array, "shape") and len(clip_array.shape) > 1:
        # stereo clip
        left_channel_transformed = clip_transformed_array[:, 0]
        right_channel_transformed = clip_transformed_array[:, 1]

        if start_time is None and end_time is None:
            expected_left_channel_transformed = clip_array[:, 0] * factor
            expected_right_channel_transformed = clip_array[:, 1] * factor
        else:
            start_time = convert_to_seconds(start_time) if start_time else clip.start
            end_time = convert_to_seconds(end_time) if end_time else clip.end

            expected_left_channel_transformed = np.array([])
            expected_right_channel_transformed = np.array([])
            for i, frame in enumerate(clip_array):
                t = i / clip.fps
                transformed_frame = frame * (
                    factor if start_time <= t <= end_time else 1)
                expected_left_channel_transformed = np.append(
                    expected_left_channel_transformed,
                    transformed_frame[0],
                )
                expected_right_channel_transformed = np.append(
                    expected_right_channel_transformed,
                    transformed_frame[1],
                )

        assert len(left_channel_transformed)
        assert len(expected_left_channel_transformed)
        assert np.array_equal(
            left_channel_transformed,
            expected_left_channel_transformed,
        )

        assert len(right_channel_transformed)
        assert len(expected_right_channel_transformed)
        assert np.array_equal(
            right_channel_transformed,
            expected_right_channel_transformed,
        )

    else:
        # mono clip

        if start_time is None and end_time is None:
            expected_clip_transformed_array = clip_array * factor
        else:
            start_time = convert_to_seconds(start_time) if start_time else clip.start
            end_time = convert_to_seconds(end_time) if end_time else clip.end

            expected_clip_transformed_array = np.array([])
            for i, frame in enumerate(clip_array[0]):
                t = i / clip.fps
                transformed_frame = frame * (
                    factor if start_time <= t <= end_time else 1)
                expected_clip_transformed_array = np.append(
                    expected_clip_transformed_array,
                    transformed_frame,
                )
            expected_clip_transformed_array = np.array([
                expected_clip_transformed_array,
            ])

        assert len(expected_clip_transformed_array)

        assert np.array_equal(
            expected_clip_transformed_array,
            clip_transformed_array,
        )
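
Note that `test_audio_delay` and `test_multiply_volume_audioclip` receive
their arguments from pytest parametrization (`stereo_wave` is a fixture)
rather than being called directly. A hedged sketch of what such a decorator
stack might look like (the value matrix here is illustrative, not the
project's actual one):

import pytest

@pytest.mark.parametrize("sound_type", ("mono", "stereo"))
@pytest.mark.parametrize("factor", (0, 2))
@pytest.mark.parametrize("duration", (None, 0.2))
@pytest.mark.parametrize(("start_time", "end_time"), ((None, None), (0.1, 0.2)))
def test_multiply_volume_audioclip(sound_type, factor, duration, start_time, end_time):
    ...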