def test_percussive():
    """Percussive separation should match librosa's reference output."""
    separated = percussive_separation(mono_audio)
    expected_samples = librosa.effects.percussive(
        librosa.to_mono(mono_audio.raw_samples), margin=3.0)
    expected = Audio(raw_samples=expected_samples,
                     sample_rate=mono_audio.sample_rate)
    assert np.allclose(separated.raw_samples, expected.raw_samples,
                       rtol=1e-3, atol=1e-4)
def test_time_stretch():
    """Time stretching should match librosa's reference output."""
    rate = 1.5
    stretched = time_stretch(mono_audio, rate)
    expected_samples = librosa.effects.time_stretch(
        librosa.to_mono(mono_audio.raw_samples), rate)
    expected = Audio(raw_samples=expected_samples,
                     sample_rate=mono_audio.sample_rate)
    assert np.allclose(stretched.raw_samples, expected.raw_samples,
                       rtol=1e-3, atol=1e-4)
def test_harmonic():
    """Harmonic separation should match librosa's reference output."""
    separated = harmonic_separation(mono_audio)
    expected_samples = librosa.effects.harmonic(
        librosa.to_mono(mono_audio.raw_samples), margin=3.0)
    expected = Audio(raw_samples=expected_samples,
                     sample_rate=mono_audio.sample_rate)
    assert np.allclose(separated.raw_samples, expected.raw_samples,
                       rtol=1e-3, atol=1e-4)
def run(file, output):
    """Reverse the beats of the track at `file` and write the remix to `output`."""
    from amen.audio import Audio
    from amen.synthesize import synthesize

    audio = Audio(file)
    beats = audio.timings['beats']
    beats.reverse()
    out = synthesize(beats)
    out.output(output)
def run(file, output):
    """Keep every third beat of the track at `file`; write the remix to `output`."""
    from amen.audio import Audio
    from amen.synthesize import synthesize

    audio = Audio(file)
    # Slice [::3] keeps indices 0, 3, 6, ... — the same beats the original
    # enumerate/modulo loop selected.
    kept_beats = audio.timings['beats'][::3]
    out = synthesize(kept_beats)
    out.output(output)
def make_audio(filepath):
    """Build an Echo Nest style ``AudioAnalysis`` object, for easier testing.

    Parameters
    ----------
    filepath : str
        Path to the local file to be analyzed.

    Returns
    -------
    AudioAnalysis
    """
    return AudioAnalysis(Audio(filepath))
def test_pitch_shift():
    """Pitch shifting should match librosa's reference output."""
    semitones = 4
    bins_per_octave = 24
    shifted = pitch_shift(mono_audio, semitones, step_size=bins_per_octave)
    expected_samples = librosa.effects.pitch_shift(
        librosa.to_mono(mono_audio.raw_samples), mono_audio.sample_rate,
        semitones, bins_per_octave=bins_per_octave)
    expected = Audio(raw_samples=expected_samples,
                     sample_rate=mono_audio.sample_rate)
    assert np.allclose(shifted.raw_samples, expected.raw_samples,
                       rtol=1e-3, atol=1e-4)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""reverse.py : Reverse the beats of a song."""
from amen.audio import Audio
from amen.utils import example_audio_file
from amen.synthesize import synthesize

audio_file = example_audio_file()
audio = Audio(audio_file)

# Reverse the detected beat order in place, then render.
beats = audio.timings['beats']
beats.reverse()

out = synthesize(beats)
out.output('reversed.wav')
def test_time():
    """A TimeSlice defaults to seconds for its start time."""
    assert time_slice.time == pd.to_timedelta(t, 's')


def test_duration():
    """A TimeSlice defaults to seconds for its duration."""
    assert time_slice.duration == pd.to_timedelta(d, 's')


def test_units():
    """A TimeSlice built with unit='ms' interprets t in milliseconds."""
    time_slice = TimeSlice(t, d, dummy_audio, unit='ms')
    assert time_slice.time == pd.to_timedelta(t, 'ms')


EXAMPLE_FILE = example_audio_file()
stereo_audio = Audio(EXAMPLE_FILE)
time_slice = TimeSlice(t, d, stereo_audio)

EXAMPLE_MONO_FILE = example_mono_audio_file()
# BUG FIX: this previously loaded EXAMPLE_FILE (the stereo fixture), leaving
# EXAMPLE_MONO_FILE unused — the mono tests were silently run on stereo audio.
mono_audio = Audio(EXAMPLE_MONO_FILE)


def test_get_offsets():
    """_get_offsets returns the (left, right) sample offsets for a slice."""
    left, right = time_slice._get_offsets(3, 4, stereo_audio.num_channels)
    assert left == (-1, 3)


def test_offset_samples_mono():
    """_offset_samples on mono audio still yields a 2-row sample array."""
    res = mono_audio.timings['beats'][0]._offset_samples(
        1, 2, (-1, 1), (-1, 1), mono_audio.num_channels)
    assert res.shape == (2, 3)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the timing lists exposed by amen.audio.Audio."""
from amen.audio import Audio
from amen.utils import example_audio_file
from amen.timing import TimingList
from nose.tools import eq_

EXAMPLE_FILE = example_audio_file()
AUDIO = Audio(EXAMPLE_FILE)


def _check_timings(kind, expected_len):
    # Shared assertion body: every timing kind is a TimingList of a known length.
    timings = AUDIO.timings[kind]
    assert isinstance(timings, TimingList)
    eq_(len(timings), expected_len)


def test_track():
    _check_timings('track', 1)


def test_beats():
    _check_timings('beats', 11)


def test_segments():
    _check_timings('segments', 42)
"""Shuffle the beats of the first three tracks in a directory into two remixes."""
import os
import random
import sys

from amen.utils import example_audio_file
from amen.audio import Audio
from amen.synthesize import synthesize

# Directory (relative to the current working directory) holding the tracks.
d = "\\stuff\\music"

print("test")

aud = []
print(os.curdir)
os.chdir(os.curdir + d)
print(os.curdir)
for f in os.listdir(os.curdir):
    print(f)
    # NOTE(review): assumes every directory entry is a loadable audio file
    # and that at least three exist — confirm before running on other dirs.
    aud.append(Audio(f))

# Pool the beats of the first three tracks and shuffle them together.
beats = aud[0].timings['beats']
beats.extend(aud[1].timings['beats'])
beats.extend(aud[2].timings['beats'])
print(beats)
random.shuffle(beats)
print(beats)

# Split the shuffled pool into two halves and render each half separately.
half = len(beats) // 2
# BUG FIX: was beats[:half - 1], which silently dropped the beat at half - 1.
out1 = synthesize(beats[:half])
out2 = synthesize(beats[half:])
out1.output('1.wav')
# BUG FIX: was out1.output('2.wav'), writing the first mix twice and never
# saving the second.
out2.output('2.wav')
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import tempfile import numpy as np import librosa from amen.audio import Audio from amen.feature import FeatureCollection from amen.utils import example_audio_file EXAMPLE_FILE = example_audio_file() audio = Audio(EXAMPLE_FILE) mono_audio = Audio(EXAMPLE_FILE, convert_to_mono=True, sample_rate=44100) def test_has_feature_collection(): assert (type(mono_audio.features) == FeatureCollection) def test_has_amplitude_feature(): res = librosa.feature.rmse(mono_audio.analysis_samples)[0] assert (mono_audio.features["amplitude"].data.iloc[0].item() == res[0]) def test_has_centroid_feature(): res = librosa.feature.spectral_centroid(mono_audio.analysis_samples)[0] assert (mono_audio.features["centroid"].data.iloc[0].item() == res[0]) def test_has_timbre_feature():
"""Tests for amen.feature.Feature construction and validation."""
import librosa
import numpy as np
import pandas as pd
from nose.tools import assert_raises
from pandas.util.testing import assert_frame_equal
from amen.audio import Audio
from amen.feature import Feature
from amen.feature import FeatureCollection
from amen.timing import TimeSlice
from amen.utils import example_audio_file
from amen.exceptions import FeatureError

EXAMPLE_FILE = example_audio_file()
audio = Audio(EXAMPLE_FILE)

# Build a 1000-row Feature fixture indexed by evenly spaced timedeltas.
test_times = np.linspace(0, 10, num=1000)
test_index = pd.to_timedelta(test_times, unit='s')
test_dataframe = pd.DataFrame(data=audio.analysis_samples[:1000],
                              index=test_index)
test_feature = Feature(test_dataframe)


# Test init
def test_data_validation():
    # Feature must reject anything that is not a DataFrame.
    assert_raises(AssertionError, Feature, [1, 2, 3])
def test_audio_from_raw_samples():
    """An Audio built from raw samples should round-trip those samples."""
    rebuilt = Audio(raw_samples=audio.raw_samples)
    assert np.allclose(rebuilt.raw_samples, audio.raw_samples,
                       rtol=1e-3, atol=1e-4)
# -*- coding: utf-8 -*-
"""Tests for amen.synthesize input formatting."""
from nose.tools import assert_raises
import six
import pandas as pd
import numpy as np
import librosa
from amen.audio import Audio
from amen.utils import example_audio_file
from amen.utils import example_mono_audio_file
from amen.synthesize import _format_inputs
from amen.synthesize import synthesize
from amen.exceptions import SynthesizeError

EXAMPLE_FILE = example_audio_file()
audio = Audio(EXAMPLE_FILE)


def test_format_inputs_length():
    # Formatting must preserve the number of beats.
    beats = audio.timings['beats']
    formatted = list(_format_inputs(beats))
    assert (len(beats) == len(formatted))


def test_format_inputs_list():
    # Each formatted entry pairs a beat with its own start time.
    beats = audio.timings['beats']
    formatted = list(_format_inputs(beats))
    first_beat = beats[0]
    assert (formatted[0] == (first_beat, first_beat.time))