# Imports assumed by these test snippets (inferred from the call sites):
import numpy
import nose.tools as nt
from os.path import dirname, join as pjoin
from IPython.lib import display


def test_audio_data_without_normalization_raises_for_invalid_data(self):
    # Samples outside [-1, 1] must be rejected when normalization is off.
    nt.assert_raises(
        ValueError,
        lambda: display.Audio([1.001], rate=44100, normalize=False))
    nt.assert_raises(
        ValueError,
        lambda: display.Audio([-1.001], rate=44100, normalize=False))
def test_audio_data_without_normalization(self):
    # With normalize=False, the encoded peak must scale with the input peak.
    max_int16 = numpy.iinfo(numpy.int16).max
    for scale in [1, 0.5, 0.2]:
        test_tone = get_test_tone(scale)
        test_tone_max_abs = numpy.max(numpy.abs(test_tone))
        expected_max_value = int(max_int16 * test_tone_max_abs)
        audio = display.Audio(test_tone, rate=44100, normalize=False)
        actual_max_value = numpy.max(numpy.abs(read_wav(audio.data)))
        nt.assert_equal(actual_max_value, expected_max_value)
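# The tests above rely on two helpers, get_test_tone and read_wav, that are
# not shown in this section. A minimal sketch of plausible definitions,
# inferred from the call sites (the 440 Hz tone and the mono 16-bit WAV
# layout are assumptions, not taken from the source):
import wave
from io import BytesIO


def get_test_tone(scale=1):
    # One second of a 440 Hz sine at 44100 Hz, scaled to the given amplitude.
    rate = 44100
    t = numpy.linspace(0, 1, rate, endpoint=False)
    return numpy.sin(2 * numpy.pi * 440 * t) * scale


def read_wav(data):
    # Decode the WAV payload produced by display.Audio back into int16 samples.
    with wave.open(BytesIO(data)) as f:
        return numpy.frombuffer(f.readframes(f.getnframes()), dtype=numpy.int16)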
def test_audio_from_file():
    path = pjoin(dirname(__file__), 'test.wav')
    display.Audio(filename=path)
import numpy as np
import matplotlib.pyplot as plt
import scipy.io.wavfile
from IPython import display

# spec and trim are project-local modules (assumed importable here).
import spec
import trim

fname = 'f1.wav'

# Plot the raw waveform.
rate, data = scipy.io.wavfile.read(fname)
data = data.astype(float)  # np.float is deprecated; use the builtin float
t = np.linspace(0, len(data) / rate, len(data))
plt.plot(t, data)
plt.ylabel('Magnitude')
plt.xlabel('Time (s)')
plt.show()

# Object that converts wav files to spectrograms.
c = spec.Wav2Spectrogram()
s = c.convert(open(fname, 'rb'), window_length=2048, window_step=1024,
              spectrum_type='log')

# Keep only the 0-5 kHz band.
tr = trim.TrimSpectrogram()
s = tr.trim(s, min_freq=0, max_freq=5000)

# Normalize and invert so that high energy shows as dark pixels.
d = s.data
d = d / np.max(d)
d = 1 - d

min_freq = s.metadata.min_freq
max_freq = s.metadata.max_freq
min_time = s.metadata.min_time
max_time = s.metadata.max_time
im = plt.imshow(d, aspect='auto', origin='lower', cmap=plt.cm.gray,
                extent=[min_time, max_time, min_freq / 1000.0, max_freq / 1000.0])
plt.xlabel('Time (s)')
plt.ylabel('Frequency (kHz)')
plt.show()

display.Audio(fname)
def test_audio_raises_for_nested_list(self):
    # Multi-channel input given as nested lists is not supported.
    stereo_signal = [list(get_test_tone())] * 2
    nt.assert_raises(
        TypeError,
        lambda: display.Audio(stereo_signal, rate=44100))
def test_audio_data_normalization(self):
    # With normalization on, the encoded peak is always the int16 maximum,
    # regardless of the input amplitude.
    expected_max_value = numpy.iinfo(numpy.int16).max
    for scale in [1, 0.5, 2]:
        audio = display.Audio(get_test_tone(scale), rate=44100)
        actual_max_value = numpy.max(numpy.abs(read_wav(audio.data)))
        nt.assert_equal(actual_max_value, expected_max_value)
def test_audio_from_list(self):
    test_tone = get_test_tone()
    audio = display.Audio(list(test_tone), rate=44100)
    nt.assert_equal(len(read_wav(audio.data)), len(test_tone))
def test_audio_from_numpy_array(self):
    test_tone = get_test_tone()
    audio = display.Audio(test_tone, rate=44100)
    nt.assert_equal(len(read_wav(audio.data)), len(test_tone))
# Beat tracking example
from __future__ import print_function

import librosa
import IPython.lib.display as player

# 1. Get the file path to the audio file
filename = "test3.mp3"

# 2. Load the audio as a waveform `y` and store the sampling rate as `sr`
y, sr = librosa.load(filename)
player.Audio(filename=filename)

# 3. Run the default beat tracker and the onset detector
tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)
onset_frames = librosa.onset.onset_detect(y=y, sr=sr)
print('Estimated tempo: {:.2f} beats per minute'.format(tempo))

# 4. Convert the frame indices of beat and onset events into timestamps
beat_times = librosa.frames_to_time(beat_frames, sr=sr)
onset_times = librosa.frames_to_time(onset_frames, sr=sr)

print('Saving output to beat_times.csv')
librosa.output.times_csv('beat_times.csv', beat_times)
librosa.output.times_csv('onset_times.csv', onset_times)
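# Note: the librosa.output module (including times_csv) was deprecated in
# librosa 0.7 and removed in 0.8, so the last two lines only work on older
# versions. On newer librosa, a minimal replacement is to write the
# timestamps directly, e.g. with numpy.savetxt (a sketch, assuming one
# timestamp in seconds per line):
import numpy as np

np.savetxt('beat_times.csv', beat_times, fmt='%.3f')
np.savetxt('onset_times.csv', onset_times, fmt='%.3f')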