# NOTE(review): the plotting statements below reference `fig`, `ax`, `time`,
# and `pitch`, which are not defined in this chunk -- they appear to be the
# tail of a `demo_klapuri(audio)` function whose header sits above this view;
# confirm against the full file before moving them.
fig.suptitle("Multi-pitch estimation using Klapuri's iterative method", fontsize=16)
ax.set_title("Piano roll of Beethoven's \"Für Elise\"")
for m in range(pitch.shape[0]):
    # one scatter series per estimated voice (row of the pitch matrix)
    ax.scatter(time, pitch[m])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Pitch (MIDI)')
ax.set_ylim(20, 109)  # roughly the full piano range in MIDI numbers
plt.show()


if __name__ == '__main__':
    # Usage: python <script> [filename] [duration_in_seconds]
    if len(sys.argv) > 3:
        print("Wrong number of arguments", file=sys.stderr)
        print("Usage: python %s [filename] [duration_in_seconds]" % sys.argv[0], file=sys.stderr)
        sys.exit(1)
    elif len(sys.argv) > 1:
        audio_file = sys.argv[1]
        audio = AudioLoader(audio_file)
        if len(sys.argv) == 3:
            # BUG FIX: sys.argv entries are strings, but every other call
            # site passes `cut` a numeric duration (e.g. `cut(stop=10)`);
            # convert the CLI argument before cutting.
            audio.cut(stop=float(sys.argv[2]))
    else:
        # no arguments: fall back to the bundled sample, trimmed to 10 s
        audio_file = "../samples/polyphonic/furElise.wav"
        audio = AudioLoader(audio_file)
        audio.cut(stop=10)
        print("Using first ", 10, " seconds of ", audio_file)
    demo_klapuri(audio)
# Demo: compare two monophonic pitch trackers (yin vs. yinfft) on one file.
# FIX: `AudioLoader` and `MonoPitch` were used below but never imported.
# Module paths inferred from the sibling script's
# `from muallef.io import AudioLoader` / `from muallef.pitch import MultiPitch`
# -- TODO confirm `MonoPitch` really lives in `muallef.pitch`.
from muallef.io import AudioLoader
from muallef.pitch import MonoPitch
from muallef.util.units import convertFreq, Hz_to_MIDI
from muallef.util import normalize
from matplotlib import pyplot as plt
from sys import argv
import numpy as np

# Pick the audio file from the command line, falling back to a bundled sample.
if len(argv) == 2:
    audio_file = argv[1]
else:
    #audio_file = "samples/polyphonic/furElise.wav"
    #audio_file = 'samples/polyphonic/chopin_cut.wav'
    audio_file = 'samples/monophonic/czardas_cut.wav'

audio = AudioLoader(audio_file)
audio.cut(stop=10)  # analyse only the first 10 seconds
fs = audio.sampleRate
x = audio.signal

# Run both trackers; keep each frame's (normalized) confidence for plotting.
yin = MonoPitch(x, fs, method='yin')
yin_f0 = yin()
yin_conf = yin.get_confidence(normalize=True)

yinfft = MonoPitch(x, fs, method='yinfft')
yinfft_f0 = yinfft()
yinfft_conf = yinfft.get_confidence(normalize=True)

time = audio.time(len(yin_f0))

# Overlay both f0 tracks; marker size encodes the tracker's confidence.
fig, ax = plt.subplots()
ax.scatter(time, yinfft_f0, c='red', s=10*yinfft_conf)
ax.scatter(time, yin_f0, c='blue', s=10*yin_conf)
# Demo: multi-pitch estimation (Klapuri's iterative method) plotted as a
# piano roll of the first 10 seconds of "Für Elise".
from muallef.io import AudioLoader
from muallef.pitch import MultiPitch
from muallef.util.units import Hz_to_MIDI
import numpy as np
import matplotlib.pyplot as plt

# matplotlib options
from matplotlib import rcParams
rcParams['savefig.transparent'] = True
rcParams['text.usetex'] = True

# load audio
audio_file = "samples/polyphonic/furElise.wav"
audio = AudioLoader(audio_file)
audio.cut(stop=10)  # keep only the first 10 seconds
fs = audio.sampleRate
x = audio.signal

frameSize = 2048
klapuri = MultiPitch(x, fs, method='klapuri', frameSize=frameSize)
# pitch has shape (voices, frames); convert Hz estimates to MIDI numbers
pitch = Hz_to_MIDI(klapuri())
# one time stamp per analysis frame; frameSize/fs is the frame duration in
# seconds (assumes non-overlapping frames -- TODO confirm hop == frameSize)
time = np.arange(pitch.shape[1]) * (frameSize / fs)

fig, ax = plt.subplots()
fig.suptitle("Multi-pitch estimation using Klapuri's iterative method", fontsize=16)
ax.set_title("Piano roll of Beethoven's \"Für Elise\"")
for m in range(pitch.shape[0]):
    # one scatter series per estimated voice
    ax.scatter(time, pitch[m])
# Demo: compare several onset-detection functions on the same recording.
# NOTE(review): `AudioLoader` and `Onset` are used below but not imported in
# this chunk -- their imports presumably sit above this view; confirm.
import matplotlib.pyplot as plt

# matplotlib options
from matplotlib import rcParams
rcParams['savefig.transparent'] = True
rcParams['text.usetex'] = True

# Methods to compare: display label -> identifier passed to Onset().
method = {
    'HFC': 'hfc',
    'Complex': 'complex',
    'Phase Deviation': 'complex_phase',
}

# load audio
audio_file = "samples/polyphonic/furElise.wav"
audio = AudioLoader(audio_file)
audio.cut(stop=10)  # keep only the first 10 seconds
fs = audio.sampleRate
x = audio.signal
t = audio.time()

# One subplot for the raw signal plus one per detection method, sharing x.
fig, ax = plt.subplots(len(method) + 1, 1, sharex=True)
i = 0
ax[i].plot(t, x)
ax[i].yaxis.set_ticklabels([])  # amplitude scale is irrelevant here
ax[i].set_ylabel("Signal")

# IDIOM FIX: iterate key/value pairs directly instead of re-indexing the
# dict with `method[key]` inside the loop.
for key, method_name in method.items():
    i += 1
    onset = Onset(x, fs, method=method_name)
    onsets = onset()
    ax[i].plot(onset.onsetTime, onset.onsetFunction)