def spectral_features(filelist):
    """
    Given a list of files, retrieve them, analyse the first 100 ms of each file and return a feature table.
    """
    number_of_files = len(filelist)
    number_of_features = 5
    features = np.zeros([number_of_files, number_of_features])
    sample_rate = 44100

    for file_index, url in enumerate(filelist):
        print(url)
        urllib.request.urlretrieve(url, filename='/tmp/localfile.wav')
        audio = MonoLoader(filename='/tmp/localfile.wav', sampleRate=sample_rate)()
        zcr = ZeroCrossingRate()
        hamming_window = Windowing(type='hamming')  # we need to window the frame to avoid FFT artifacts
        spectrum = Spectrum()
        central_moments = CentralMoments()
        distributionshape = DistributionShape()
        spectral_centroid = Centroid()

        frame_size = int(round(0.100 * sample_rate))  # 100 ms

        # Only do the first frame for now.
        # TODO: we should generate values for the entire file, probably by averaging the features.
        current_frame = audio[0:frame_size]
        features[file_index, 0] = zcr(current_frame)

        spectral_magnitude = spectrum(hamming_window(current_frame))
        centroid = spectral_centroid(spectral_magnitude)
        spectral_moments = distributionshape(central_moments(spectral_magnitude))
        features[file_index, 1] = centroid
        features[file_index, 2:5] = spectral_moments

    return features
def file_to_hpcp(filename):
    audio = MonoLoader(filename=filename)()

    windowing = Windowing(type='blackmanharris62')
    spectrum = Spectrum()
    spectral_peaks = SpectralPeaks(orderBy='magnitude',
                                   magnitudeThreshold=0.001,
                                   maxPeaks=20,
                                   minFrequency=20,
                                   maxFrequency=8000)
    hpcp = HPCP(maxFrequency=8000)  # , normalized='unitSum')  # TODO: verify that this is the sensible thing to do

    spec_group = []
    hpcp_group = []
    for frame in FrameGenerator(audio, frameSize=1024, hopSize=512):
        windowed = windowing(frame)
        fft = spectrum(windowed)
        frequencies, magnitudes = spectral_peaks(fft)
        final_hpcp = hpcp(frequencies, magnitudes)
        spec_group.append(fft)
        hpcp_group.append(final_hpcp)

    mean_hpcp = np.mean(np.array(hpcp_group).T, axis=1)
    return mean_hpcp
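# Hypothetical usage sketch for file_to_hpcp above (not part of the original snippets):
# it assumes `from essentia.standard import *` and `import numpy as np`, as elsewhere in this file,
# and the audio path is purely illustrative.
mean_chroma = file_to_hpcp('/tmp/example.wav')
print(mean_chroma.shape, mean_chroma)  # one averaged HPCP (chroma) vector for the whole file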
def hfc(filename):
    audio = MonoLoader(filename=filename, sampleRate=44100)()
    features = []
    for frame in FrameGenerator(audio, frameSize=1024, hopSize=512):
        mag, phase = CartesianToPolar()(FFT()(Windowing(type='hann')(frame)))
        features.append(OnsetDetection(method='hfc')(mag, phase))
    return Onsets()(array([features]), [1])
def noveltycurve(filename):
    audio = MonoLoader(filename=filename, sampleRate=44100)()
    band_energy = []
    for frame in FrameGenerator(audio, frameSize=1024, hopSize=512):
        mag, phase = CartesianToPolar()(FFT()(Windowing(type='hann')(frame)))
        band_energy.append(FrequencyBands()(mag))
    novelty = NoveltyCurve()(band_energy)
    return Onsets()(np.array([novelty]), [1])
def feature_allframes(input_features, frame_indexer=None):
    audio = input_features['audio']
    beats = input_features['beats']

    # Initialise the algorithms
    w = Windowing(type='hann')
    spectrum = Spectrum()  # FFT would return the complex FFT; we only want the magnitude
    melbands = MelBands(numberBands=NUMBER_BANDS)
    #~ mfcc = MFCC(numberBands=NUMBER_BANDS, numberCoefficients=NUMBER_COEFF)
    pool = Pool()

    if frame_indexer is None:
        # Exclude the first frames, because they have no predecessor to calculate a difference with
        frame_indexer = range(4, len(beats) - 1)

    # 13 MFCC coefficients (commented-out variant)
    # 40 mel band energies
    #~ mfcc_coeffs = np.zeros((len(beats), NUMBER_COEFF))
    mfcc_bands = np.zeros((len(beats), NUMBER_BANDS))

    # Per indexed frame: differences between the mel band energies of this frame
    # and those of frames i+1, i+2, i+3 and i-1
    #~ mfcc_coeff_diff = np.zeros((len(beats), NUMBER_COEFF))
    mfcc_bands_diff = np.zeros((len(beats), NUMBER_BANDS * 4))

    # Step 1: Calculate the mel band energies framewise,
    # for all frames needed by a frame in the frame_indexer
    for i in [i for i in range(len(beats)) if (i in frame_indexer) or (i + 1 in frame_indexer)
              or (i - 1 in frame_indexer) or (i - 2 in frame_indexer) or (i - 3 in frame_indexer)]:
        SAMPLE_RATE = 44100
        start_sample = int(beats[i] * SAMPLE_RATE)
        end_sample = int(beats[i + 1] * SAMPLE_RATE)
        # print(start_sample, end_sample)
        # Keep the frame length even, as required by Spectrum
        frame = audio[start_sample: end_sample if (start_sample - end_sample) % 2 == 0 else end_sample - 1]
        bands = melbands(spectrum(w(frame)))
        #~ bands, coeffs = mfcc(spectrum(w(frame)))
        #~ mfcc_coeffs[i] = coeffs
        mfcc_bands[i] = bands

    # Step 2: Calculate the differences between the mel band energies of neighbouring frames
    for i in frame_indexer:
        # The norm of the difference is usually very high around the downbeat, because of melodic changes there!
        #~ mfcc_coeff_diff[i] = mfcc_coeffs[i+1] - mfcc_coeffs[i]
        mfcc_bands_diff[i][0 * NUMBER_BANDS: 1 * NUMBER_BANDS] = mfcc_bands[i + 1] - mfcc_bands[i]
        mfcc_bands_diff[i][1 * NUMBER_BANDS: 2 * NUMBER_BANDS] = mfcc_bands[i + 2] - mfcc_bands[i]
        mfcc_bands_diff[i][2 * NUMBER_BANDS: 3 * NUMBER_BANDS] = mfcc_bands[i + 3] - mfcc_bands[i]
        mfcc_bands_diff[i][3 * NUMBER_BANDS: 4 * NUMBER_BANDS] = mfcc_bands[i] - mfcc_bands[i - 1]

    # Only the mel band differences of the indexed frames are used as features
    result = mfcc_bands_diff[frame_indexer]
    #~ result = np.append(mfcc_coeff_diff[frame_indexer], mfcc_bands_diff[frame_indexer], axis=1)
    return preprocessing.scale(result)
def spectralCentroid(audio, params):
    """ hop size, frame size, window type """
    hopSize, frameSize, wtype = params
    w = Windowing(type=wtype)
    spec = Spectrum()
    result = []
    centroid = ess.Centroid(range=int(44100 / 2))
    for frame in ess.FrameGenerator(audio, frameSize=frameSize, hopSize=hopSize):
        sf = spec(w(frame))
        result.append(centroid(sf))
    return np.asarray(result), hopSize
def rms(audio, params):
    """ hop size, frame size, window type """
    hopSize, frameSize, wtype = params
    w = Windowing(type=wtype)
    spec = Spectrum()
    result = []
    RMS = ess.RMS()
    for frame in ess.FrameGenerator(audio, frameSize=frameSize, hopSize=hopSize):
        sf = spec(w(frame))
        result.append(RMS(sf))
    return np.asarray(result), hopSize
def __init__(self, input_blocksize=1024, input_stepsize=512):
    super(Essentia_Dissonance, self).__init__()
    self.input_blocksize = input_blocksize
    self.input_stepsize = min(input_stepsize, self.input_blocksize)
    self.windower = Windowing(type='blackmanharris62')
    self.spec_alg = None
    self.spec_peaks_alg = None
    self.dissonance_alg = Dissonance()
    self.dissonance = []
def calculateDownbeats(self, audio, bpm, phase):
    # Step 0: calculate the CSD (Complex Spectral Difference) features
    # and the associated onset detection function ON THE LOWPASSED SIGNAL
    spec = Spectrum(size=self.FRAME_SIZE)
    w = Windowing(type='hann')
    fft = FFT()
    c2p = CartesianToPolar()
    od_csd = OnsetDetection(method='complex')
    lowpass = LowPass(cutoffFrequency=1500)
    pool = Pool()

    # TODO test faster (numpy) way
    #audio = lowpass(audio)
    for frame in FrameGenerator(audio, frameSize=self.FRAME_SIZE, hopSize=self.HOP_SIZE):
        mag, ph = c2p(fft(w(frame)))
        pool.add('onsets.complex', od_csd(mag, ph))

    # Step 1: normalise the data using an adaptive mean threshold
    novelty_mean = self.adaptive_mean(pool['onsets.complex'], 16.0)

    # Step 2: half-wave rectify the result
    novelty_hwr = (pool['onsets.complex'] - novelty_mean).clip(min=0)

    # Step 7 (experimental): Determine the downbeat locations as the beat subsequence
    # with the highest complex spectral difference
    for i in range(4):
        phase_frames = (phase * 44100.0) / 512.0
        # Discard the last value to prevent reading beyond the array (it may have been rounded up)
        frames = (np.round(np.arange(phase_frames + i * self.numFramesPerBeat(bpm),
                                     np.size(novelty_hwr),
                                     4 * self.numFramesPerBeat(bpm))).astype('int'))[:-1]
        pool.add('output.downbeat', np.sum(novelty_hwr[frames]) / np.size(frames))

        plt.subplot(4, 1, i + 1)
        plt.plot(novelty_hwr)
        for f in frames:
            plt.axvline(x=f)

    print(pool['output.downbeat'])
    downbeatIndex = np.argmax(pool['output.downbeat'])
    plt.show()

    # experimental
    return 1.0 * self.beats[downbeatIndex::4]
def feature_allframes(input_features, frame_indexer=None):
    audio = input_features['audio']
    beats = input_features['beats']

    # Initialise the algorithms
    w = Windowing(type='hann')
    loudness = Loudness()

    if frame_indexer is None:
        # Exclude the first frame, because it has no predecessor to calculate a difference with
        frame_indexer = range(1, len(beats) - 1)

    # 1 loudness value per beat frame
    loudness_values = np.zeros((len(beats), 1))
    # 9 difference values between the loudness of the current frame and that of neighbouring frames
    loudness_differences = np.zeros((len(beats), 9))

    # Step 1: Calculate the loudness framewise,
    # for all frames needed by a frame in the frame_indexer
    for i in [i for i in range(len(beats)) if (i in frame_indexer) or (i + 1 in frame_indexer)
              or (i - 1 in frame_indexer) or (i - 2 in frame_indexer) or (i - 3 in frame_indexer)
              or (i - 4 in frame_indexer) or (i - 5 in frame_indexer) or (i - 6 in frame_indexer)
              or (i - 7 in frame_indexer) or (i - 8 in frame_indexer)]:
        SAMPLE_RATE = 44100
        start_sample = int(beats[i] * SAMPLE_RATE)
        end_sample = int(beats[i + 1] * SAMPLE_RATE)
        # print(start_sample, end_sample)
        # Keep the frame length even
        frame = audio[start_sample: end_sample if (start_sample - end_sample) % 2 == 0 else end_sample - 1]
        loudness_values[i] = loudness(w(frame))

    # Step 2: Calculate the loudness differences between neighbouring frames
    for i in frame_indexer:
        loudness_differences[i][0] = loudness_values[i] - loudness_values[i - 1]
        loudness_differences[i][1] = loudness_values[i + 1] - loudness_values[i]
        loudness_differences[i][2] = loudness_values[i + 2] - loudness_values[i]
        loudness_differences[i][3] = loudness_values[i + 3] - loudness_values[i]
        loudness_differences[i][4] = loudness_values[i + 4] - loudness_values[i]
        loudness_differences[i][5] = loudness_values[i + 5] - loudness_values[i]
        loudness_differences[i][6] = loudness_values[i + 6] - loudness_values[i]
        loudness_differences[i][7] = loudness_values[i + 7] - loudness_values[i]
        loudness_differences[i][8] = loudness_values[i - 1] - loudness_values[i + 1]

    # Only the loudness differences of the indexed frames are used as features
    result = loudness_differences[frame_indexer]
    return preprocessing.scale(result)
def __call__(self, audio, SR, sumThreshold=1e-5):
    self.__reset__()
    if audio.ndim > 1:
        audio = np.sum(audio, axis=1) / audio.ndim

    fcIndexArr = []
    self.hist = np.zeros(int(self.frameSize / 2 + 1))
    fft = FFT(size=self.frameSize)  # declare FFT function
    window = Windowing(size=self.frameSize, type="hann")  # declare windowing function
    self.avgFrames = np.zeros(int(self.frameSize / 2) + 1)

    maxNrg = max([sum(abs(fft(window(frame)))**2)
                  for frame in FrameGenerator(audio,
                                              frameSize=self.frameSize,
                                              hopSize=self.hopSize,
                                              startFromZero=True)])

    for i, frame in enumerate(FrameGenerator(audio,
                                             frameSize=self.frameSize,
                                             hopSize=self.hopSize,
                                             startFromZero=True)):
        frame = window(frame)  # apply window to the frame
        frameFft = abs(fft(frame))
        nrg = sum(frameFft**2)
        if nrg >= 0.1 * maxNrg:
            for j in reversed(range(len(frameFft))):
                if sum(frameFft[j:] / j) >= sumThreshold:
                    fcIndexArr.append(j)
                    self.hist[j] += nrg
                    break
            self.avgFrames = self.avgFrames + frameFft

    if len(fcIndexArr) == 0:
        fcIndexArr.append(int(self.frameSize / 2) + 1)
        self.hist[int(self.frameSize / 2)] += 1

    self.avgFrames /= (i + 1)
    self.mostLikelyBin, conf, binary = self.__computeMeanFc(
        fcIndexArr, np.arange(int(self.frameSize / 2) + 2), hist=self.hist)

    return self.mostLikelyBin * SR / self.frameSize, conf, binary
def mel40_analyzer():
    window = Windowing(size=256, type='blackmanharris62')
    spectrum = Spectrum(size=256)
    mel = MelBands(inputSize=129,
                   numberBands=40,
                   lowFrequencyBound=27.5,
                   highFrequencyBound=8000.0,
                   sampleRate=16000.0)

    def analyzer(samples):
        feats = []
        for frame in FrameGenerator(samples, 256, 160):
            frame_feats = mel(spectrum(window(frame)))
            frame_feats = np.log(frame_feats + 1e-16)
            feats.append(frame_feats)
        return np.array(feats)

    return analyzer
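# Hypothetical usage sketch for mel40_analyzer above (not part of the original snippets):
# it assumes 16 kHz mono input, matching the sampleRate the MelBands instance was configured with,
# and the audio path is purely illustrative.
analyzer = mel40_analyzer()
samples = MonoLoader(filename='/tmp/example_16k.wav', sampleRate=16000)()
log_mel = analyzer(samples)
print(log_mel.shape)  # (num_frames, 40) log mel band energies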
def rms_centroids(filename, frameSize=1024, hopSize=512, sampleRate=44100):
    # load our audio into an array
    audio = MonoLoader(filename=filename, sampleRate=sampleRate)()

    # create the necessary algorithms
    w = Windowing()
    spec = Spectrum()
    rms = RMS()
    centroid = Centroid(range=int(sampleRate / 2))

    cs = []
    rmss = []

    # compute the RMS and centroid for all frames in our audio
    for frame in FrameGenerator(audio, frameSize=frameSize, hopSize=hopSize):
        sf = spec(w(frame))
        cs.append(centroid(sf))
        rmss.append(rms(sf))

    return np.array(rmss), np.array(cs)
def create_analyzers(fs=44100.0,
                     nhop=512,
                     nffts=[1024, 2048, 4096],
                     mel_nband=80,
                     mel_freqlo=27.5,
                     mel_freqhi=16000.0):
    analyzers = []
    for nfft in nffts:
        window = Windowing(size=nfft, type='blackmanharris62')
        spectrum = Spectrum(size=nfft)
        mel = MelBands(inputSize=(nfft // 2) + 1,
                       numberBands=mel_nband,
                       lowFrequencyBound=mel_freqlo,
                       highFrequencyBound=mel_freqhi,
                       sampleRate=fs)
        analyzers.append((window, spectrum, mel))
    return analyzers
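# Hypothetical usage sketch for create_analyzers above (not part of the original snippets):
# each (window, spectrum, mel) triple is run with its own frame size while sharing one hop size;
# the audio path is purely illustrative.
nffts = [1024, 2048, 4096]
audio = MonoLoader(filename='/tmp/example.wav', sampleRate=44100)()
for (window, spectrum, mel), nfft in zip(create_analyzers(nhop=512, nffts=nffts), nffts):
    feats = [mel(spectrum(window(frame)))
             for frame in FrameGenerator(audio, frameSize=nfft, hopSize=512)]
    print(nfft, np.shape(feats))  # (num_frames, 80) mel band energies per FFT size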
def hcdf(filename):
    audio = MonoLoader(filename=filename)()
    windowing = Windowing(type='hann')

    for frame in FrameGenerator(audio, frameSize=32768, hopSize=4096):
        windowed = windowing(frame)
        print('window', windowed)

        # ConstantQ transform
        # constant_q = ConstantQ(binsPerOctave=36, minFrequency=110, maxFrequency=3520, sampleRate=11025)
        # kk = constant_q(windowed)

        # 12-bin tuned chromagram
        # TODO: ask the Russian dev to add this
        chroma = Chromagram(numberBins=12, binsPerOctave=36, minFrequency=110,
                            windowType='hann')  # maxFrequency=3520
        pitch_class_vectors = chroma(frame)
        print('pitch_class_vectors', pitch_class_vectors)
def ninos(filename, gamma=0.94):
    """
    Reference: Mounir, M., Karsmakers, P., & Van Waterschoot, T. (2016).
    Guitar note onset detection based on a spectral sparsity measure.
    European Signal Processing Conference. https://doi.org/10.1109/EUSIPCO.2016.7760394
    """
    N = 2048
    hopSize = int(N / 10)
    J = int(N * gamma / 2)
    audio = MonoLoader(filename=filename, sampleRate=44100)()
    mag = []
    for frame in FrameGenerator(audio, frameSize=N, hopSize=hopSize):
        m = CartesianToPolar()(FFT()(Windowing(type='hann')(frame)))[0]
        m = np.asarray(m)
        idx = np.argsort(m)[::-1][:J]
        mag.append(m[idx])
    mag = np.asarray(mag)
    x2 = mag * mag
    inos = np.sum(x2, axis=1) / (np.sum(x2 * x2, axis=1) ** 0.25)
    ninos = inos / (J ** 0.25)
    return OnsetPeakPickingProcessor(threshold=0.03, fps=44100 / hopSize)(ninos)
def shared_main(source, dest, display_result):
    source_audio = _loader(source)
    destination_audio = _loader(dest)
    source_frame = FrameGenerator(source_audio, frameSize=2048, hopSize=512)
    destination_frame = FrameGenerator(destination_audio, frameSize=2048, hopSize=512)
    window = Windowing(type='hann')  # window function
    spectrum = Spectrum()            # spectrum function
    pitch_yin_fft = PitchYinFFT()    # pitch extractor
    pitch_salience = PitchSalience()
    loudness = Loudness()
    # draw_plot(source_frame, window, spectrum, pitch_yin_fft)
    min_cost, match_result = compare(source_frame, destination_frame, window,
                                     spectrum, pitch_yin_fft, 5, 1, 1, display_result, loudness)
    return min_cost, match_result
def run(self, audio):
    # Calculate the melflux onset detection function
    pool = Pool()
    w = Windowing(type='hann')
    fft = np.fft.fft
    od_flux = OnsetDetection(method='melflux')

    for frame in FrameGenerator(audio, frameSize=self.FRAME_SIZE, hopSize=self.HOP_SIZE):
        pool.add('audio.windowed_frames', w(frame))

    fft_result = fft(pool['audio.windowed_frames']).astype('complex64')
    fft_result_mag = np.absolute(fft_result)
    fft_result_ang = np.angle(fft_result)
    self.fft_mag_1024_512 = fft_result_mag
    self.fft_phase_1024_512 = fft_result_ang

    for mag, phase in zip(fft_result_mag, fft_result_ang):
        pool.add('onsets.complex', od_flux(mag, phase))
    odf = pool['onsets.complex']

    # Given the ODF, calculate the tempo and the phase
    tempo, tempo_curve, phase, phase_curve = BeatTracker.get_tempo_and_phase_from_odf(odf, self.HOP_SIZE)

    # Calculate the beat annotations
    spb = 60. / tempo  # seconds per beat
    beats = (np.arange(phase, (np.size(audio) / self.SAMPLE_RATE) - spb + phase, spb).astype('single'))

    # Store all the results
    self.bpm = tempo
    self.phase = phase
    self.beats = beats
    self.onset_curve = BeatTracker.hwr(pool['onsets.complex'])
def feature_allframes(audio, beats, frame_indexer=None):
    # Initialise the algorithms
    w = Windowing(type='blackmanharris92')
    spectrum = Spectrum()
    specPeaks = SpectralPeaks()
    hpcp = HPCP()

    if frame_indexer is None:
        # Exclude the first frame, because it has no predecessor to calculate a difference with
        frame_indexer = range(1, len(beats) - 1)

    # 12 chromagram values per beat frame
    chroma_values = np.zeros((len(beats), 12))
    # Differences between chroma vectors
    chroma_differences = np.zeros((len(beats), 3))

    # Step 1: Calculate the chroma framewise,
    # for all frames needed by a frame in the frame_indexer
    for i in [i for i in range(len(beats)) if (i in frame_indexer) or (i + 1 in frame_indexer)
              or (i - 1 in frame_indexer)]:
        SAMPLE_RATE = 44100
        start_sample = int(beats[i] * SAMPLE_RATE)
        end_sample = int(beats[i + 1] * SAMPLE_RATE)
        # print(start_sample, end_sample)
        # Keep the frame length even
        frame = audio[start_sample: (end_sample if (start_sample - end_sample) % 2 == 0 else end_sample - 1)]
        freq, mag = specPeaks(spectrum(w(frame)))
        chroma_values[i] = hpcp(freq, mag)

    # Step 2: Calculate the distances between the chroma vectors of neighbouring frames
    for i in frame_indexer:
        chroma_differences[i][0] = np.linalg.norm(chroma_values[i] - chroma_values[i - 1])
        chroma_differences[i][1] = np.linalg.norm(chroma_values[i] - chroma_values[i + 1])
        chroma_differences[i][2] = np.linalg.norm(chroma_values[i - 1] - chroma_values[i + 1])

    # Include the raw chroma values as absolute features
    result = np.append(chroma_values[frame_indexer], chroma_differences[frame_indexer], axis=1)
    return preprocessing.scale(result)
def f_essentia_extract(Audio):
    # Library methods that detect where each note occurs in time
    od2 = OnsetDetection(method='complex')

    # Let's also get the other algorithms we will need, and a pool to store the results
    w = Windowing(type='hann')
    fft = FFT()               # this gives us a complex FFT
    c2p = CartesianToPolar()  # and this turns it into a pair (magnitude, phase)
    pool = essentia.Pool()

    # Computing onset detection functions.
    for frame in FrameGenerator(Audio, frameSize=1024, hopSize=512):
        mag, phase = c2p(fft(w(frame)))
        pool.add('features.complex', od2(mag, phase))

    # Start of each "note"
    onsets = Onsets()
    tiempos_detectados_essentia = onsets(essentia.array([pool['features.complex']]), [1])
    # print(tiempos_detectados_essentia)
    return tiempos_detectados_essentia
def detectBW(audio: list, SR: float, frame_size=256, hop_size=128, floor_db=-90, oversample_f=1):
    frame_size *= oversample_f  # if an oversample factor is desired, apply it
    fc_index_arr = []
    fft = FFT(size=frame_size)  # declare FFT function
    window = Windowing(size=frame_size, type="hann")  # declare windowing function

    for frame in FrameGenerator(audio, frameSize=frame_size, hopSize=hop_size, startFromZero=True):
        frame_fft = abs(fft(window(frame)))
        frame_fft_db = 20 * np.log10(frame_fft + eps)  # calculate frame fft values in dB

        # compute the linear interpolation between the values of the maxima of the spectrum
        interp_frame = compute_spectral_envelope(frame_fft_db, "linear")
        interp_frame = modify_floor(interp_frame, floor_db, log=True)

        fc_index = compute_fc(interp_frame)
        if energy_verification(frame_fft, fc_index):
            fc_index_arr.append(fc_index)

    if len(fc_index_arr) == 0:
        fc_index_arr = [frame_size]

    fc_bin, conf, binary = compute_mean_fc(fc_index_arr, np.arange(len(frame_fft)), SR)
    # print("mean_fc: ", fc_bin * SR / frame_size, " conf: ", conf, " binary_result: ", binary)

    return fc_bin * SR / frame_size, conf, binary
def feature_allframes(audio, beats, frame_indexer=None):
    # Initialise the algorithms
    w = Windowing(type='hann')
    loudness = Loudness()

    if frame_indexer is None:
        # Exclude the first frame, because it has no predecessor to calculate a difference with
        frame_indexer = range(1, len(beats) - 1)

    # 1 loudness value per beat frame
    loudness_values = np.zeros((len(beats), 1))
    # Feature vector of 4 consecutive (scaled) loudness values per indexed frame
    loudness_feature_vector = np.zeros((len(beats), 4))

    # Step 1: Calculate the loudness framewise,
    # for all frames needed by a frame in the frame_indexer
    for i in [i for i in range(len(beats)) if (i in frame_indexer) or (i - 1 in frame_indexer)
              or (i - 2 in frame_indexer) or (i - 3 in frame_indexer)]:
        SAMPLE_RATE = 44100
        start_sample = int(beats[i] * SAMPLE_RATE)
        end_sample = int(beats[i + 1] * SAMPLE_RATE)
        # print(start_sample, end_sample)
        # Keep the frame length even
        frame = audio[start_sample: end_sample if (start_sample - end_sample) % 2 == 0 else end_sample - 1]
        loudness_values[i] = loudness(w(frame))

    loudness_values = preprocessing.scale(loudness_values)

    # Step 2: construct the feature vector from 4 consecutive loudness values
    for i in frame_indexer:
        loudness_feature_vector[i] = np.reshape(loudness_values[i:i + 4], (4,))

    result = loudness_feature_vector[frame_indexer]
    return result
def extract_features_from_frame(self, frame):
    """ Return dictionary of features for the given frame. """
    centroid = Centroid(range=22050)
    hamming_window = Windowing(type='hamming')
    zcr = ess.ZeroCrossingRate()
    spectrum = ess.Spectrum()
    central_moments = ess.CentralMoments()

    # Spectrum can only compute FFT of array of even size (don't know why)
    if len(frame) % 2 == 1:
        frame = frame[:-1]

    spectral_magnitude = spectrum(hamming_window(frame))
    feat_dic = {'zrc': zcr(frame),
                'centroid': centroid(spectral_magnitude)}

    # Central moments
    central_moms = central_moments(hamming_window(frame))
    for idx, icm in enumerate(central_moms):
        feat_dic['cm{}'.format(idx)] = icm

    # Distribution shape
    distributionshape = ess.DistributionShape()
    for idx, ism in enumerate(distributionshape(central_moms)):
        feat_dic['sm{}'.format(idx)] = ism

    return feat_dic
beats = beatTracker.getBeats()
bpm = beatTracker.getBpm()
phase = beatTracker.getPhase()
beats = beats - phase

print('Bpm: ', bpm)
print('Frame size in samples: ', 44100 * (60.0 / bpm))

# Followed the approach from Foote.
# Adjust the frame size to the length of a beat, to extract beat-aligned information (self-devised)
FRAME_SIZE = int(44100 * (60.0 / bpm))
HOP_SIZE = FRAME_SIZE // 2
frames_per_second = (44100.0 / FRAME_SIZE) * (FRAME_SIZE / HOP_SIZE)
beats = beats * frames_per_second

spec = Spectrum(size=FRAME_SIZE - FRAME_SIZE % 2)
w = Windowing(type='hann')
spectrum = Spectrum()  # FFT would return the complex FFT; we only want the magnitude
mfcc = MFCC()
pool = Pool()

# Step 0: align audio with phase
beats = beats - 0.5
start_sample = int(phase * (44100.0 * 60 / bpm))

# Step 1: Calculate framewise MFCC
for frame in FrameGenerator(audio[start_sample:], frameSize=FRAME_SIZE, hopSize=HOP_SIZE):
    mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
def run(self, audio):
    # TODO put this in some util class

    # Step 0: calculate the CSD (Complex Spectral Difference) features
    # and the associated onset detection function
    spec = Spectrum(size=self.FRAME_SIZE)
    w = Windowing(type='hann')
    fft = FFT()
    c2p = CartesianToPolar()
    od_csd = OnsetDetection(method='complex')
    pool = Pool()

    # TODO test faster (numpy) way
    for frame in FrameGenerator(audio, frameSize=self.FRAME_SIZE, hopSize=self.HOP_SIZE):
        mag, phase = c2p(fft(w(frame)))
        pool.add('onsets.complex', od_csd(mag, phase))

    # Step 1: normalise the data using an adaptive mean threshold
    novelty_mean = self.adaptive_mean(pool['onsets.complex'], 16.0)

    # Step 2: half-wave rectify the result
    novelty_hwr = (pool['onsets.complex'] - novelty_mean).clip(min=0)

    # Step 3: calculate the autocorrelation of this signal
    novelty_autocorr = self.autocorr(novelty_hwr)

    # Step 4: Sum over constant intervals to detect the most likely BPM
    valid_bpms = np.arange(self.minBpm, self.maxBpm, self.stepBpm)
    for bpm in valid_bpms:
        # Discard the last value to prevent reading beyond the array (it may have been rounded up)
        frames = (np.round(np.arange(0, np.size(novelty_autocorr),
                                     self.numFramesPerBeat(bpm))).astype('int'))[:-1]
        pool.add('output.bpm', np.sum(novelty_autocorr[frames]) / np.size(frames))
    bpm = valid_bpms[np.argmax(pool['output.bpm'])]

    # Step 5: Calculate phase information
    valid_phases = np.arange(0.0, 60.0 / bpm, 0.001)  # Valid phases in SECONDS
    for phase in valid_phases:
        # Convert phase from seconds to frames
        phase_frames = (phase * 44100.0) / 512.0
        frames = (np.round(np.arange(phase_frames, np.size(novelty_hwr),
                                     self.numFramesPerBeat(bpm))).astype('int'))[:-1]
        pool.add('output.phase', np.sum(novelty_hwr[frames]) / np.size(frames))
    phase = valid_phases[np.argmax(pool['output.phase'])]
    print('PHASE', phase)

    # Step 6: Determine the beat locations
    spb = 60. / bpm  # seconds per beat
    beats = (np.arange(phase, (np.size(audio) / 44100) - spb + phase, spb).astype('single'))

    # Store all the results
    self.bpm = bpm
    self.phase = phase
    self.beats = beats
    self.downbeats = self.calculateDownbeats(audio, bpm, phase)
def run(self, audio):
    def numFramesPerBeat(bpm):
        return (60.0 * self.SAMPLE_RATE) / (self.HOP_SIZE * bpm)

    def autocorr(x):
        result = np.correlate(x, x, mode='full')
        return result[result.size // 2:]

    def adaptive_mean(x, N):
        return np.convolve(x, [1.0] * int(N), mode='same') / N

    # Step 0: calculate the CSD (Complex Spectral Difference) features
    # and the associated onset detection function
    spec = Spectrum(size=self.FRAME_SIZE)
    w = Windowing(type='hann')
    fft = np.fft.fft
    c2p = CartesianToPolar()
    od_csd = OnsetDetection(method='melflux')
    pool = Pool()

    for frame in FrameGenerator(audio, frameSize=self.FRAME_SIZE, hopSize=self.HOP_SIZE):
        pool.add('audio.windowed_frames', w(frame))

    fft_result = fft(pool['audio.windowed_frames']).astype('complex64')
    fft_result_mag = np.absolute(fft_result)
    fft_result_ang = np.angle(fft_result)

    for mag, phase in zip(fft_result_mag, fft_result_ang):
        pool.add('onsets.complex', od_csd(mag, phase))

    # Step 1: normalise the data using an adaptive mean threshold
    novelty_mean = adaptive_mean(pool['onsets.complex'], 16.0)

    # Step 2: half-wave rectify the result
    novelty_hwr = (pool['onsets.complex'] - novelty_mean).clip(min=0)

    # Step 3: calculate the autocorrelation of this signal
    novelty_autocorr = autocorr(novelty_hwr)

    # Step 4: Sum over constant intervals to detect the most likely BPM
    valid_bpms = np.arange(self.minBpm, self.maxBpm, self.stepBpm)
    for bpm in valid_bpms:
        # Discard the last value to prevent reading beyond the array (it may have been rounded up)
        frames = (np.round(np.arange(0, np.size(novelty_autocorr),
                                     numFramesPerBeat(bpm))).astype('int'))[:-1]
        pool.add('output.bpm', np.sum(novelty_autocorr[frames]) / np.size(frames))
    bpm = valid_bpms[np.argmax(pool['output.bpm'])]

    # Step 5: Calculate phase information
    valid_phases = np.arange(0.0, 60.0 / bpm, 0.001)  # Valid phases in SECONDS
    for phase in valid_phases:
        # Convert phase from seconds to frames
        phase_frames = (phase * 44100.0) / 512.0
        frames = (np.round(np.arange(phase_frames, np.size(novelty_hwr),
                                     numFramesPerBeat(bpm))).astype('int'))[:-1]
        pool.add('output.phase', np.sum(novelty_hwr[frames]) / np.size(frames))
    phase = valid_phases[np.argmax(pool['output.phase'])]

    # Step 6: Determine the beat locations
    spb = 60. / bpm  # seconds per beat
    beats = (np.arange(phase, (np.size(audio) / 44100) - spb + phase, spb).astype('single'))

    # Store all the results
    self.bpm = bpm
    self.phase = phase
    self.beats = beats
import time

import essentia
from essentia.standard import Extractor, MonoLoader, Trimmer, Mean, FrameGenerator, Spectrum, SpectralPeaks, \
    Dissonance, BarkBands, Windowing, ZeroCrossingRate, OddToEvenHarmonicEnergyRatio, EnergyBand, MetadataReader, \
    OnsetDetection, Onsets, CartesianToPolar, FFT, MFCC, SingleGaussian

from build_map import build_map

sampleRate = 44100
frameSize = 2048
hopSize = 1024
windowType = "hann"

mean = Mean()
keyDetector = essentia.standard.Key(pcpSize=12)
spectrum = Spectrum()
window = Windowing(size=frameSize, zeroPadding=0, type=windowType)
mfcc = MFCC()
gaussian = SingleGaussian()
od = OnsetDetection(method='hfc')
fft = FFT()               # this gives us a complex FFT
c2p = CartesianToPolar()  # and this turns it into a pair (magnitude, phase)
onsets = Onsets(alpha=1)

# dissonance
spectralPeaks = SpectralPeaks(sampleRate=sampleRate, orderBy='frequency')
dissonance = Dissonance()

# barkbands
barkbands = BarkBands(sampleRate=sampleRate)

# zero crossing rate
def test_windowing(self):
    Windowing(type='hann')
def feature_allframes(audio, beats, frame_indexer=None):
    # Initialise the algorithms
    FRAME_SIZE = 1024
    HOP_SIZE = 512
    spec = Spectrum(size=FRAME_SIZE)
    w = Windowing(type='hann')
    fft = np.fft.fft
    od_csd = OnsetDetection(method='complex')
    od_hfc = OnsetDetection(method='flux')
    pool = Pool()

    # Calculate the onset detection curve on the audio
    for frame in FrameGenerator(audio, frameSize=FRAME_SIZE, hopSize=HOP_SIZE):
        pool.add('windowed_frames', w(frame))

    fft_result = fft(pool['windowed_frames']).astype('complex64')
    fft_result_mag = np.absolute(fft_result)
    fft_result_ang = np.angle(fft_result)
    for mag, phase in zip(fft_result_mag, fft_result_ang):
        pool.add('onsets.flux', od_hfc(mag, phase))

    # Normalize and half-wave rectify the onset detection curve
    def adaptive_mean(x, N):
        return np.convolve(x, [1.0] * int(N), mode='same') / N

    novelty_mean = adaptive_mean(pool['onsets.flux'], 16.0)
    novelty_hwr = (pool['onsets.flux'] - novelty_mean).clip(min=0)
    novelty_hwr = novelty_hwr / np.average(novelty_hwr)

    if frame_indexer is None:
        # Exclude the first frames, because they have no predecessor to calculate differences with
        frame_indexer = list(range(4, len(beats) - 1))

    # Features per indexed frame:
    # - correlations between the onset detection curve of the current frame and that of neighbouring frames
    # - differences between the integrals of the novelty curve over (half-)frames
    onset_integrals = np.zeros((2 * len(beats), 1))
    frame_i = (np.array(beats) * 44100.0 / HOP_SIZE).astype('int')
    onset_correlations = np.zeros((len(beats), 21))

    # Step 1: Calculate the half-frame onset integrals for all frames needed by a frame in the frame_indexer
    for i in [i for i in range(len(beats)) if (i in frame_indexer) or (i + 1 in frame_indexer)
              or (i - 1 in frame_indexer) or (i - 2 in frame_indexer) or (i - 3 in frame_indexer)
              or (i - 4 in frame_indexer) or (i - 5 in frame_indexer) or (i - 6 in frame_indexer)
              or (i - 7 in frame_indexer)]:
        half_i = int((frame_i[i] + frame_i[i + 1]) / 2)
        cur_frame_1st_half = novelty_hwr[frame_i[i]: half_i]
        cur_frame_2nd_half = novelty_hwr[half_i: frame_i[i + 1]]
        onset_integrals[2 * i] = np.sum(cur_frame_1st_half)
        onset_integrals[2 * i + 1] = np.sum(cur_frame_2nd_half)

    # Step 2: Calculate the correlations and integral differences between neighbouring frames
    for i in frame_indexer:
        onset_correlations[i][0] = max(np.correlate(novelty_hwr[frame_i[i - 1]: frame_i[i]],
                                                    novelty_hwr[frame_i[i]: frame_i[i + 1]], mode='valid'))  # Only 1 value
        onset_correlations[i][1] = max(np.correlate(novelty_hwr[frame_i[i]: frame_i[i + 1]],
                                                    novelty_hwr[frame_i[i + 1]: frame_i[i + 2]], mode='valid'))  # Only 1 value
        onset_correlations[i][2] = max(np.correlate(novelty_hwr[frame_i[i]: frame_i[i + 1]],
                                                    novelty_hwr[frame_i[i + 2]: frame_i[i + 3]], mode='valid'))  # Only 1 value
        onset_correlations[i][3] = max(np.correlate(novelty_hwr[frame_i[i]: frame_i[i + 1]],
                                                    novelty_hwr[frame_i[i + 3]: frame_i[i + 4]], mode='valid'))  # Only 1 value

        # Difference in integrals of the novelty curve between frames;
        # quantifies the difference in number and prominence of onsets in this frame
        onset_correlations[i][4] = onset_integrals[2 * i] - onset_integrals[2 * i - 1]
        onset_correlations[i][5] = onset_integrals[2 * i + 2] + onset_integrals[2 * i + 3] \
            - onset_integrals[2 * i - 1] - onset_integrals[2 * i - 2]
        for j in range(1, 16):
            onset_correlations[i][5 + j] = onset_integrals[2 * i + j] - onset_integrals[2 * i]

    # Only the correlation/integral features of the indexed frames are used
    result = onset_correlations[frame_indexer]
    return preprocessing.scale(result)