Code Example #1
File: audio.py Project: knowbodynos/fierai
import numpy as np
import librosa
from librosa.feature import mfcc, chroma_stft, melspectrogram, spectral_contrast, tonnetz


# Method excerpted from a class whose instances carry `waveform` and `sample_rate` attributes.
def extract_features(self):
    # Short-time Fourier transform (STFT) magnitude, reused for the chroma and contrast features
    stft = np.abs(librosa.stft(self.waveform))
    # Mel-frequency cepstral coefficients (MFCCs), averaged over time
    mfccs = mfcc(y=self.waveform, sr=self.sample_rate,
                 n_mfcc=40).mean(axis=1)
    # Chromagram computed from the STFT magnitude
    chroma = chroma_stft(S=stft, sr=self.sample_rate).mean(axis=1)
    # Mel-scaled spectrogram
    mel = melspectrogram(y=self.waveform, sr=self.sample_rate).mean(axis=1)
    # Spectral contrast
    contrast = spectral_contrast(S=stft, sr=self.sample_rate).mean(axis=1)
    # Tonal centroid features (tonnetz), computed on the harmonic component
    harmonic = librosa.effects.harmonic(self.waveform)
    tonn = tonnetz(y=harmonic, sr=self.sample_rate).mean(axis=1)
    return np.concatenate([mfccs, chroma, mel, contrast, tonn], axis=0)
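A minimal usage sketch for the method above, assuming a hypothetical wrapper class that supplies the `waveform` and `sample_rate` attributes (the `Audio` class and file name below are illustrative, not from the original project):

import librosa

class Audio:
    """Hypothetical container with the attributes extract_features expects."""
    def __init__(self, path):
        # librosa.load returns a mono floating-point waveform and its sampling rate
        self.waveform, self.sample_rate = librosa.load(path)

clip = Audio("example.wav")        # hypothetical input file
features = extract_features(clip)  # 40 MFCC + 12 chroma + 128 mel + 7 contrast + 6 tonnetz = 193 values
print(features.shape)              # (193,)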
Code Example #2
    # Assumes module-level aliases `FT` = librosa.feature, `L` = librosa, `np` = numpy,
    # and an `_N_MFCC` constant defined elsewhere in the same module.
    def _calc_feat(self, window, feat_name):
        feat = None
        # calculate feature
        if feat_name == 'mfcc':
            feat = FT.mfcc(y=window, sr=self.sr, n_mfcc=_N_MFCC)
        elif feat_name == 'chroma_stft':
            feat = FT.chroma_stft(y=window, sr=self.sr)
        elif feat_name == 'melspectrogram':
            feat = FT.melspectrogram(y=window,
                                     sr=self.sr,
                                     n_mels=128,
                                     n_fft=1024,
                                     hop_length=512)
            feat = L.power_to_db(feat)
        elif feat_name == 'spectral_centroid':
            feat = FT.spectral_centroid(y=window, sr=self.sr)
        elif feat_name == 'spectral_rolloff':
            feat = FT.spectral_rolloff(y=window, sr=self.sr)
        elif feat_name == 'tonnetz':
            feat = FT.tonnetz(y=window, sr=self.sr)
        elif feat_name == 'zero_crossing_rate':
            feat = FT.zero_crossing_rate(y=window)
        else:
            raise ValueError(f'Invalid feature: {feat_name}')

        # pool feature from multiple frames
        if self.feature_pool == 'sum':
            feat = feat.sum(axis=1)
        elif self.feature_pool == 'max':
            feat = feat.max(axis=1)
        elif self.feature_pool == 'mean':
            feat = feat.mean(axis=1)
        elif self.feature_pool == 'flatten':
            feat = feat.flatten()
        elif self.feature_pool == 'none':
            pass
        else:
            raise ValueError(f'Invalid feature pooling scheme: {self.feature_pool}')

        # normalize features
        if self.l2_norm and feat.shape[0] > 1:
            feat /= np.linalg.norm(feat)
        return feat
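A minimal sketch of how `_calc_feat` might be driven over successive analysis windows; `extractor` stands in for an instance of the (unnamed) class above with `sr`, `feature_pool`, and `l2_norm` set, and the window and hop sizes are illustrative:

import numpy as np
import librosa

y, sr = librosa.load("example.wav", sr=None)  # hypothetical input file
win, hop = sr, sr // 2                        # 1-second windows with 50% overlap (illustrative)

rows = []
for start in range(0, len(y) - win + 1, hop):
    window = y[start:start + win]
    # one pooled vector per window, concatenated across feature types
    rows.append(np.concatenate([extractor._calc_feat(window, name)
                                for name in ('mfcc', 'chroma_stft', 'tonnetz')]))
X = np.vstack(rows)  # shape: (num_windows, feature_dim)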
Code Example #3
File: HCDF.py Project: PRamoneda/HCDF
def get_tonal_centroid_transform(y, sr, tonal_model, doce_bins_tuned_chroma):
    """
        returns centroids from tonal model

        Parameters
        ----------
        hpss : bool
            true or false depends on hpss block

        name_file: str
            name of the file that is being computed

        y : number > 0 [scalar]
            audio

        sr: number > 0 [scalar]
            target sampling rate

        chroma: str
            chroma-samplerate-framesize-overlap

        tonal_model: str optional
            Tonal model block type. "TIV2" for Tonal Interval space focus on audio. "TIV2" for audio. "TIV2_Symb" for symbolic data.
            "tonnetz" for harte centroids aproach. Default TIV2\

        doce_bins_tuned_chroma: list
            list of chroma vectors

        Returns
        -------
        list of tonal centroids vectors
    """
    centroid_vector = None
    if tonal_model == 'tonnetz':
        centroid_vector = tonnetz(y=y, sr=sr, chroma=doce_bins_tuned_chroma)
    elif tonal_model == 'TIV2':
        centroid_vector = tonal_interval_space(doce_bins_tuned_chroma)
    elif tonal_model == 'TIV2_symb':
        centroid_vector = tonal_interval_space(doce_bins_tuned_chroma,
                                               symbolic=True)
    return centroid_vector
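For reference, `librosa.feature.tonnetz` accepts a precomputed chromagram through its `chroma` argument, which is how the "tonnetz" branch above consumes `doce_bins_tuned_chroma`. A sketch of calling the function with a plain librosa chromagram (the file name is hypothetical and the project's own tuned-chroma pipeline is not reproduced here):

import librosa

y, sr = librosa.load("example.wav")              # hypothetical input file
chroma = librosa.feature.chroma_cqt(y=y, sr=sr)  # 12-bin chromagram, one column per frame
centroids = get_tonal_centroid_transform(y, sr, 'tonnetz', chroma)
print(centroids.shape)                           # (6, n_frames): six tonal centroid dimensions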
Code Example #4
import numpy as np
import matplotlib.pyplot as plt
from librosa import load, stft, amplitude_to_db, power_to_db
from librosa.display import specshow, waveplot
from librosa.feature import chroma_cqt, tonnetz, melspectrogram

y, sr = load(my_file)

D = np.abs(stft(y))

plt.figure()  # open a new figure per plot so successive specshow calls do not overwrite one another
specshow(amplitude_to_db(D, ref=np.max), y_axis='log', x_axis='time')
plt.title("Power Spectrogram")
plt.colorbar(format='%+2.0f dB')
plt.tight_layout()

plt.figure()
chroma_cq = chroma_cqt(y=y, sr=sr)
specshow(chroma_cq, y_axis='chroma', x_axis='time')
plt.title("Chromagram Constant Q Transform")
plt.colorbar()
plt.tight_layout()

plt.figure()
tonn = tonnetz(y=y, sr=sr)  # renamed so the variable does not shadow the imported tonnetz function
specshow(tonn, y_axis='tonnetz')
plt.title("Tonnetz Example")
plt.colorbar()
plt.tight_layout()

plt.figure()
ms = melspectrogram(y=y, sr=sr)
specshow(power_to_db(ms, ref=np.max), y_axis='mel', fmax=8000, x_axis='time')
plt.title("Mel Spectrogram Example")
plt.colorbar(format="%+2.0f dB")
plt.tight_layout()

plt.figure()
waveplot(y=y, sr=sr)
plt.title("Audio Waveplot")
plt.tight_layout()

plt.show()
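Note that `librosa.display.waveplot` was removed in librosa 0.10; on current releases the equivalent call is `waveshow`:

from librosa.display import waveshow

waveshow(y, sr=sr)  # drop-in replacement for the waveplot call above on librosa >= 0.10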
Code Example #5
# `rosaft` is assumed to alias librosa.feature; `get_sig` is a project helper that
# extracts the audio signal from the args dict.
def tonnetz(args):
    sig = get_sig(args)
    fs = args['fs']
    return rosaft.tonnetz(y=sig, sr=fs)
Code Example #6
import numpy as np
import librosa.feature as feat

# Note: this snippet targets an older librosa API. feature.rmse was later renamed to
# feature.rms, and recent releases require keyword arguments (y=..., sr=...) instead of
# the positional calls used below.
def extract_features(soundwave, sampling_rate, sound_name="test", feature_list=None):
    """
    Extracts audio features with librosa.
    :param soundwave: waveform extracted from the audio file
    :param sampling_rate: sampling rate of the waveform
    :param sound_name: type of sound, e.g. dog
    :param feature_list: list of features to compute (defaults to all supported features)
    :return: np.array of all features for the soundwave
    """
    print("Computing features for", sound_name)

    if not feature_list:
        feature_list = ["chroma_stft", "chroma_cqt", "chroma_cens", "melspectrogram",
                        "mfcc", "rmse", "spectral_centroid", "spectral_bandwidth",
                        "spectral_contrast", "spectral_flatness", "spectral_rolloff",
                        "poly_features", "tonnetz", "zero_crossing_rate"]

    features=[]


    #feature_len
    #"chroma_stft":12
    if "chroma_stft" in feature_list:
        features.append(feat.chroma_stft(soundwave, sampling_rate))

    #"chroma_cqt":12
    if "chroma_cqt" in feature_list:
        features.append(feat.chroma_cqt(soundwave, sampling_rate))

    #"chroma_cens":12
    if "chroma_cens" in feature_list:
        features.append(feat.chroma_cens(soundwave, sampling_rate))

    #"malspectrogram":128
    if "melspectrogram" in feature_list:
        features.append(feat.melspectrogram(soundwave, sampling_rate))

    #"mfcc":20
    if "mfcc" in feature_list:
        features.append(feat.mfcc(soundwave, sampling_rate))

    #"rmse":1
    if "rmse" in feature_list:
        features.append(feat.rmse(soundwave))

    #"spectral_centroid":1
    if "spectral_centroid" in feature_list:
        features.append(feat.spectral_centroid(soundwave, sampling_rate))

    #"spectral_bandwidth":1
    if "spectral_bandwidth" in feature_list:
        features.append(feat.spectral_bandwidth(soundwave, sampling_rate))

    #"spectral_contrast":7
    if "spectral_contrast" in feature_list:
        features.append(feat.spectral_contrast(soundwave, sampling_rate))

    #"spectral_flatness":1
    if "spectral_flatness" in feature_list:
        features.append(feat.spectral_flatness(soundwave))

    #"spectral_rolloff":1
    if "spectral_rolloff" in feature_list:
        features.append(feat.spectral_rolloff(soundwave, sampling_rate))

    #"poly_features":2
    if "poly_features" in feature_list:
        features.append(feat.poly_features(soundwave, sampling_rate))

    #"tonnetz":6
    if "tonnetz" in feature_list:
        features.append(feat.tonnetz(soundwave, sampling_rate))

    #"zero_crossing_rate":1
    if "zero_crossing_rate" in feature_list:
        features.append(feat.zero_crossing_rate(soundwave))


    return np.concatenate(features)
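A minimal usage sketch, assuming the older librosa noted above and a hypothetical input file:

import librosa

y, sr = librosa.load("dog_bark.wav")               # hypothetical file
feats = extract_features(y, sr, sound_name="dog")
print(feats.shape)  # roughly (205, n_frames) with the default feature list and default librosa parameters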
Code Example #7
        # Loop-body excerpt: `lf` aliases librosa.feature, `thing1` is an audio array with its
        # class label appended as the last element, and `cstft`, `row`, `train_data`, and
        # `counter` are initialised earlier in the omitted surrounding code. As written it
        # targets an older librosa (lf.rmse was later renamed to lf.rms).
        row = np.concatenate((row, cstft))
        cqt = np.mean(lf.chroma_cqt(thing1[:-1]).T, axis=0)
        row = np.concatenate((row, cqt))
        sens = np.mean(lf.chroma_cens(thing1[:-1]).T, axis=0)
        row = np.concatenate((row, sens))
        spcent = np.mean(lf.spectral_centroid(thing1[:-1]).T, axis=0)
        row = np.concatenate((row, spcent))
        flatness = np.mean(lf.spectral_flatness(thing1[:-1]).T, axis=0)
        row = np.concatenate((row, flatness))
        rolloff = np.mean(lf.spectral_rolloff(thing1[:-1]).T, axis=0)
        row = np.concatenate((row, rolloff))
        mspec = np.mean(lf.melspectrogram(thing1[:-1]).T, axis=0)
        row = np.concatenate((row, mspec))
        mfcc = np.mean(lf.mfcc(thing1[:-1], n_mfcc=30).T, axis=0)
        row = np.concatenate((row, mfcc))
        tonnetz = np.mean(lf.tonnetz(thing1[:-1]).T, axis=0)
        row = np.concatenate((row, tonnetz))
        rmse = np.mean(lf.rmse(thing1[:-1]).T, axis=0)
        row = np.concatenate((row, rmse))
        contrast = np.mean(lf.spectral_contrast(thing1[:-1]).T, axis=0)
        row = np.concatenate((row, contrast))
        tempo = np.mean(lf.tempogram(thing1[:-1], win_length=88).T, axis=0)
        row = np.concatenate((row, tempo))
        row = np.append(row, thing1[-1])
        #print(len(row))

        train_data = np.append(train_data, row)
        counter += 1

columns = ["feat_" + str(i) for i in range(299)]
columns.append("class")
Code Example #8
 def tonnetz(self):
     sig = get_sig(self.args)
     fs = self.args['fs']
     return rosaft.tonnetz(y=sig, sr=fs)
Code Example #9
File: HCDF.py Project: TheRealMorpheus1/HCDF
		    chroma-samplerate-framesize-overlap

		tonal_model: str, optional
			Tonal model block type: "TIV2" for the Tonal Interval Vector space on audio,
			"TIV2_symb" for symbolic data, or "tonnetz" for Harte's tonal centroid approach.
			Default is "TIV2".

		doce_bins_tuned_chroma: list
			list of chroma vectors

		Returns
		-------
		list of tonal centroid vectors
	"""
	centroid_vector = None
	if tonal_model == 'tonnetz':
		centroid_vector = tonnetz(y=y, sr=sr, chroma=doce_bins_tuned_chroma)
	elif tonal_model == 'TIV2':
		centroid_vector = tonal_interval_space(doce_bins_tuned_chroma)
	elif tonal_model == 'TIV2_symb':
		centroid_vector = tonal_interval_space(doce_bins_tuned_chroma, symbolic=True)
	return centroid_vector


def tonal_centroid_transform(hpss, chroma, name_file, y, sr, tonal_model, doce_bins_tuned_chroma):
	"""
		Wrapper around the tonal centroid transform that caches results so the same computation is not repeated later.

		Parameters
		----------
		hpss : bool
			whether the HPSS (harmonic-percussive source separation) block is enabled