def gerador_de_audio():
    files = []
    for base in bases_B:
        instances_list = []
        rate_list = []
        files.append(func.get_filenames(path=base, filetype='.wav'))
        for i in range(len(files[-1])):
            instance, rate = librosa.load(base + '/' + files[-1][i])
            # wavelet filtering and decomposition
            rec_signal = func.wavelet_filtering(instance, th)
            instances_list.append(rec_signal)
            rate_list.append(rate)
            print('Done with: ' + files[-1][i])
        print('Done with: ' + base + '\n')
        # switch to the output directory for the filtered audio
        new_path = base + "/filtered"
        os.chdir(new_path)
        for i in range(len(files[-1])):
            # write the filtered audio file
            # maxv = np.iinfo(np.int16).max
            # librosa.output.write_wav(
            #     files[-1][i] + "_filtered_int16.wav", (instances_list[i] * maxv).astype(np.int16), rate_list[i]
            # )
            # note: librosa.output.write_wav is only available in librosa < 0.8
            librosa.output.write_wav(files[-1][i] + "_filtered.wav", instances_list[i], rate_list[i])
            print('Done with audio: ' + files[-1][i])
        print('Done with: ' + base + '\n')
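
# The helper below is a minimal sketch of the kind of wavelet denoising that
# func.wavelet_filtering is assumed to perform (decompose, threshold the detail
# coefficients, reconstruct). The wavelet name, decomposition level and soft
# thresholding are assumptions for illustration, not the project's actual parameters.
import numpy as np
import pywt


def wavelet_filtering_sketch(signal, th, wavelet='db4', level=5):
    """Sketch of wavelet denoising: decompose, threshold details, reconstruct."""
    coeffs = pywt.wavedec(signal, wavelet, level=level)
    # keep the approximation untouched, soft-threshold the detail coefficients
    denoised = [coeffs[0]] + [pywt.threshold(c, th, mode='soft') for c in coeffs[1:]]
    rec = pywt.waverec(denoised, wavelet)
    # waverec can return one extra sample for odd-length inputs
    return rec[:len(signal)]
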
def main_a():
    files = []
    all_features = []
    for base in bases_A:
        instances_list = []
        features_list = []
        filtered_instances_list = []
        # build the list of audio filenames for this base
        files.append(func.get_filenames(path=base, filetype='.wav'))
        for i in range(len(files[-1])):
            # instance, rate = func.load_sound_files(base + '/' + files[-1][i])
            # rate, instance = wavfile.read(base + '/' + files[-1][i])
            instance, rate = librosa.load(base + '/' + files[-1][i])
            # [rate, instance] = audioBasicIO.readAudioFile(base + '/' + files[-1][i])
            instances_list.append(instance)
            # wavelet filtering and decomposition
            rec_signal = func.wavelet_filtering(instance, th, rate)
            filtered_instances_list.append(rec_signal)
            # feature extraction (librosa); note that the features are computed from
            # the raw signal here, even though the filtered signal is built above
            features = func.extract_feature(instance, rate)
            # features = audioFeatureExtraction.stFeatureExtraction(instance, rate, 0.5 * rate, 0.25 * rate)
            # features = audioFeatureExtraction.mtFeatureExtraction(
            #     instance, rate, 0.5 * rate, 0.5 * rate, 0.25 * rate, 0.25 * rate
            # )
            # labelling by source directory
            if base == atraning_normal:
                features.append("Normal")
            elif base == atraning_mumur:
                features.append("Murmur")
            elif base == atraining_extrahls:
                features.append("Extra Heart Sound")
            elif base == atraining_artifact:
                features.append("Artifact")
            features_list.append(features)
            all_features.append(features)
            print('Done with: ' + files[-1][i])
        # write one CSV per base
        dir_db = os.path.basename(base)
        func.write_csv('librosa_filtered_features_of_' + dir_db, features_list)
        print('Done with: ' + base + '\n')
    os.chdir(path_A)
    func.write_csv("BaseA_librosa_filtered_features", all_features)
    print('Done!')
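
# A minimal sketch of what func.extract_feature is assumed to compute with librosa:
# averaged MFCC, chroma, mel, spectral-contrast and tonnetz vectors concatenated into
# one flat feature list. The exact feature set and its order in the project may differ.
def extract_feature_sketch(signal, rate):
    """Return a flat list of averaged librosa features for one audio signal."""
    stft = np.abs(librosa.stft(signal))
    mfccs = np.mean(librosa.feature.mfcc(y=signal, sr=rate, n_mfcc=40).T, axis=0)
    chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=rate).T, axis=0)
    mel = np.mean(librosa.feature.melspectrogram(y=signal, sr=rate).T, axis=0)
    contrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=rate).T, axis=0)
    tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(signal), sr=rate).T, axis=0)
    return list(np.concatenate([mfccs, chroma, mel, contrast, tonnetz]))
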
def main_pn():
    files = []
    labels = []
    for base in bases_Physioef:
        instances_list = []
        features_list = []
        files.append(func.get_filenames(path=base, filetype='.wav'))
        labels.append(func.get_filenames(path=base, filetype='.hea'))
        for i in range(len(files[-1])):
            # instance, rate = func.load_sound_files(base + '/' + files[-1][i])
            # rate, instance = wavfile.read(base + '/' + files[-1][i])
            instance, rate = librosa.load(base + '/' + files[-1][i])
            # wavelet filtering and decomposition
            rec_signal = func.wavelet_filtering(instance, th)
            instances_list.append(rec_signal)
            # feature extraction from the filtered signal
            features = func.extract_feature(rec_signal, rate)
            # PhysioNet labelling: the class is read from the matching .hea header
            with open(base + '/' + labels[-1][i], 'r') as label_object:
                label = label_object.read().split('\n')[-2]
            features.append(label)
            # features = func.pyAudioAnalysis_features(rec_signal, rate)
            features_list.append(features)
            print('Done with: ' + files[-1][i])
        func.write_csv(path_Physio + base[-1] + '_filtered', features_list)
        # dir_db = os.path.basename(base)
        # func.write_csv('librosa_features_of_' + dir_db, features_list)
        print('Done with: ' + base + '\n')
    print('Done!')
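
# main_pn assumes the PhysioNet label is the second-to-last newline-split token of the
# .hea header. The helper below is a slightly more defensive sketch of that read,
# under the same assumption that the label sits on the last non-empty header line
# (e.g. a trailing comment such as '# Normal' or '# Abnormal').
def read_physionet_label_sketch(header_path):
    """Read the class label assumed to sit on the last non-empty line of a .hea file."""
    with open(header_path, 'r') as header:
        lines = [line.strip() for line in header if line.strip()]
    return lines[-1].lstrip('#').strip()
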
files = []
labels = []
file_paths = []
for base in bases_B:
    instances = []
    files.append(get_filenames(path=base, filetype='.wav'))
    # labels.append(get_filenames(path=base, filetype='.hea'))
    # splitting this into two loops (load/filter, then plot) would probably
    # make the flow easier to follow
    for i in range(len(files[-1])):
        # instance, rate = load_sound_files(base + '/' + files[-1][i])
        instance, rate = librosa.load(base + '/' + files[-1][i])
        rec_signal = func.wavelet_filtering(instance, th=288)
        plot_imagens(rec_signal, files[-1][i] + " filtered waveplot")
        # plot_spectogram(rec_signal, files[-1][i] + ' filtered')
        print(files[-1][i] + ' done!')
    print('Done with: ' + base + '\n')

# print(base + '/' + files[-1][0])
# x, sr = librosa.load(base + '/' + files[-1][0])
# librosa.display.waveplot(x, sr=sr)
# plt.show()
# files = get_filenames(btraning_mumur, '.wav')
# load the audio files
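
# plot_imagens is assumed to render a waveform plot of the filtered signal. The sketch
# below shows that idea with plain matplotlib; the figure size, axis labels and the
# choice of plt.show() over saving to disk are assumptions, not the project's behaviour.
import matplotlib.pyplot as plt


def plot_imagens_sketch(signal, title):
    """Minimal waveform-plot sketch for a filtered signal."""
    plt.figure(figsize=(10, 4))
    plt.plot(signal)
    plt.title(title)
    plt.xlabel('Sample')
    plt.ylabel('Amplitude')
    plt.tight_layout()
    plt.show()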