# Aggregate phoneme-level error metrics over 5 cross-validation folds for each
# experiment. Depends on names defined elsewhere in this file:
#   experiment_folders, number_of_phonemes, partition, WERDetails,
#   and the accumulator lists per, poa_afer, moa_afer, poa_cm, moa_cm.
phoneme_count_per_fold = np.zeros((len(experiment_folders), number_of_phonemes))
# NOTE(review): rows are indexed by fold (0..4) below, yet the array's first
# dimension is len(experiment_folders) — confirm these counts agree, otherwise
# this can IndexError or leave rows unset.
for i, experiment in enumerate(experiment_folders):
    per_per_experiment, poa_afer_per_experiment, moa_afer_per_experiment, \
        poa_cm_per_experiment, moa_cm_per_experiment = [], [], [], [], []
    for fold in range(1, 6):
        t = time.time()
        # Locate the folder for the requested test partition in this fold.
        fold_dir = os.path.join("./experiments/", experiment, str(fold))
        test_folder = [folder for folder in os.listdir(fold_dir)
                       if partition in folder]
        # Kaldi-style scoring output: wer_details/per_utt inside the partition.
        wer_details = os.path.join("./experiments/", experiment, str(fold),
                                   test_folder[0], "wer_details", "per_utt")
        corpus = WERDetails(wer_details)
        per_per_experiment.append(corpus.all_pers())
        poa_afer_per_experiment.append(corpus.all_poa_afers())
        moa_afer_per_experiment.append(corpus.all_moa_afers())
        poa_cm_per_experiment.append(corpus.poa_confusion_matrix())
        moa_cm_per_experiment.append(corpus.moa_confusion_matrix())
        s = time.time() - t
        print("Fold took", s, "seconds")
        # Reference phoneme counts come from the reference transcripts, so they
        # are presumably identical across experiments — collected once (i == 0).
        if i == 0:
            phoneme_type, phoneme_counts = np.unique(corpus.all_ref_phonemes,
                                                     return_counts=True)
            phoneme_count_per_fold[fold - 1, :] = phoneme_counts
    per.append(per_per_experiment)
    poa_afer.append(poa_afer_per_experiment)
    moa_afer.append(moa_afer_per_experiment)
    poa_cm.append(poa_cm_per_experiment)
    # FIX: the per-fold MOA confusion matrices were collected above but never
    # aggregated, unlike all four sibling metrics — append them as well.
    # (Assumes a `moa_cm` accumulator list is defined alongside `poa_cm`
    # elsewhere in the file — TODO confirm.)
    moa_cm.append(moa_cm_per_experiment)
# Build (or load) per-phoneme PER tables for the voicefilter experiment.
# Depends on names defined elsewhere: glob, HParam, WERDetails, pd.
preprocessing = False  # False: load previously written CSVs instead of recomputing
separation = True      # True: speech-separation (*_ss) results; False: enhancement (*_se)
if separation:
    files = glob("experiments/voicefilter_experiment/*_ss_result.txt")
else:
    files = glob("experiments/voicefilter_experiment/*_se_result.txt")
config = HParam("../configs/eng_espnet.yaml")
if preprocessing:
    # Recompute phoneme error rates from raw result files and persist to CSV.
    dfs = list()
    for file in files:
        wer_details = WERDetails(file, skip_calculation=False, config=config)
        # all_pers() returns parallel sequences; element 0 is skipped —
        # presumably a header/aggregate entry, TODO confirm against WERDetails.
        phoneme, other = wer_details.all_pers()
        dfs.append(pd.DataFrame(data=[other[1:]], columns=phoneme[1:], index=[file]))
    # Outer join keeps the union of phoneme columns across files.
    result = pd.concat(dfs, axis=0, join="outer")
    if separation:
        result.to_csv("csvs/separation_results_with_clean.csv")
    else:
        result.to_csv("csvs/enhancement_results_with_clean.csv")
else:
    # Load the cached CSVs written by a previous preprocessing run.
    # NOTE(review): read paths use "../csvs/" while writes above use "csvs/" —
    # verify the intended working directory.
    if separation:
        df = pd.read_csv("../csvs/separation_results_with_clean.csv")
    else:
        # (source truncated here — the enhancement-loading branch body is not
        # visible in this chunk)