def test_word_to_phoneme(self):
    wer_details = WERDetails("../experiments/baseline/wer_details/per_utt",
                             skip_calculation=True)
    self.assertEqual(wer_details.word_to_phoneme("cheese", True),
                     ["CH", "IY", "Z"])
    self.assertEqual(wer_details.word_to_phoneme("cheese", False),
                     ["CH", "IY1", "Z"])
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
from corpus import WERDetails

def articulatory_barplot(poa_afer: list, label_list: list, reference_label: list,
                         filename: str, phoneme_count: np.ndarray, per: list):
    """
    Generates an articulatory feature error rate (AFER) plot and saves it to figures/<filename>.pdf.

    :param poa_afer: per-experiment list of fold results, each a (labels, AFER values) pair
    :param label_list: display names of the compared architectures
    :param reference_label: articulatory classes to plot, in x-axis order
    :param filename: output name; must contain "moa" or "poa" to select the feature type
    :param phoneme_count: per-fold phoneme counts, shape (folds, phonemes)
    :param per: phoneme error rate results; per[0][0][0] provides the phoneme labels
    """

    format_dict = {
        "width_fig": 3.14,
        "height_fig": 3.14 * 0.8, # 0.62
        "dpi": 100,
        "width": 0.13, # the width of the bars
        "fontsize": 7,
        "capsize": 1.5
    }

    num_architectures = len(label_list)
    x = np.arange(len(reference_label))

    mean_experiments = list()
    std_experiments = list()

    # Aggregate AFERs across folds: align each fold's AFER vector to the
    # reference label order, then take the mean and std over folds.
    for experiment in poa_afer:
        selected_afer = np.zeros((len(experiment), len(reference_label)))
        for i, labelafer in enumerate(experiment):
            label, afer = labelafer

            idx = [label.index(lab) for lab in reference_label]
            afer = np.array(afer)[idx]
            selected_afer[i, :] = afer

        mean_afer = np.mean(selected_afer, axis=0)
        std_afer = np.std(selected_afer, axis=0)

        mean_experiments.append(mean_afer)
        std_experiments.append(std_afer)

    #width_logic = np.linspace(start=-format_dict["width"]*2,stop=format_dict["width"],num=num_architectures)

    # Phoneme counts

    wer_details = WERDetails("../experiments/baseline/wer_details/per_utt", skip_calculation=True)
    phoneme_labels = per[0][0][0]

    if "moa" in filename:
        af_labels = np.array([wer_details.phoneme_to_moa(phoneme_label) for phoneme_label in phoneme_labels])
    if "poa" in filename:
        af_labels = np.array([wer_details.phoneme_to_poa(phoneme_label) for phoneme_label in phoneme_labels])

    phoneme_counts = np.mean(phoneme_count,axis=0)
    af_counts = [round_to_n(np.sum(phoneme_counts[np.where(af_labels == af)]),2) for af in reference_label]

    fig = plt.figure(num=None, figsize=(format_dict["width_fig"], format_dict["height_fig"]),
                    dpi=format_dict["dpi"], facecolor='w', edgecolor='k')

    markers = ["v","^","<",">","x"]
    for i in range(num_architectures):
        # legend_props = {"elinewidth": 0.5}
        # plt.bar(x + format_dict["width"]/2 + width_logic[i],
        #             mean_experiments[i],format_dict["width"],
        #             label=label_list[i],yerr=std_experiments[i],
        #             capsize=format_dict["capsize"], error_kw=legend_props)
        plt.plot(x, mean_experiments[i], markersize=4, marker=markers[i])
        plt.fill_between(x, mean_experiments[i] - std_experiments[i], mean_experiments[i] + std_experiments[i],
                         alpha=0.2)
        r, p = pearsonr(af_counts, mean_experiments[i])
        print(label_list[i], " correlation btw data amount and performance", r, "p value", p)


        ax = plt.gca()
    ax.set_axisbelow(True)
    ax.yaxis.grid(color='gray', linestyle='dashed', which="major")

    ax.axhline(y=0, color="black", linewidth=0.8)
    ax.set_xticks(x)

    ax.tick_params(axis='both', which='major', labelsize=format_dict["fontsize"], pad=1)

    reference_label_to_plot = [ref + " " + str(int(af_counts[i])) for i, ref in enumerate(reference_label)]
    ax.set_xticklabels(reference_label_to_plot, rotation=45, fontsize=format_dict["fontsize"])

    plt.ylabel("AFER (%)", fontsize=format_dict["fontsize"])

    plt.legend(label_list, fontsize=6,
               loc='upper center', bbox_to_anchor=(0.5, +1.32),
               fancybox=True, shadow=True, ncol=(num_architectures // 2))
    plt.xlim([0, len(x) - 1])  # span the articulatory classes, not the experiment count
    plt.ylim([20, 80])
    fig.tight_layout(pad=0)
    fig.set_size_inches(format_dict["width_fig"], format_dict["height_fig"])

    current_dir = os.path.dirname(__file__)
    plt.savefig(os.path.join(current_dir, "figures/" + filename + ".pdf"), bbox_inches='tight', pad_inches=0.005)
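
# articulatory_barplot above calls round_to_n, which is not defined in this
# snippet. A minimal sketch, assuming (from the name and the round_to_n(x, 2)
# call) that it rounds a value to n significant figures:
import math

def round_to_n(value, n):
    """Round value to n significant figures (0 is returned unchanged)."""
    if value == 0:
        return 0
    return round(value, -int(math.floor(math.log10(abs(value)))) + (n - 1))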
# Assumes experiment_folders, partition, number_of_phonemes, per and poa_afer
# are defined earlier in the script.
print(experiment_folders)
# Purpose of this is to just store pure phoneme counts
phoneme_count_per_fold = np.zeros((len(experiment_folders), number_of_phonemes))
for i, experiment in enumerate(experiment_folders):

    per_per_experiment = []
    poa_afer_per_experiment = []
    moa_afer_per_experiment = []
    poa_cm_per_experiment = []
    moa_cm_per_experiment = []

    for fold in range(1, 6):
        t = time.time()
        test_folder = [folder for folder in os.listdir(os.path.join("./experiments/", experiment, str(fold)))
                       if partition in folder]

        wer_details = os.path.join("./experiments/", experiment, str(fold), test_folder[0], "wer_details", "per_utt")

            corpus = WERDetails(wer_details)

            per_per_experiment.append(corpus.all_pers())
            poa_afer_per_experiment.append(corpus.all_poa_afers())
            moa_afer_per_experiment.append(corpus.all_moa_afers())

            poa_cm_per_experiment.append(corpus.poa_confusion_matrix())
            moa_cm_per_experiment.append(corpus.moa_confusion_matrix())
            s = time.time() - t
            print("Fold took", s, "seconds")

            if i == 0:
                phoneme_type, phoneme_counts = np.unique(corpus.all_ref_phonemes, return_counts=True)
                phoneme_count_per_fold[fold - 1, :] = phoneme_counts
        per.append(per_per_experiment)
        poa_afer.append(poa_afer_per_experiment)
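
# With the results aggregated above, the plotting helper can be invoked along
# these lines. This is a sketch: label_list and the POA class list below are
# illustrative placeholders, not values taken from the original script.
label_list = ["baseline", "proposed"]  # hypothetical architecture names
reference_label = ["Bilabial", "Labiodental", "Dental", "Alveolar",
                   "Velar", "Glottal"]  # assumed POA classes; "Bilabial" appears in the tests
articulatory_barplot(poa_afer, label_list, reference_label, "poa_afer",
                     phoneme_count_per_fold, per)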
def test_poa_to_phonemes(self):
    wer_details = WERDetails("../experiments/baseline/wer_details/per_utt",
                             skip_calculation=True)

    self.assertEqual(sorted(wer_details.poa_to_phonemes("Bilabial")),
                     sorted(["B", "P", "EM", "W", "M"]))
def test_clean_non_words(self):
    self.assertEqual(WERDetails.clean_non_words(["aaa", "***", "bbb"]),
                     ["aaa", "bbb"])

def test_arpabet_cleaner(self):
    self.assertEqual(WERDetails.arpabet_cleaner("{CH} {IY1} {Z}", True),
                     ["CH", "IY", "Z"])
    self.assertEqual(WERDetails.arpabet_cleaner("{CH} {IY1} {Z}", False),
                     ["CH", "IY1", "Z"])

def test_word_in_cmu_dict(self):
    wer_details = WERDetails("../experiments/baseline/wer_details/per_utt",
                             skip_calculation=True)
    self.assertEqual(wer_details.word_in_cmu_dict("{asdfhj}"), True)
    self.assertEqual(wer_details.word_in_cmu_dict("asdfhj"), False)

def test_phoneme_to_poa(self):
    wer_details = WERDetails("../experiments/baseline/wer_details/per_utt",
                             skip_calculation=True)
    self.assertEqual(wer_details.phoneme_to_poa("P"), "Bilabial")
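
# The methods above follow unittest conventions and presumably live on a
# unittest.TestCase subclass (the class definition is not shown here). A
# typical way to run them, assuming a hypothetical module name test_corpus:
#
#     python -m unittest test_corpus -v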
Example #9
import time
from glob import glob

import numpy as np
import pandas as pd

from corpus import WERDetails
from utils import HParam

# partition = "test"
# number_of_phonemes = 40
preprocessing = True

if preprocessing:
    config = HParam("configs/dutch.yaml")
    wer_details = WERDetails(
        "experiments/jasmin_example/scoring_kaldi/wer_details/per_utt",
        skip_calculation=False,
        config=config)

    #phoneme, other = wer_details.all_poa_afers()

    t = time.time()
    moa_mat = wer_details.moa_confusion_matrix()
    s = time.time()
    print(s - t, "secs")
    poa_mat = wer_details.poa_confusion_matrix()
    k = time.time()
    print(k - s, "secs")
    print(poa_mat)
    print()

    #df = pd.DataFrame(data=other, index=phoneme)
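
    # A sketch of persisting the matrices computed above; the file names are
    # illustrative, and a csvs/ directory is assumed to exist (the script
    # writes other results there as well):
    pd.DataFrame(moa_mat).to_csv("csvs/moa_confusion_matrix.csv")
    pd.DataFrame(poa_mat).to_csv("csvs/poa_confusion_matrix.csv")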

preprocessing = False
separation = True
if separation:
    files = glob("experiments/voicefilter_experiment/*_ss_result.txt")
else:
    files = glob("experiments/voicefilter_experiment/*_se_result.txt")
config = HParam("../configs/eng_espnet.yaml")



if preprocessing:
    dfs = list()
    for file in files:
        wer_details = WERDetails(file, skip_calculation=False, config=config)
        phoneme, other = wer_details.all_pers()

        dfs.append(pd.DataFrame(data=[other[1:]], columns=phoneme[1:], index=[file]))


    result = pd.concat(dfs, axis=0, join="outer")

    if separation:
        result.to_csv("csvs/separation_results_with_clean.csv")
    else:
        result.to_csv("csvs/enhancement_results_with_clean.csv")
else:

    if separation:
        df = pd.read_csv("../csvs/separation_results_with_clean.csv")