Example no. 1
    def __init__(self, model_name, trans_df):

        from collections import Counter  # Counter() is used for the tallies below
        from espnet2.bin.asr_inference import Speech2Text
        from espnet_model_zoo.downloader import ModelDownloader
        import jiwer

        self.model_name = model_name
        d = ModelDownloader()
        self.asr_model = Speech2Text(**d.download_and_unpack(model_name))
        self.input_txt_list = []
        self.clean_txt_list = []
        self.output_txt_list = []
        self.transcriptions = []
        self.true_txt_list = []
        self.sample_rate = int(
            d.data_frame[d.data_frame["name"] == model_name]["fs"])
        self.trans_df = trans_df
        self.trans_dic = self._df_to_dict(trans_df)
        self.mix_counter = Counter()
        self.clean_counter = Counter()
        self.est_counter = Counter()
        self.transformation = jiwer.Compose([
            jiwer.ToLowerCase(),
            jiwer.RemovePunctuation(),
            jiwer.RemoveMultipleSpaces(),
            jiwer.Strip(),
            jiwer.SentencesToListOfWords(),
            jiwer.RemoveEmptyStrings(),
        ])
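The snippet above only shows the constructor. As a minimal sketch of how the stored transformation chain might later be used (the score method name is hypothetical and not part of the original class; it assumes the same jiwer 2.x API as the other examples):

    def score(self, ground_truth, hypothesis):
        # Hypothetical helper: reuse the transformation built in __init__ to
        # compute the word error rate between a reference and an ASR hypothesis.
        import jiwer
        return jiwer.wer(ground_truth,
                         hypothesis,
                         truth_transform=self.transformation,
                         hypothesis_transform=self.transformation)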
Example no. 2
    def _calc_metrics(self, ground_truth, hypothesis):
        transformation = jiwer.Compose([
            jiwer.ToLowerCase(),
            jiwer.RemoveMultipleSpaces(),
            jiwer.RemoveWhiteSpace(replace_by_space=True),
            jiwer.SentencesToListOfWords(word_delimiter=" ")
        ])

        mer = jiwer.mer(ground_truth,
                        hypothesis,
                        truth_transform=transformation,
                        hypothesis_transform=transformation)

        wer = jiwer.wer(ground_truth,
                        hypothesis,
                        truth_transform=transformation,
                        hypothesis_transform=transformation)

        wil = jiwer.wil(ground_truth,
                        hypothesis,
                        truth_transform=transformation,
                        hypothesis_transform=transformation)

        wip = jiwer.wip(ground_truth,
                        hypothesis,
                        truth_transform=transformation,
                        hypothesis_transform=transformation)

        return mer, wer, wil, wip
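Each of the four calls above re-runs the alignment. As an alternative sketch (assuming a jiwer release whose compute_measures returns these keys; Example no. 11 below relies on 'wer', 'mer' and 'wil', while 'wip' may be missing in older versions), a single call can produce all metrics at once:

def _calc_metrics_single_pass(ground_truth, hypothesis, transformation):
    # Hypothetical variant: one alignment pass instead of four separate ones.
    measures = jiwer.compute_measures(ground_truth,
                                      hypothesis,
                                      truth_transform=transformation,
                                      hypothesis_transform=transformation)
    return (measures['mer'], measures['wer'],
            measures['wil'], measures.get('wip'))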
Example no. 3
def rmPunctuation(values):
    """preprocess the list of words to RemovePunctuation"""
    newValues = []
    for v in values:
        newValue = jiwer.RemovePunctuation()(v)
        newValue = jiwer.Strip()(newValue)
        newValue = jiwer.RemoveMultipleSpaces()(newValue)
        newValues.append(newValue)
    return newValues
Example no. 4
def _str_clean(input_string: str) -> list:
    """
    Use jiwer's transformations to lower-case the input, normalise its
    whitespace and split it into a list of words.
    """
    transformation = jiwer.Compose([
        jiwer.ToLowerCase(),
        jiwer.RemoveMultipleSpaces(),
        jiwer.RemoveWhiteSpace(replace_by_space=True),
        jiwer.SentencesToListOfWords(word_delimiter=" ")
    ])
    return transformation(input_string)
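A quick usage sketch: with the pre-3.0 jiwer API used throughout these examples, the composed transformation returns a list of words rather than a string (hence the list return type above):

words = _str_clean("Hello  WORLD")
# expected with jiwer 2.x: ['hello', 'world']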
Example no. 5
def extract_contents(path, names):
    sentences = list()
    for filename in names:
        file_to_open = path + "GoogleOutput.".join(filename.split("."))
        try:
            with open(file_to_open, "r") as f:
                sentence = f.read()
                sentences.append(jiwer.RemoveMultipleSpaces()(sentence))
        except OSError:
            # Fall back to a placeholder when the transcript file cannot be read.
            sentences.append("unknownvalueerror")
    return sentences
Example no. 6
def clean(content):
    # Drop Kaldi non-word tokens, then map the ASCII placeholder characters
    # back to the original diacritics (ć, š, ž, đ, č, dž).
    sentence = jiwer.RemoveKaldiNonWords()(content)
    sentence = sentence.replace("^", "ć")
    sentence = jiwer.SubstituteRegexes({
        r"{": r"š",
        r"`": r"ž",
        r"}": r"đ",
        r"~": r"č",
        r"#": r"dž"
    })(sentence)
    sentence = jiwer.RemoveMultipleSpaces()(sentence)
    return sentence
Example no. 7
def calc_wer(ground_truth, hypothesis):
    transformation = jiwer.Compose([
        jiwer.ToLowerCase(),
        jiwer.RemoveMultipleSpaces(),
        jiwer.Strip(),
        jiwer.ExpandCommonEnglishContractions(),
        jiwer.RemovePunctuation()
    ])
    wer = jiwer.wer(ground_truth,
                    hypothesis,
                    truth_transform=transformation,
                    hypothesis_transform=transformation)
    return wer
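A minimal usage sketch for calc_wer (the sentences are illustrative only; the exact score depends on the installed jiwer version and its tokenisation defaults):

error_rate = calc_wer("hello world, how are you",
                      "hello world how are you")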
Example no. 8
def get_paired_text_corrected(batch):
    sentences = list()
    for filename in batch:
        with open(path_txt + filename, "r") as f:
            # Clean each transcript: drop Kaldi non-words, restore the
            # diacritic characters, then collapse repeated spaces.
            sentence = jiwer.RemoveKaldiNonWords()(f.read())
            sentence = sentence.replace("^", "ć")
            sentence = jiwer.SubstituteRegexes({
                r"{": r"š",
                r"`": r"ž",
                r"}": r"đ",
                r"~": r"č",
                r"#": r"dž"
            })(sentence)
            sentences.append(jiwer.RemoveMultipleSpaces()(sentence))
    return sentences
Example no. 9
def sentence_wer(reference: str, prediction: str):
    transformation = jiwer.Compose([
        jiwer.RemoveMultipleSpaces(),
        jiwer.RemovePunctuation(),
        jiwer.Strip(),
        jiwer.ToLowerCase(),
        jiwer.ExpandCommonEnglishContractions(),
        jiwer.RemoveWhiteSpace(replace_by_space=True),
        jiwer.SentencesToListOfWords(),
        jiwer.RemoveEmptyStrings(),
    ])

    return jiwer.wer(reference.strip(),
                     prediction.strip(),
                     truth_transform=transformation,
                     hypothesis_transform=transformation)
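A usage sketch for sentence_wer: because the transformation ends with SentencesToListOfWords, the comparison is word level, so one substituted word out of four reference words should give 0.25 with the jiwer 2.x API assumed here:

score = sentence_wer("the quick brown fox", "the quick brown dog")
# expected: 0.25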
Example no. 10
def normalize_sentence(sentence):
    """Normalize sentence"""
    # Convert all characters to upper.
    sentence = sentence.upper()
    # Delete punctuations.
    sentence = jiwer.RemovePunctuation()(sentence)
    # Remove \n, \t, \r, \x0c.
    sentence = jiwer.RemoveWhiteSpace(replace_by_space=True)(sentence)
    # Remove multiple spaces.
    sentence = jiwer.RemoveMultipleSpaces()(sentence)
    # Remove white space in two end of string.
    sentence = jiwer.Strip()(sentence)

    return sentence
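A usage sketch for normalize_sentence (the expected output assumes the jiwer transforms behave as described in the comments above):

cleaned = normalize_sentence("hello,\tworld!\n")
# expected: 'HELLO WORLD'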
Example no. 11
def analyze():
    try:
        req_data = request.get_json()

        compose_rule_set = []
        if req_data.get('to_lower_case', False):
            compose_rule_set.append(jiwer.ToLowerCase())
        if req_data.get('strip_punctuation', False):
            compose_rule_set.append(jiwer.RemovePunctuation())
        if req_data.get('strip_words', False):
            compose_rule_set.append(jiwer.Strip())
        if req_data.get('strip_multi_space', False):
            compose_rule_set.append(jiwer.RemoveMultipleSpaces())
        word_excepts = req_data.get('t_words', '')
        if word_excepts != '':
            words = [a.strip() for a in word_excepts.split(",")]
            compose_rule_set.append(jiwer.RemoveSpecificWords(words))

        compose_rule_set.append(
            jiwer.RemoveWhiteSpace(
                replace_by_space=req_data.get('replace_whitespace', False)))

        transformation = jiwer.Compose(compose_rule_set)

        measures = jiwer.compute_measures(req_data.get('s_truth', ""),
                                          req_data.get('s_hypo', ""),
                                          truth_transform=transformation,
                                          hypothesis_transform=transformation)

        return jsonify({
            "wer": measures['wer'],
            "mer": measures['mer'],
            "wil": measures['wil']
        })
    except Exception:
        return jsonify("API endpoint Error")
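A hypothetical client call against the handler above (the field names come from the handler itself; the URL, port and route are assumptions, since the Flask routing is not shown):

import requests

payload = {
    "s_truth": "hello world",
    "s_hypo": "hello word",
    "to_lower_case": True,
    "strip_punctuation": True,
    "strip_multi_space": True,
    "replace_whitespace": True,
}
response = requests.post("http://localhost:5000/analyze", json=payload)
print(response.json())  # e.g. {"wer": ..., "mer": ..., "wil": ...}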
Example no. 12
def metric(ref_trans, asr_trans, lang):
    if lang == "en":
        transformation = jiwer.Compose([
            jiwer.Strip(),
            jiwer.ToLowerCase(),
            jiwer.RemoveWhiteSpace(replace_by_space=True),
            jiwer.RemoveMultipleSpaces(),
            jiwer.SentencesToListOfWords(word_delimiter=" "),
            jiwer.RemoveEmptyStrings(),
            jiwer.RemovePunctuation(),
        ])
        wer = jiwer.wer(
            ref_trans,
            asr_trans,
            truth_transform=transformation,
            hypothesis_transform=transformation,
        )
    elif lang == "cn":
        del_symbols = re.compile(r"[^\u4e00-\u9fa5]+")
        for idx in range(len(asr_trans)):
            sentence = re.sub(del_symbols, "", asr_trans[idx])
            sentence = list(sentence)
            sentence = " ".join(sentence)
            asr_trans[idx] = sentence

            sentence = re.sub(del_symbols, "", ref_trans[idx])
            sentence = list(sentence)
            sentence = " ".join(sentence)
            ref_trans[idx] = sentence
        asr_valid = set(asr_trans)
        assert len(asr_valid) == len(asr_trans)
        wer = jiwer.wer(ref_trans, asr_trans)

    else:
        raise ValueError("Args error!")
    return wer
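A usage sketch for metric (English branch; in this jiwer API version wer also accepts lists of sentences, and the strings below are purely illustrative):

refs = ["hello world", "good morning everyone"]
hyps = ["hello word", "good morning everyone"]
score = metric(refs, hyps, lang="en")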
Example no. 13
                        break
                    tstart = next_sub.start.hours * 3600 + next_sub.start.minutes * 60 + next_sub.start.seconds + next_sub.start.milliseconds / 1000
                    if (tstart - end) > 0.5:
                        srt.push(next_sub)
                        break
                    end = next_sub.end.hours * 3600 + next_sub.end.minutes * 60 + next_sub.end.seconds + next_sub.end.milliseconds / 1000

                    ground_truth = ground_truth + " " + next_sub.text_without_tags
                    hypothesis = kd.query_text(start, end)
                else:
                    break
            kd.mark_words(start, end)

            transformation = jiwer.Compose([
                jiwer.ToLowerCase(),
                jiwer.RemoveMultipleSpaces(),
                jiwer.RemoveWhiteSpace(replace_by_space=True),
                jiwer.SentencesToListOfWords(),
                jiwer.RemovePunctuation(),
                jiwer.RemoveEmptyStrings(),
                jiwer.SubstituteRegexes({r"ё": r"е"})
            ])
            gt = transformation([ground_truth])
            hp = transformation([hypothesis])

            gt, hp = replace_pairs(gt, hp)
            hp, gt = replace_pairs(hp, gt)

            wer(gt, hp)

            r = jiwer.compute_measures(