def gen_dependencies(self, grammarname, dictname):
    """
    Generate the dependencies (grammar, dictionary) for HVite.

    Writes one token per line into the grammar (.lab) file and saves a
    pronunciation dictionary built from the aligned tokens/phonetizations.

    @param grammarname (str) is the file name of the tokens
    @param dictname (str) is the dictionary file name

    """
    dictpron = DictPron()

    with codecs.open(grammarname, 'w', encoding) as flab:
        # self._tokens and self._phones are parallel whitespace-separated
        # strings; pronunciation variants are separated by "|" and phones
        # within a variant by "-".
        for token, pron in zip(self._tokens.split(), self._phones.split()):
            # dictionary: one entry per pronunciation variant
            for variant in pron.split("|"):
                dictpron.add_pron(token, variant.replace("-", " "))
                # optionally also add a variant followed by a short pause
                # (was "is True": PEP 8 discourages singleton comparison)
                if self._infersp:
                    variant = variant + '-sil'
                    dictpron.add_pron(token, variant.replace("-", " "))
            # lab file (one token per line)
            flab.write(token + "\n")

    dictpron.save_as_ascii(dictname)
def gen_slm_dependencies(self, basename, N=3):
    """
    Generate the dependencies (slm, dictionary) for julius.

    Saves a pronunciation dictionary (basename + ".dict") and a statistical
    language model in ARPA format (basename + ".arpa") estimated from the
    tokens of the current unit.

    @param basename (str - IN) the base name of the slm file and of the dictionary file
    @param N (int) Language model N-gram length.

    """
    dictname = basename + ".dict"
    slmname = basename + ".arpa"

    phoneslist = self._phones.split()
    tokenslist = self._tokens.split()

    # Dictionary: one entry per pronunciation variant
    # (variants separated by "|", phones within a variant by "-")
    dictpron = DictPron()
    for token, pron in zip(tokenslist, phoneslist):
        for variant in pron.split("|"):
            dictpron.add_pron(token, variant.replace("-", " "))

    # Map sentence-boundary symbols to silence when not already defined
    # (was "is True": PEP 8 discourages singleton comparison)
    if dictpron.is_unk(START_SENT_SYMBOL):
        dictpron.add_pron(START_SENT_SYMBOL, "sil")
    if dictpron.is_unk(END_SENT_SYMBOL):
        dictpron.add_pron(END_SENT_SYMBOL, "sil")

    dictpron.save_as_ascii(dictname, False)

    # Write the SLM: estimate N-gram probabilities and save in ARPA format
    model = NgramsModel(N)
    model.append_sentences([self._tokens])
    probas = model.probabilities(method="logml")
    arpaio = ArpaIO()
    arpaio.set(probas)
    arpaio.save(slmname)