示例#1
0
def align_progress(audio_f, transcript, proto_langdir, nnet_dir, want_progress=False):
    """Align `transcript` against the audio in `audio_f`.

    Generator: when `want_progress` is true, yields progress updates from
    `show_progress`; otherwise yields a single final alignment from
    `make_alignment`. Falls back to plain transcription when the transcript
    is empty.

    Args:
        audio_f: open audio file/stream handed to Kaldi.
        transcript: reference transcript text (may be empty).
        proto_langdir: prototype language directory (contains graphdir/words.txt).
        nnet_dir: Kaldi nnet model directory.
        want_progress: if True, stream progress instead of the final alignment.
    """
    if len(transcript.strip()) == 0:
        # Fall back on normal transcription if no transcript is provided
        logging.info("Falling back on normal transcription")
        for ret in _normal_transcribe(audio_f, proto_langdir, nnet_dir, want_progress=want_progress):
            yield show_progress(ret)
        return

    vocab_path = os.path.join(proto_langdir, "graphdir/words.txt")
    with open(vocab_path) as f:
        vocab = metasentence.load_vocabulary(f)

    ms = metasentence.MetaSentence(transcript, vocab)

    ks = ms.get_kaldi_sequence()

    gen_hclg_filename = language_model.make_bigram_language_model(ks, proto_langdir)
    k = None
    try:
        k = standard_kaldi.Kaldi(nnet_dir, gen_hclg_filename, proto_langdir)

        # BUG FIX: `tran` was previously unbound (NameError) if
        # transcribe_progress yielded nothing; initialize it and raise a
        # clear error instead of a confusing NameError.
        tran = None
        for tran in k.transcribe_progress(audio_f):
            if want_progress:
                yield show_progress(tran, ms)
        if not want_progress:
            if tran is None:
                raise RuntimeError("Kaldi produced no transcription output")
            yield make_alignment(tran, ms)
    finally:
        # Always release the Kaldi process and the generated HCLG graph file.
        if k:
            k.stop()
        os.unlink(gen_hclg_filename)
def lm_transcribe_progress(audio_f, transcript, proto_langdir, nnet_dir):
    """Transcribe `audio_f` with a transcript-biased language model.

    Generator: yields dicts of the form
    `{"transcript": <original text>, "words": <diff-aligned words>}` as
    partial transcriptions arrive. Falls back to plain transcription when
    the transcript is empty.

    Args:
        audio_f: open audio file/stream handed to Kaldi.
        transcript: reference transcript text (may be empty).
        proto_langdir: prototype language directory (contains graphdir/words.txt).
        nnet_dir: Kaldi nnet model directory.
    """
    if len(transcript.strip()) == 0:
        # Fall back on normal transcription if no transcript is provided
        logging.info("Falling back on normal transcription")
        for ret in _normal_transcribe(audio_f, proto_langdir, nnet_dir):
            yield ret
        return

    vocab_path = os.path.join(proto_langdir, "graphdir/words.txt")
    with open(vocab_path) as f:
        vocab = metasentence.load_vocabulary(f)

    ms = metasentence.MetaSentence(transcript, vocab)

    ks = ms.get_kaldi_sequence()

    gen_hclg_filename = language_model.make_bigram_language_model(ks, proto_langdir)
    # BUG FIX: if the Kaldi constructor raised, the finally block hit a
    # NameError on `k` and masked the real exception. Guard it the same
    # way align_progress does.
    k = None
    try:
        k = standard_kaldi.Kaldi(nnet_dir, gen_hclg_filename, proto_langdir)

        for trans in k.transcribe_progress(audio_f):
            yield {
                "transcript": transcript,
                "words": diff_align.align(trans["words"], ms),
            }
    finally:
        if k:
            k.stop()
        os.unlink(gen_hclg_filename)
示例#3
0
def lm_transcribe_progress(audio_f, transcript, proto_langdir, nnet_dir):
    """Transcribe `audio_f` with a transcript-biased language model.

    Generator: yields dicts of the form
    `{"transcript": <original text>, "words": <diff-aligned words>}` as
    partial transcriptions arrive. Falls back to plain transcription when
    the transcript is empty.

    Args:
        audio_f: open audio file/stream handed to Kaldi.
        transcript: reference transcript text (may be empty).
        proto_langdir: prototype language directory (contains graphdir/words.txt).
        nnet_dir: Kaldi nnet model directory.
    """
    if len(transcript.strip()) == 0:
        # Fall back on normal transcription if no transcript is provided
        logging.info("Falling back on normal transcription")
        for ret in _normal_transcribe(audio_f, proto_langdir, nnet_dir):
            yield ret
        return

    vocab_path = os.path.join(proto_langdir, "graphdir/words.txt")
    with open(vocab_path) as f:
        vocab = metasentence.load_vocabulary(f)

    ms = metasentence.MetaSentence(transcript, vocab)

    ks = ms.get_kaldi_sequence()

    gen_hclg_filename = language_model.make_bigram_language_model(
        ks, proto_langdir)
    # BUG FIX: if the Kaldi constructor raised, the finally block hit a
    # NameError on `k` and masked the real exception. Guard it the same
    # way align_progress does.
    k = None
    try:
        k = standard_kaldi.Kaldi(nnet_dir, gen_hclg_filename, proto_langdir)

        for trans in k.transcribe_progress(audio_f):
            yield {
                "transcript": transcript,
                "words": diff_align.align(trans["words"], ms),
            }
    finally:
        if k:
            k.stop()
        os.unlink(gen_hclg_filename)
示例#4
0
def align_progress(audio_f,
                   transcript,
                   proto_langdir,
                   nnet_dir,
                   want_progress=False):
    """Align `transcript` against the audio in `audio_f`.

    Generator: when `want_progress` is true, yields progress updates from
    `show_progress`; otherwise yields a single final alignment from
    `make_alignment`. Falls back to plain transcription when the transcript
    is empty.

    Args:
        audio_f: open audio file/stream handed to Kaldi.
        transcript: reference transcript text (may be empty).
        proto_langdir: prototype language directory (contains graphdir/words.txt).
        nnet_dir: Kaldi nnet model directory.
        want_progress: if True, stream progress instead of the final alignment.
    """
    if len(transcript.strip()) == 0:
        # Fall back on normal transcription if no transcript is provided
        logging.info("Falling back on normal transcription")
        for ret in _normal_transcribe(audio_f,
                                      proto_langdir,
                                      nnet_dir,
                                      want_progress=want_progress):
            yield show_progress(ret)
        return

    vocab_path = os.path.join(proto_langdir, "graphdir/words.txt")
    with open(vocab_path) as f:
        vocab = metasentence.load_vocabulary(f)

    ms = metasentence.MetaSentence(transcript, vocab)

    ks = ms.get_kaldi_sequence()

    gen_hclg_filename = language_model.make_bigram_language_model(
        ks, proto_langdir)
    k = None
    try:
        k = standard_kaldi.Kaldi(nnet_dir, gen_hclg_filename, proto_langdir)

        # BUG FIX: `tran` was previously unbound (NameError) if
        # transcribe_progress yielded nothing; initialize it and raise a
        # clear error instead of a confusing NameError.
        tran = None
        for tran in k.transcribe_progress(audio_f):
            if want_progress:
                yield show_progress(tran, ms)
        if not want_progress:
            if tran is None:
                raise RuntimeError("Kaldi produced no transcription output")
            yield make_alignment(tran, ms)
    finally:
        # Always release the Kaldi process and the generated HCLG graph file.
        if k:
            k.stop()
        os.unlink(gen_hclg_filename)
示例#5
0
def by_word(opcodes):
    """Expand difflib.SequenceMatcher.get_opcodes() output so that every
    yielded opcode touches exactly one word.

    'delete' spans become one single-word delete per source index,
    'insert' spans one single-word insert per target index, and
    'equal'/'replace' spans are paired off index-by-index.
    """
    for tag, a_start, a_end, b_start, b_end in opcodes:
        if tag == 'delete':
            # One deletion per source word; the target span stays empty.
            for idx in range(a_start, a_end):
                yield (tag, idx, idx + 1, b_start, b_start)
        elif tag == 'insert':
            # One insertion per target word; the source span stays empty.
            for idx in range(b_start, b_end):
                yield (tag, a_start, a_start, idx, idx + 1)
        else:
            # 'equal' / 'replace': walk both ranges in lockstep.
            for a_idx, b_idx in zip(range(a_start, a_end), range(b_start, b_end)):
                yield (tag, a_idx, a_idx + 1, b_idx, b_idx + 1)


if __name__ == '__main__':
    # CLI: align a JSON transcription against a text transcript.
    # Usage: script.py TEXT_FILE JSON_FILE OUTPUT_FILE
    TEXT_FILE = sys.argv[1]
    JSON_FILE = sys.argv[2]
    OUTPUT_FILE = sys.argv[3]

    with open('data/graph/words.txt') as f:
        vocab = metasentence.load_vocabulary(f)

    # BUG FIX: the file handles below were opened without ever being
    # closed; use context managers so they are released deterministically.
    with open(TEXT_FILE) as f:
        ms = metasentence.MetaSentence(f.read(), vocab)
    with open(JSON_FILE) as f:
        alignment = json.load(f)['words']

    out = align(alignment, ms)

    with open(OUTPUT_FILE, 'w') as f:
        json.dump(out, f, indent=2)
示例#6
0
def by_word(opcodes):
    """Yield difflib-style opcodes split down to single-word granularity.

    Given get_opcodes() output, produce an equivalent sequence in which
    each opcode modifies at most one word on either side.
    """
    for op, s1, e1, s2, e2 in opcodes:
        if op == 'insert':
            # Empty source span; emit one target word at a time.
            for j in range(s2, e2):
                yield (op, s1, s1, j, j + 1)
        elif op == 'delete':
            # Empty target span; emit one source word at a time.
            for j in range(s1, e1):
                yield (op, j, j + 1, s2, s2)
        else:
            # 'equal' / 'replace': pair the two ranges element-wise.
            paired = zip(range(s1, e1), range(s2, e2))
            for a, b in paired:
                yield (op, a, a + 1, b, b + 1)

if __name__=='__main__':
    # CLI: align a JSON transcription against a text transcript.
    # Usage: script.py TEXT_FILE JSON_FILE OUTPUT_FILE
    TEXT_FILE = sys.argv[1]
    JSON_FILE = sys.argv[2]
    OUTPUT_FILE = sys.argv[3]

    with open('data/graph/words.txt') as f:
        vocab = metasentence.load_vocabulary(f)

    # BUG FIX: the file handles below were opened without ever being
    # closed; use context managers so they are released deterministically.
    with open(TEXT_FILE) as f:
        ms = metasentence.MetaSentence(f.read(), vocab)
    with open(JSON_FILE) as f:
        alignment = json.load(f)['words']

    out = align(alignment, ms)

    with open(OUTPUT_FILE, 'w') as f:
        json.dump(out, f, indent=2)