def main(model_name, dictionary, language_model, sequitur_model=None, debug=0,
         verbose=False, prompt_words=False, *audio_corpora):

    misc.init_app('speech_kaldi_export')

    if verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    language_model_dir = LANGUAGE_MODELS_DIR.resolve() / language_model
    exit_if_language_model_dir_doesnt_exist(language_model_dir)

    config = misc.load_config ('.speechrc')

    work_dir = ASR_MODELS_DIR / 'kaldi' / model_name
    kaldi_root = config.get("speech", "kaldi_root")

    data_dir = work_dir / "data"
    mfcc_dir = work_dir / "mfcc"

    wav16_dir = config.get("speech", "wav16")

    create_basic_work_dir_structure(
        str(data_dir),
        wav16_dir,
        str(mfcc_dir),
        str(work_dir),
        str(language_model_dir),
        kaldi_root)

    if sequitur_model:
        sequitur_model_path = str(SEQUITUR_MODEL_DIR / sequitur_model)
    else:
        sequitur_model_path = None

    generate_speech_and_text_corpora(data_dir,
                                     wav16_dir,
                                     debug,
                                     sequitur_model_path,
                                     dictionary,
                                     audio_corpora,
                                     prompt_words)

    copy_scripts_and_config_files(work_dir, kaldi_root)
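
# NOTE: hypothetical sketch. The helper exit_if_language_model_dir_doesnt_exist
# is called above but its body is not part of this example; a minimal
# implementation consistent with its name might look like this:
import sys
import logging

def exit_if_language_model_dir_doesnt_exist(language_model_dir):
    if not language_model_dir.is_dir():
        logging.error("Language model directory %s does not exist." %
                      language_model_dir)
        sys.exit(1)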
Example #2
def main(verbose=False, *speech_corpora):
    """Scan directory for audio files and convert them to wav files

    For each speech corpus `speech_corpus`

    1. the resulting wav files are written to the directory
       `.speechrc.wav16`/<speech_corpus>/

    2. the transcripts in data/src/speech/<speech_corpus>/transcripts_*.csv are
       updated.
    """
    misc.init_app('speech_audio_scan')

    config = misc.load_config('.speechrc')

    speech_corpora_dir = Path(config.get("speech", "speech_corpora"))
    wav16 = Path(config.get("speech", "wav16"))

    if len(speech_corpora) < 1:
        logging.error("At least one speech corpus must be provided.")
        sys.exit(1)

    if verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    exit_if_corpus_is_missing(speech_corpora_dir, speech_corpora)

    for speech_corpus in speech_corpora:
        transcripts = Transcripts(corpus_name=speech_corpus, create_db=True)
        out_wav16_subdir = wav16 / speech_corpus
        out_wav16_subdir.mkdir(parents=True, exist_ok=True)
        in_root_corpus_dir = speech_corpora_dir / speech_corpus

        scan_audiodir(str(in_root_corpus_dir), transcripts,
                      str(out_wav16_subdir))

        transcripts.save()
        print("%s: new transcripts saved." % speech_corpus)
        print("")
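
# NOTE: hypothetical sketch. The exit_if_corpus_is_missing helper called
# above is not shown in this example; a minimal version might be:
import sys
import logging

def exit_if_corpus_is_missing(speech_corpora_dir, speech_corpora):
    for speech_corpus in speech_corpora:
        if not (speech_corpora_dir / speech_corpus).is_dir():
            logging.error("Speech corpus %s not found in %s." %
                          (speech_corpus, speech_corpora_dir))
            sys.exit(1)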
Example #3
def main(corpus, verbose=False):
    """Generate training sentences for language models

    Let text_corpus be the argument given on the command line.
    Then the corpus text_corpus is tokenized and each sentence is written on a
    separate line into `data/dst/text-corpora/<text_corpus>.txt`. All
    punctuation marks are stripped.
    """
    init_app('speech_sentences')

    if verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    config = load_config('.speechrc')

    TEXT_CORPORA_DIR.mkdir(parents=True, exist_ok=True)

    out_file = TEXT_CORPORA_DIR / (corpus + ".txt")

    with codecs.open(str(out_file), "w", "utf-8") as outf:
        # I haven't figured out how to refactor the processing algorithms of the
        # parole corpus to implement a generator.
        if corpus == "parole_de":
            corpus_path = config.get("speech", corpus)
            proc_parole_de(corpus_path, load_punkt_tokenizer, outf)
        elif corpus in TEXT_CORPORA:
            corpus_path = config.get("speech", corpus)
            for sentence in TEXT_CORPORA[corpus](corpus_path):
                outf.write(sentence + "\n")
        elif corpus in SPEECH_CORPORA:
            for sentence in SPEECH_CORPORA[corpus]():
                outf.write(sentence + "\n")
        else:
            raise Exception("This shouldn't happen.")

    logging.info('%s written.' % out_file)
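
# NOTE: hypothetical sketch. TEXT_CORPORA and SPEECH_CORPORA (defined
# elsewhere in the project) act as dispatch tables mapping a corpus name to
# a generator that yields one tokenized sentence at a time, e.g.:
#
#   TEXT_CORPORA = {
#       'europarl_de': proc_europarl_de,  # proc_europarl_de(path) yields sentences
#   }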
Example #4
def main(verbose=False, debug_sgm_limit=0):
    """Train the Punkt tokenizer on the German Parole corpus"""
    init_app('speech_sentences')

    if verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    config = load_config('.speechrc')

    parole_path = config.get("speech", "parole_de")

    logging.info("training punkt...")

    punkt_trainer = nltk.tokenize.punkt.PunktTrainer()

    train_punkt_wrapper = parole.TrainPunktWrapper(punkt_trainer)

    parole.parole_crawl(parole_path, train_punkt_wrapper.train_punkt,
                        debug_sgm_limit)

    logging.info("finalizing punkt training...")
    punkt_trainer.finalize_training(verbose=True)
    logging.info("punkt training done. %d text segments." %
                 train_punkt_wrapper.punkt_count)

    params = punkt_trainer.get_params()
    # print "Params: %s" % repr(params)

    parole.PUNKT_PICKLEFN.parent.mkdir(parents=True, exist_ok=True)
    tokenizer = nltk.tokenize.punkt.PunktSentenceTokenizer(params)
    with open(str(parole.PUNKT_PICKLEFN), mode='wb') as f:
        pickle.dump(tokenizer, f, protocol=pickle.HIGHEST_PROTOCOL)

    logging.info('%s written.' % parole.PUNKT_PICKLEFN)
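
# Usage sketch: the pickled tokenizer written above can later be loaded and
# applied like this (the sample sentence is illustrative only):
import pickle

with open(str(parole.PUNKT_PICKLEFN), mode='rb') as f:
    tokenizer = pickle.load(f)

for sentence in tokenizer.tokenize(u"Dr. Meier kam an. Er war muede."):
    print(sentence)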
Example #5
PROC_TITLE = 'speech_build_lm'

SENTENCES_STATS = 100000

LANGUAGE_MODELS_DIR = "data/dst/lm"
TEXT_CORPORA_DIR = "data/dst/text-corpora"

DEFAULT_ORDER = 4
DEFAULT_PRUNE = '0 3 5'

#
# init
#

init_app(PROC_TITLE)

#
# config
#

config = load_config('.speechrc')

#
# commandline
#

parser = OptionParser(
    "usage: %prog [options] <language_model> <text_corpus> [ <text_corpus2> ... ]"
)
Example #6
        return 'http://www.wikidata.org/entity/' + p[36:]
    if p.startswith('http://www.wikidata.org/qualifier/'):
        return 'http://www.wikidata.org/entity/' + p[34:]
    if p.startswith('http://www.wikidata.org/prop/statement/'):
        return 'http://www.wikidata.org/entity/' + p[39:]
    if p.startswith('http://www.wikidata.org/prop/'):
        return 'http://www.wikidata.org/entity/' + p[29:]
    return None
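
# Examples of the prefix rewriting above -- property, qualifier and
# statement IRIs are all mapped onto the corresponding entity IRI, anything
# else yields None:
#
#   'http://www.wikidata.org/prop/P31'       -> 'http://www.wikidata.org/entity/P31'
#   'http://www.wikidata.org/qualifier/P580' -> 'http://www.wikidata.org/entity/P580'
#   'http://example.org/foo'                 -> None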

DEFAULT_OUTPUT='foo.n3'

#
# init, cmdline
#

misc.init_app('ldfmirror')

parser = OptionParser("usage: %prog [options]")

parser.add_option ("-v", "--verbose", action="store_true", dest="verbose",
                   help="verbose output")
parser.add_option ("-o", "--output", dest="outputfn", type = "string", default=DEFAULT_OUTPUT,
                   help="output file, default: %s" % DEFAULT_OUTPUT)

(options, args) = parser.parse_args()

if options.verbose:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)
Example #7
import requests

from time import time
from optparse import OptionParser

from nltools import misc

import pprint
from rdflib.plugins.sparql import parser, algebra
from rdflib import term, Graph

#
# init, cmdline
#

misc.init_app('dbpmirror')

option_parser = OptionParser("usage: %prog [options] foo.aiml")

option_parser.add_option("-v",
                         "--verbose",
                         action="store_true",
                         dest="verbose",
                         help="verbose output")

(options, args) = option_parser.parse_args()

if options.verbose:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)
Example #8
from nltools import misc
from nltools.tokenizer import tokenize
from nltools.phonetics import ipa2xsampa, xsampa2ipa
from nltools.sequiturclient import sequitur_gen_ipa

from speech_transcripts import Transcripts
from speech_lexicon import Lexicon

SEQUITUR_MODEL = 'data/models/sequitur-%s-latest'

#
# init
#

misc.init_app('speech_lex_missing')

#
# command line
#

parser = OptionParser("usage: %prog [options] [filter] lex corpus")

parser.add_option("-g",
                  "--generate",
                  action="store_true",
                  dest="generate",
                  help="generate phoneme transcriptions using sequitur g2p")

parser.add_option("-l",
                  "--lang",
Example #9
                outf.write(u"]")
        else:
            outf.write(u"                   %% %s" % unicode(resp))

        outf.write(u")\n")

    td_count += 1
    if td_count % 2 == 0:
        outf.write("\n")


#
# init, cmdline
#

misc.init_app('aip2py')

parser = OptionParser("usage: %prog [options] foo.aip")

parser.add_option("-v",
                  "--verbose",
                  action="store_true",
                  dest="verbose",
                  help="verbose output")
parser.add_option("-o",
                  "--output",
                  dest="outputfn",
                  type="string",
                  default=DEFAULT_OUTPUT,
                  help="output file, default: %s" % DEFAULT_OUTPUT)
Example #10
import codecs
import traceback

from optparse import OptionParser
from nltools import misc
from nltools.phonetics import ipa2xsampa

from speech_lexicon import Lexicon

DEFAULT_DICT = 'dict-de.ipa'

#
# init terminal
#

misc.init_app('speech_sequitur_export')

#
# commandline
#

parser = OptionParser("usage: %prog [options] ")

parser.add_option("-d",
                  "--dict",
                  dest="dict_name",
                  type="str",
                  default=DEFAULT_DICT,
                  help="dict to export (default: %s)" % DEFAULT_DICT)
parser.add_option("-v",
                  "--verbose",
Example #11
import time
import xml.etree.ElementTree as ET
import numpy as np

from optparse               import OptionParser
from gensim.models          import word2vec
from nltools                import misc, tokenizer
from align_model            import AlignModel

OUTPUT_DIR  = 'out'

#
# init, cmdline
#

misc.init_app('csv_align')

parser = OptionParser("usage: %prog [options] foo.csv")

parser.add_option ("-v", "--verbose", action="store_true", dest="verbose",
                   help="verbose output")

(options, args) = parser.parse_args()

if options.verbose:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)

if len(args) != 1:
    parser.print_usage()
Example #12
import requests

from time import time
from optparse import OptionParser

from nltools import misc

import pprint
from rdflib.plugins.sparql import parser, algebra
from rdflib import term, Graph

#
# init, cmdline
#

misc.init_app('wkdmirror')

option_parser = OptionParser("usage: %prog [options] foo.aiml")

option_parser.add_option("-v",
                         "--verbose",
                         action="store_true",
                         dest="verbose",
                         help="verbose output")

(options, args) = option_parser.parse_args()

if options.verbose:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)
Example #13
import logging
import random
import time
import rdflib
import dateutil.parser

from copy        import deepcopy
from optparse    import OptionParser
from nltools     import misc
from config      import RDF_PREFIXES

#
# init, config,  cmdline
#

misc.init_app('prefixtool')

parser = OptionParser("usage: %prog [options]")

parser.add_option ("-v", "--verbose", action="store_true", dest="verbose",
                   help="verbose output")

(options, args) = parser.parse_args()

if options.verbose:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)

#
# main
Example #14
from optparse               import OptionParser
from gensim.models          import word2vec
from zamiaai                import model
from nltools                import misc
from nltools.tokenizer      import tokenize
from sqlalchemy.orm         import sessionmaker

DEFAULT_OUTPUT       = 'bar.py'
DEFAULT_LENGTH_LIMIT = 12

#
# init, cmdline
#

misc.init_app('csv2py')

parser = OptionParser("usage: %prog [options] foo.csv")

parser.add_option ("-l", "--length-limit", dest="length_limit", type = "int", default=DEFAULT_LENGTH_LIMIT,
                   help="length limit in words, default: %d" % DEFAULT_LENGTH_LIMIT)
parser.add_option ("-o", "--output", dest="outputfn", type = "string", default=DEFAULT_OUTPUT,
                   help="output file, default: %s" % DEFAULT_OUTPUT)
parser.add_option ("-v", "--verbose", action="store_true", dest="verbose",
                   help="verbose output")

(options, args) = parser.parse_args()

if options.verbose:
    logging.basicConfig(level=logging.DEBUG)
else:
Example #15
                    outf.write(u"                    u\"%s\"" % i)
                outf.write (u"]")
        else:
            outf.write(u"                   %% %s" % unicode(resp))

        outf.write(u")\n")

    td_count += 1
    if td_count % 2 == 0:
        outf.write("\n")

#
# init, cmdline
#

misc.init_app('aip2py')

parser = OptionParser("usage: %prog [options] foo.aip")

parser.add_option ("-v", "--verbose", action="store_true", dest="verbose",
                   help="verbose output")
parser.add_option ("-o", "--output", dest="outputfn", type = "string", default=DEFAULT_OUTPUT,
                   help="output file, default: %s" % DEFAULT_OUTPUT)

(options, args) = parser.parse_args()

if options.verbose:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)
Example #16
import logging
import time
import numpy as np

from optparse               import OptionParser
from zamiaai                import model
from nltools                import misc
from nltools.tokenizer      import tokenize
from sqlalchemy.orm         import sessionmaker
from align_model            import AlignModel

#
# init, cmdline
#

misc.init_app('train_model')

parser = OptionParser("usage: %prog [options]")

parser.add_option ("-v", "--verbose", action="store_true", dest="verbose",
                   help="verbose output")

(options, args) = parser.parse_args()

if options.verbose:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)

#
# db
Example #17
from optparse          import OptionParser
from nltools.misc      import compress_ws, load_config, init_app
from nltools.tokenizer import tokenize

SENTENCEFN      = 'data/dst/speech/en/sentences.txt'
SENTENCES_STATS = 1000

DEBUG_LIMIT     = 0
# DEBUG_LIMIT     = 1000

#
# init 
#

init_app ('speech_sentences')

config = load_config ('.speechrc')

europarl       = config.get("speech", "europarl_en")
movie_dialogs  = config.get("speech", "cornell_movie_dialogs")
web_questions  = config.get("speech", "web_questions")
yahoo_answers  = config.get("speech", "yahoo_answers")

#
# commandline parsing
#

parser = OptionParser("usage: %prog [options] )")

parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
Example #18
                     ])

    test_cnt += 1

    clause = Clause(head=head)

    print(unicode(clause))

    # import pdb; pdb.set_trace()


#
# init, cmdline
#

misc.init_app('pl2nlp')

parser = OptionParser("usage: %prog [options] foo.pl")

parser.add_option("-v",
                  "--verbose",
                  action="store_true",
                  dest="verbose",
                  help="verbose output")
parser.add_option("-o",
                  "--output",
                  dest="outputfn",
                  type="string",
                  default=DEFAULT_OUTPUT,
                  help="output file, default: %s" % DEFAULT_OUTPUT)
Example #19
            outf.write(u'\n    %% actions %s'    % repr(ivr_actions))

        if offset == len(ivrs):
            outf.write ('.\n')
        else:
            outf.write (',\n')

    # import pdb; pdb.set_trace()



#
# init, cmdline
#

misc.init_app('pl2aip')

parser = OptionParser("usage: %prog [options] foo.pl")

parser.add_option ("-v", "--verbose", action="store_true", dest="verbose",
                   help="verbose output")
parser.add_option ("-o", "--output", dest="outputfn", type = "string", default=DEFAULT_OUTPUT,
                   help="output file, default: %s" % DEFAULT_OUTPUT)

(options, args) = parser.parse_args()

if options.verbose:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)
Example #21
#
#
# look up IMDB top movies in wikidata
#

import sys
import codecs
import re
import requests
import urllib2
import urllib
from bs4 import BeautifulSoup

from nltools import misc

misc.init_app ('gen_movies')

WD_ENDPOINT = 'https://query.wikidata.org/bigdata/namespace/wdq/sparql?'

def remote_sparql(query, response_format='application/sparql-results+json'):

    url = WD_ENDPOINT + urllib.urlencode({'query': query})

    # print url

    response = requests.get(
      url,
      headers = {"accept": response_format},
    )
    return response
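
# Usage sketch (assumptions: wdt:P345 is Wikidata's IMDb-ID property and the
# endpoint pre-declares the wdt: prefix): look up a film by IMDb ID and walk
# the standard SPARQL JSON result bindings.
query = u'SELECT ?film WHERE { ?film wdt:P345 "tt0111161" . }'
resp = remote_sparql(query)
if resp.status_code == 200:
    for binding in resp.json()['results']['bindings']:
        print(binding['film']['value'])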
Example #22
import codecs
import readline 
 
from builtins               import input
from optparse               import OptionParser
from zamiaai.ai_kernal      import AIKernal, AIContext, USER_PREFIX, LANGUAGES
from nltools                import misc
from nltools.tokenizer      import tokenize

PROC_TITLE        = 'chatbot'

#
# init 
#

misc.init_app(PROC_TITLE)

#
# command line
#

parser = OptionParser("usage: %prog [options])")

parser.add_option("-v", "--verbose", action="store_true", dest="verbose", 
                  help="enable debug output")

(options, args) = parser.parse_args()

#
# logger
#
Example #23
from speech_transcripts import Transcripts
from nltools.tokenizer import tokenize

WORKDIR_CONT = 'data/dst/asr-models/cmusphinx_cont/%s'
WORKDIR_PTM = 'data/dst/asr-models/cmusphinx_ptm/%s'

NJOBS = 12

ENABLE_NOISE_FILLER = False  # CMU Sphinx decoding seems to become unstable otherwise
NOISE_WORD = 'nspc'

#
# init
#

misc.init_app('speech_sphinx_export')

config = misc.load_config('.speechrc')

#
# commandline parsing
#

parser = OptionParser(
    "usage: %prog [options] model_name dict lm corpus [corpus2 ...]")

parser.add_option("-l",
                  "--lang",
                  dest="lang",
                  type="str",
                  default='de',
Example #24
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# generate n3 of given names
#

import codecs

from nltools import misc

misc.init_app ('gen_names')

def mangle_name(name):

    res = u''
    for c in name:
        if c.isalpha():
            res += c

    return res
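
# For example, mangle_name(u'Anne-Marie') returns u'AnneMarie' -- every
# non-alphabetic character is dropped.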

with codecs.open('old/names/head.n3', 'r', 'utf8') as f:
    for line in f:
        print(line.strip())

print("")
Example #25
    def do_utterances(self, subcmd, opts, *paths):
        """${cmd_name}: get sample or all utterances from DB

        ${cmd_usage}
        ${cmd_option_list}
        """

        self.kernal.dump_utterances(opts.num_utterances, opts.dictfn,
                                    opts.lang, opts.module)


#
# init terminal
#

misc.init_app('ai_cli')

# reload(sys)
# sys.setdefaultencoding('utf-8')
# sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)

#
# logging
#

logging.basicConfig(level=DEFAULT_LOGLEVEL)
logging.getLogger("requests").setLevel(logging.WARNING)

#
# run cli
#
Example #26
from nltools.misc import load_config, init_app, mkdirs
from nltools.tokenizer import tokenize
from speech_transcripts import Transcripts

WORKDIR = 'data/dst/speech/%s/srilm'

SOURCES = ['data/dst/speech/%s/sentences.txt',
           'data/dst/speech/%s/ai-sentences.txt']

SENTENCES_STATS      = 100000

#
# init 
#

init_app ('speech_build_lm')

config = load_config ('.speechrc')

kaldi_root       = config.get("speech", "kaldi_root")
ngram_path       = '%s/tools/srilm/bin/i686-m64/ngram' % kaldi_root
ngram_count_path = '%s/tools/srilm/bin/i686-m64/ngram-count' % kaldi_root

#
# commandline parsing
#

parser = OptionParser("usage: %prog [options] )")

parser.add_option ("-d", "--debug", dest="debug", type='int', default=0,
                   help="limit number of sentences (debug purposes only), default: 0 (unlimited)")
Example #27
from optparse import OptionParser

from nltools.asr import ASR
from nltools.pulserecorder import PulseRecorder, MIX_MODE_BOTH, MIX_MODE_LEFT, MIX_MODE_RIGHT
from nltools.vad import VAD
from nltools import misc

DEFAULT_VOLUME = 150
SAMPLE_RATE = 16000
DEFAULT_MIX_MODE = 'both'

#
# init
#

misc.init_app('live_recorder')

#
# commandline parsing
#

parser = OptionParser("usage: %prog [options]")

parser.add_option("-m",
                  "--mix-mode",
                  dest='mix_mode',
                  type='str',
                  default=DEFAULT_MIX_MODE,
                  help="mix mode (left, right, both - default: %s)" %
                  DEFAULT_MIX_MODE)
Example #28
from nltools import misc, tokenizer

FILTER_CHARS = [u'[', u']', u'{', u'}', u'(', u')', u'=', u'>', u'<']

DEFAULT_LOGLEVEL   = logging.DEBUG
DEFAULT_OUTPUT     = 'foo.aip'
DEFAULT_LANG       = 'en'
DEFAULT_LIMIT      = 12 # tokens
DEFAULT_PRIO       = -99

#
# init, cmdline
#

misc.init_app('chat2aip')

parser = OptionParser("usage: %prog [options] foo.chat [ bar.chat ... ]")

parser.add_option ("-L", "--limit", dest="limit", type = "int", default=DEFAULT_LIMIT,
                   help="length limit, default: %d" % DEFAULT_LIMIT)
parser.add_option ("-l", "--lang", dest="lang", type = "string", default=DEFAULT_LANG,
                   help="language, default: %s" % DEFAULT_LANG)
parser.add_option ("-v", "--verbose", action="store_true", dest="verbose",
                   help="verbose output")
parser.add_option ("-o", "--output", dest="outputfn", type = "string", default=DEFAULT_OUTPUT,
                   help="output file, default: %s" % DEFAULT_OUTPUT)
parser.add_option ("-p", "--prio", dest="prio", type = "int", default=DEFAULT_PRIO,
                   help="priority, default: %d" % DEFAULT_PRIO)
parser.add_option ("-t", "--tokenize", action="store_true", dest="tokenize",
                   help="run tokenizer on question and answer for spelling laundering")
Example #29
# import xml.etree.ElementTree as ET
from optparse import OptionParser
from lxml import etree as ET

from nltools import misc
from nltools.tokenizer import tokenize

DEFAULT_LOGLEVEL = logging.DEBUG
DEFAULT_OUTPUT = 'foo.chat'
DEFAULT_LANG = 'en'

#
# init, cmdline
#

misc.init_app('aim2halp')

parser = OptionParser("usage: %prog [options] foo.aiml")

parser.add_option("-l",
                  "--lang",
                  dest="lang",
                  type="string",
                  default=DEFAULT_LANG,
                  help="language, default: %s" % DEFAULT_LANG)
parser.add_option("-n",
                  "--aiml-namespace",
                  action="store_true",
                  dest="aiml_namespace",
                  help="use aiml: tags")
parser.add_option("-o",
Example #30
from nltools import misc, tokenizer

FILTER_CHARS = [u'[', u']', u'{', u'}', u'(', u')', u'=', u'>', u'<']

DEFAULT_LOGLEVEL = logging.DEBUG
DEFAULT_OUTPUT = 'foo.aip'
DEFAULT_LANG = 'en'
DEFAULT_LIMIT = 12  # tokens
DEFAULT_PRIO = -99

#
# init, cmdline
#

misc.init_app('chat2aip')

parser = OptionParser("usage: %prog [options] foo.chat [ bar.chat ... ]")

parser.add_option("-L",
                  "--limit",
                  dest="limit",
                  type="int",
                  default=DEFAULT_LIMIT,
                  help="length limit, default: %d" % DEFAULT_LIMIT)
parser.add_option("-l",
                  "--lang",
                  dest="lang",
                  type="string",
                  default=DEFAULT_LANG,
                  help="language, default: %s" % DEFAULT_LANG)
Example #31
from time import time
from optparse import OptionParser
from StringIO import StringIO

from nltools import misc
from nltools.phonetics import ipa2xsampa
from nltools.tokenizer import tokenize

from speech_lexicon import Lexicon
from speech_transcripts import Transcripts

#
# init
#

misc.init_app('apply_reviews')

config = misc.load_config('.speechrc')

#
# command line
#

parser = OptionParser("usage: %prog foo.csv [bar.csv ...])")

parser.add_option(
    "-f",
    "--force",
    action="store_true",
    dest="force",
    help="force: apply quality rating also on already reviewed entries")
Example #32
DEFAULT_OUTPUT     = 'foo.py'
DEFAULT_LANG       = 'en'
DEFAULT_LIMIT      = 12 # tokens

#
# db
#

Session = sessionmaker(bind=model.engine)
session = Session()

#
# init, cmdline
#

misc.init_app('convert_chat')

parser = OptionParser("usage: %prog [options] foo.chat [ bar.chat ... ]")

parser.add_option ("-L", "--limit", dest="limit", type = "int", default=DEFAULT_LIMIT,
                   help="length limit, default: %d" % DEFAULT_LIMIT)
parser.add_option ("-l", "--lang", dest="lang", type = "string", default=DEFAULT_LANG,
                   help="language, default: %s" % DEFAULT_LANG)
parser.add_option ("-v", "--verbose", action="store_true", dest="verbose",
                   help="verbose output")
parser.add_option ("-o", "--output", dest="outputfn", type = "string", default=DEFAULT_OUTPUT,
                   help="output file, default: %s" % DEFAULT_OUTPUT)

(options, args) = parser.parse_args()

if options.verbose:
Example #33
        for i, utt in enumerate(utts_de):
            if i < len(utts_de) - 1:
                print('                "%s",' % utt)
            else:
                print('                "%s"' % utt)
        print('               ),')
        print('             or ("", "").')

    print("")


#
# init, cmdline
#

misc.init_app('transform-block')

parser = OptionParser("usage: %prog [options]")

parser.add_option("-v",
                  "--verbose",
                  action="store_true",
                  dest="verbose",
                  help="verbose output")

(options, args) = parser.parse_args()

if options.verbose:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)
Example #34
from optparse               import OptionParser
from StringIO               import StringIO

from nltools                import misc
from nltools.tokenizer      import tokenize

from speech_transcripts     import Transcripts

WORKDIR             = 'data/dst/speech/%s/deepspeech'
PROMPT_AUDIO_FACTOR = 1000

#
# init 
#

misc.init_app ('speech_deepspeech_export')

config = misc.load_config ('.speechrc')

#
# commandline parsing
#

parser = OptionParser("usage: %prog [options] )")

parser.add_option ("-d", "--debug", dest="debug", type='int', default=0,
                   help="limit number of transcripts (debug purposes only), default: 0 (unlimited)")
parser.add_option ("-l", "--lang", dest="lang", type = "str", default='de',
                   help="language (default: de)")
parser.add_option ("-v", "--verbose", action="store_true", dest="verbose",
                   help="enable verbose logging")
Example #35
from speech_lexicon import Lexicon
from speech_transcripts import Transcripts

APP_NAME = 'wav2letter_auto_review'

WORK_DIR = 'tmp/w2letter_auto_review'

CUDA_DEVICE = '1'
WAV_MIN_SIZE = 1024

#
# main
#

misc.init_app(APP_NAME)

#
# commandline
#

parser = OptionParser("usage: %prog [options] <model> <audio_corpus>")

parser.add_option(
    "-d",
    "--debug",
    dest="debug",
    type='int',
    default=0,
    help="Limit number of sentences (debug purposes only), default: 0")
Example #36
parser.add_option("-l",
                  "--lang",
                  dest="lang",
                  type="str",
                  default='de',
                  help="language (default: de)")

(options, args) = parser.parse_args()

lang = options.lang

#
# init terminal
#

misc.init_app('speech_audio_scan')

#
# config
#

config = misc.load_config('.speechrc')

scan_dirs = []
if lang == 'de':

    scan_dirs.append(config.get("speech", "vf_audiodir_de"))
    scan_dirs.append(config.get("speech", "extrasdir_de"))
    scan_dirs.append(config.get("speech", "gspv2_dir") + '/train')
    scan_dirs.append(config.get("speech", "gspv2_dir") + '/dev')
    # scan_dirs.append(config.get("speech", "gspv2_dir") + '/test')
Example #37
DEFAULT_OUTPUT = 'foo.py'
DEFAULT_LANG = 'en'
DEFAULT_LIMIT = 12  # tokens

#
# db
#

Session = sessionmaker(bind=model.engine)
session = Session()

#
# init, cmdline
#

misc.init_app('convert_chat')

parser = OptionParser("usage: %prog [options] foo.chat [ bar.chat ... ]")

parser.add_option("-L",
                  "--limit",
                  dest="limit",
                  type="int",
                  default=DEFAULT_LIMIT,
                  help="length limit, default: %d" % DEFAULT_LIMIT)
parser.add_option("-l",
                  "--lang",
                  dest="lang",
                  type="string",
                  default=DEFAULT_LANG,
                  help="language, default: %s" % DEFAULT_LANG)
Example #38
    misc.mkdirs('%s/conf' % work_dir)
    misc.copy_file('data/src/speech/kaldi-mfcc.conf',
                   '%s/conf/mfcc.conf' % work_dir)
    misc.copy_file('data/src/speech/kaldi-mfcc-hires.conf',
                   '%s/conf/mfcc_hires.conf' % work_dir)
    misc.copy_file('data/src/speech/kaldi-online-cmvn.conf',
                   '%s/conf/online_cmvn.conf' % work_dir)
    misc.mkdirs('%s/local' % work_dir)
    misc.copy_file('data/src/speech/kaldi-score.sh',
                   '%s/local/score.sh' % work_dir)
    misc.mkdirs('%s/local/nnet3' % work_dir)
    misc.copy_file('data/src/speech/kaldi-run-ivector-common.sh',
                   '%s/local/nnet3/run_ivector_common.sh' % work_dir)


misc.init_app('speech_kaldi_export')

#
# commandline
#

parser = OptionParser(
    "usage: %prog [options] <model_name> <dictionary> <language_model> <audio_corpus> [ <audio_corpus2> ... ]"
)

parser.add_option(
    "-d",
    "--debug",
    dest="debug",
    type='int',
    default=0,
Example #39
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
#
# generate AI-Prolog of given names
#

import codecs

from nltools import misc

misc.init_app('names_aip')


def name2pred(name):

    res = u''
    for c in name:
        if c.isalpha():
            res += c

    return 'name' + res
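
# For example, name2pred(u'Jean-Luc') returns 'nameJeanLuc' -- non-alphabetic
# characters are dropped and the 'name' prefix is prepended.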


for gender in ['Female', 'Male']:

    with codecs.open('%s20.txt' % gender, 'r', 'utf8') as f:
Example #40
from optparse import OptionParser

from nltools                import misc
from nltools.tokenizer      import tokenize
from nltools.phonetics      import ipa2xsampa

from speech_lexicon     import Lexicon
# from speech_transcripts import Transcripts

WORKDIR          = 'data/dst/asr-models/kaldi/segmentation'

#
# init 
#

misc.init_app ('abook_kaldi_segment')

config = misc.load_config ('.speechrc')

#
# commandline parsing
#

parser = OptionParser("usage: %prog [options] model srcdir")

parser.add_option ("-v", "--verbose", action="store_true", dest="verbose",
                   help="enable verbose logging")

(options, args) = parser.parse_args()

if options.verbose:
Example #41
# - compute length stats about segments
# - compute set of words, print out words not covered by dict
# - decode segments using latest kaldi model
#

LANG = 'de'
PROC_TITLE = 'abook-analyze'

MODELDIR = '../data/models/kaldi-chain-generic-%s-latest' % LANG
MODEL = 'tdnn_sp'

#
# init terminal
#

misc.init_app(PROC_TITLE)

#
# config
#

config = misc.load_config('.speechrc')

#
# command line
#

parser = OptionParser("usage: %prog [options] directory")

parser.add_option("-v",
                  "--verbose",
Example #42
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# generate AI-Prolog of given names
#

import codecs

from nltools import misc

misc.init_app ('names_aip')

def name2pred(name):

    res = u''
    for c in name:
        if c.isalpha():
            res += c

    return 'name' + res

for gender in ['Female', 'Male']:

    with codecs.open('%s20.txt' % gender, 'r', 'utf8') as f:

        for line in f: