Example 1
    def setUp(self):
        # build the ASR under test from a stub recogniser and a lattice-to-n-best callback
        self.recogniser = DummyRecogniser()
        self.asr = ASR(self.recogniser, dummy_lattice_to_nbest)
Example 2
import requests
from flask import Flask, request

from asr import ASR

app = Flask(__name__)

# ==============================================================================
# Parameters
# ==============================================================================

FB_API_URL = 'https://graph.facebook.com/v5.0/me/messages'
VERIFY_TOKEN = '7F151hSt4EBuUSxfc+OxEdNRR20ZCfkVe6i3ywb2ZUY='
PAGE_ACCESS_TOKEN = 'EAAvvvwDxUFABANyZBdYXUjeg6TRCLzgHJsctZCAq9xVRVrtZCgBbsbUPTZB9CnCJSmKBW1nqlyWBO1ZBFmAOME85iFr7pPeHGkm8oHHCBn9EfLXnxZCXVz1JKJnWywdW00aqgtAUa1s5AeeIrOmR3OGsZBt0xXMPvjlKfUQuhCxygZDZD'

asr_model = ASR()

# ==============================================================================
# Messenger
# ==============================================================================


def triggers(text):
    # map keywords in the user's reply to a follow-up prompt;
    # pad with spaces so the keyword checks only match whole words
    text = ' ' + text.lower() + ' '

    if ' no ' in text:
        response = ' -- Can you please take your medication'

    elif ' not yet ' in text:
        response = ' -- Can you please take your medication'
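
# Hypothetical helper, not part of the source: it shows how the FB_API_URL and
# PAGE_ACCESS_TOKEN defined above are typically used with requests to send a reply
# via the Facebook Send API. The function name and its arguments are illustrative only.
def send_message(recipient_id, text):
    payload = {
        "recipient": {"id": recipient_id},
        "message": {"text": text},
    }
    # the page access token is passed as a query parameter on the Send API endpoint
    return requests.post(FB_API_URL,
                         params={"access_token": PAGE_ACCESS_TOKEN},
                         json=payload)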
Example 3
    def setUp(self):
        # build the ASR under test from a stub recogniser plus n-best and best-path callbacks
        self.recogniser = DummyRecogniser()
        self.asr = ASR(self.recogniser, dummy_to_nbest, dummy_to_best_path)
Example 4
# ********* EXAMPLE SCENARIO ***************
S0 = 45  #------->EUR
sigma = 0.6  #--->EUR·day^(-1/2), which corresponds to an annual volatility of approximately 21%.
T = 63  #-------->trading days
N = [22, 62]  #-->the set of possible delivery dates before expiry
V = 4000000  #--->stocks·day^(-1)
Q = 20000000  #-->stocks
eta = 0.1  #----->EUR·stock^(-1)·day^(-1)
fi = 0.75
gamma = 2.5e-7  #>EUR^(-1)
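
# sanity check on the 21% figure quoted for sigma above (assuming ~252 trading days per year):
# 0.6 EUR·day^(-1/2) * sqrt(252) ≈ 9.5 EUR of annual standard deviation, and 9.5 / 45 EUR ≈ 21%.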

NQ = 10  # number of points in the computational grid for q
INF = 1e9  # large value used to represent infinity

scenario = ASR(S0, sigma, T, N, V, Q, eta, fi, gamma)
scenario.initialize(NQ, INF)

# uncomment 2 of the 3 following lines to calculate and save TETAs
# - use 'save_TETAs()' to save results to a text file
# - use 'save_gzip_TETAs()' to save results to a gzip file
scenario.get_TETAs()
#scenario.save_TETAs()
scenario.save_gzip_TETAs()

# uncomment 2 of the 4 following lines to read TETAs from a file:
# - use 'read_TETAs' to read from a text file
# - use 'read_gzip_TETAs' to read from a gzip file
#filename = 'teta_qgrid_50_gamma_2.5e-07.txt' # define a filename to read TETAs
#scenario.read_TETAs(filename)
#filename = 'teta_qgrid_50_gamma_2.5e-07.gzip' # define a filename to read TETAs
#scenario.read_gzip_TETAs(filename)
Example 5
    @classmethod
    def fromScratch(cls):
        # alternative constructor: build the object with a fresh ASR and a sentence matcher
        # over the cleaned Quran corpus
        self = cls()
        self.asr = ASR()
        self.sentMatcher = SentMatcher(corpus="../quran-simple-clean.txt")
Example 6
def main(config):
    ### create Experiment Directory ###
    # combine all hyperparameters into a single file
    hparams = load_hparams(config.exp_config)
    hparams["model_config"] = load_hparams(config.model_config)

    # create exp dir
    sb.create_experiment_directory(experiment_directory=config.output_folder,
                                   hyperparams_to_save=config.exp_config,
                                   overrides=None)

    ### Datasets and Tokenizer ###
    train_data, valid_data, test_data, tokenizer = dataio_prepare(hparams)

    # Trainer initialization
    run_opts = {
        "device": "cuda:0"
    }  # certain args from the yaml file will automatically get picked up as run_opts
    # see https://github.com/speechbrain/speechbrain/blob/develop/recipes/LibriSpeech/ASR/transformer/train.py#L372
    # see https://github.com/speechbrain/speechbrain/blob/d6adc40e742107c34ae38dc63484171938b4d237/speechbrain/core.py#L124
    #print(type(hparams["model_config"]["modules"]))
    #print(type(hparams))
    #exit()
    asr_brain = ASR(
        modules=hparams["model_config"]["modules"],
        opt_class=hparams["model_config"]["Adam"],
        hparams=hparams["model_config"],
        run_opts=run_opts,
        checkpointer=hparams["model_config"]["checkpointer"],
    )

    # adding objects to trainer:
    asr_brain.tokenizer = tokenizer  # hparams["tokenizer"]

    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["model_config"]["train_dataloader_opts"],
        valid_loader_kwargs=hparams["model_config"]["valid_dataloader_opts"],
    )

    # NOTE: execution stops here; the data-preparation steps below never run and are kept for reference only
    raise NotImplementedError

    ### get Train Data ###
    # list of {'audio__file': str, 'transcript_all_file': str, 'transcript_uid': str, 'filter_criteria': str}
    # meaning that <audio__file>'s transcript is the one in the <transcript_all_file> with id <transcript_uid>
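    # for illustration only (hypothetical paths, not taken from the source), a single
    # manifest entry could look like:
    #   {"audio__file": "data/utt_0001.wav",
    #    "transcript_all_file": "data/all_transcripts.txt",
    #    "transcript_uid": "utt_0001",
    #    "filter_criteria": "clean"}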
    train_corpus = get_utterance_manifest_from_data_config(
        config.train_data_config)
    for x in train_corpus:
        assert os.path.exists(
            x["transcript_all_file"]
        ), "data transcript file {} does not exist! Exiting!".format(
            x["transcript_all_file"])

    ### create json file for SpeechBrain-->SentencePiece ###
    selected_transcripts_json, annotation_read = create_transcripts_json(
        train_corpus)

    ### train custom SentencePiece Tokenizer ###
    with tempfile.NamedTemporaryFile(mode="w+", suffix=".json") as f:
        f.write(json.dumps(selected_transcripts_json))
        f.seek(0)

        SentencePiece(model_dir=config.output_folder,
                      vocab_size=config.vocab_size,
                      annotation_train=f.name,
                      annotation_read=annotation_read,
                      annotation_format="json",
                      model_type=config.model_type,
                      character_coverage=config.character_coverage,
                      annotation_list_to_check=config.annotation_list_to_check)
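
# Hypothetical driver for main(), not part of the source: it sketches the argparse interface
# that main() appears to expect. Argument names mirror the config attributes read above;
# every default value is illustrative only.
import argparse

def build_arg_parser():
    parser = argparse.ArgumentParser(description="Train a SpeechBrain ASR model")
    parser.add_argument("--exp_config", required=True, help="experiment hyperparameter YAML")
    parser.add_argument("--model_config", required=True, help="model hyperparameter YAML")
    parser.add_argument("--output_folder", required=True, help="experiment output directory")
    parser.add_argument("--train_data_config", help="config describing the training manifests")
    parser.add_argument("--vocab_size", type=int, default=5000, help="SentencePiece vocabulary size")
    parser.add_argument("--model_type", default="unigram", help="SentencePiece model type")
    parser.add_argument("--character_coverage", type=float, default=1.0)
    parser.add_argument("--annotation_list_to_check", nargs="*", default=None)
    return parser

# usage sketch: main(build_arg_parser().parse_args())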
Example 7
                      default=DEFAULT_MODEL,
                      help="kaldi model, default: %s" % DEFAULT_MODEL)

    (options, args) = parser.parse_args()

    if options.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    kaldi_model_dir = options.model_dir
    kaldi_model = options.model

    asr = ASR(engine=ASR_ENGINE_NNET3,
              model_dir=kaldi_model_dir,
              kaldi_beam=DEFAULT_BEAM,
              kaldi_acoustic_scale=DEFAULT_ACOUSTIC_SCALE,
              kaldi_frame_subsampling_factor=DEFAULT_FRAME_SUBSAMPLING_FACTOR)

else:
    logging.basicConfig(level=logging.INFO)

    kaldi_model_dir = DEFAULT_MODEL_DIR
    kaldi_model = DEFAULT_MODEL

    asr = ASR(engine=ASR_ENGINE_NNET3,
              model_dir=DEFAULT_MODEL_DIR,
              kaldi_beam=DEFAULT_BEAM,
              kaldi_acoustic_scale=DEFAULT_ACOUSTIC_SCALE,
              kaldi_frame_subsampling_factor=DEFAULT_FRAME_SUBSAMPLING_FACTOR)
Example 8
	out = asr_model.transcribe(wav)
	toc = time.time()
	app.logger.info("ASR Model Transcription: "+out)
	app.logger.info("ASR Duration: {} seconds".format(toc-tic))
	# form response
	flask_response = app.response_class(response=flask.json.dumps({"text": out}),
	                                    status=200,
	                                    mimetype='application/json')
	return flask_response





if __name__ == '__main__':
	conf = parse_yaml("conf.yaml")
	
	# load ASR model
	app.logger.info("===== Loading ASR Model =====")
	asr_conf = conf["asr"]
	asr_model = ASR(asr_conf)
	
	# load TTS model
	app.logger.info("===== Loading TTS Model =====")
	tts_conf = conf["tts"]
	tts_model = TTS(tts_conf)

	# run server
	app.logger.info("===== Running the Server =====")
	app.run(host="0.0.0.0", port=5000)
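
# For reference, the lookups above only require conf.yaml to expose top-level "asr" and "tts"
# sections; a minimal sketch (every key inside the sections is hypothetical) could be:
#
#   asr:
#     model_path: models/asr_model.pt
#   tts:
#     model_path: models/tts_model.pt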