Example #1
# Python 2 style type checks: ListType, FloatType and UnicodeType come from
# the stdlib "types" module (removed in Python 3); ASR, DummyRecogniser,
# AudioUtils and the dummy_* helpers come from the project under test.
import unittest
from types import ListType, FloatType, UnicodeType


class TestASR(unittest.TestCase):
    def setUp(self):
        self.recogniser = DummyRecogniser()
        self.asr = ASR(self.recogniser, dummy_to_nbest, dummy_to_best_path)

    def test_asr_returns_dummy_final_hypothesis(self):
        interim_hypothesis = self.asr.recognize_chunk(
            self.load_pcm_sample_data())
        final_hypothesis = self.asr.get_final_hypothesis()

        self.assertEqual(type(final_hypothesis), ListType)
        self.assertGreater(len(final_hypothesis), 0)
        for hypothesis in final_hypothesis:
            self.assertEqual(type(hypothesis[0]), FloatType)
            self.assertEqual(type(hypothesis[1]), UnicodeType)
            self.assertGreaterEqual(hypothesis[0], 0)

    def test_recognize_chunk_returns_interim_results(self):
        interim_hypothesis = self.asr.recognize_chunk(
            self.load_pcm_sample_data())

        self.assertEqual(type(interim_hypothesis[0]), FloatType)
        self.assertEqual(type(interim_hypothesis[1]), UnicodeType)

    def test_recognize_chunk_calls_callback(self):
        self.callback_called = False
        self.asr.add_callback(self.callback)
        self.asr.recognize_chunk(self.load_pcm_sample_data())

        self.assertTrue(self.callback_called)

    def test_reset_resets_pipeline(self):
        self.asr.reset()
        self.assertTrue(self.recogniser.resetted)

    def load_pcm_sample_data(self):
        audio = AudioUtils()

        return audio.load_wav_from_file_as_pcm("./resources/test.wav")

    def callback(self):
        self.callback_called = True
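# Usage sketch: with the imports above and a sample file at ./resources/test.wav,
# the suite can be run with e.g. "python -m unittest test_asr" (the module name
# is hypothetical; this listing does not show the file name).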
Example #2
import unittest
from types import ListType, FloatType, UnicodeType  # Python 2 only


class TestASR(unittest.TestCase):
    def setUp(self):
        self.recogniser = DummyRecogniser()
        self.asr = ASR(self.recogniser, dummy_lattice_to_nbest)

    def test_asr_returns_empty_final_hypothesis_when_nothing_was_decoded(self):
        final_hypothesis = self.asr.get_final_hypothesis()
        self.assertEqual([(1.0, '')], final_hypothesis)

    def test_asr_returns_empty_interim_hypothesis_when_nothing_was_decoded(
            self):
        interim_hypothesis = self.asr.recognize_chunk(b'')
        self.assertEqual((1.0, ''), interim_hypothesis)

    def test_asr_returns_dummy_final_hypothesis(self):
        interim_hypothesis = self.asr.recognize_chunk(
            self.load_pcm_sample_data())
        final_hypothesis = self.asr.get_final_hypothesis()

        self.assertEqual(type(final_hypothesis), ListType)
        self.assertGreater(len(final_hypothesis), 0)
        for hypothesis in final_hypothesis:
            self.assertEqual(type(hypothesis[0]), FloatType)
            self.assertEqual(type(hypothesis[1]), UnicodeType)
            self.assertGreaterEqual(hypothesis[0], 0)

    def test_recognize_chunk_returns_interim_results(self):
        interim_hypothesis = self.asr.recognize_chunk(
            self.load_pcm_sample_data())

        self.assertEqual(type(interim_hypothesis[0]), FloatType)
        self.assertEqual(type(interim_hypothesis[1]), UnicodeType)

    def test_reset_resets_pipeline(self):
        self.asr.reset()
        self.assertTrue(self.recogniser.resetted)

    def load_pcm_sample_data(self):
        audio = AudioUtils()

        return audio.load_wav_from_file_as_pcm("./resources/test.wav")
Example #3
    def setUp(self):
        self.recogniser = DummyRecogniser()
        self.asr = ASR(self.recogniser, dummy_lattice_to_nbest)
Example #4
    def setUp(self):
        self.recogniser = DummyRecogniser()
        self.asr = ASR(self.recogniser, dummy_to_nbest, dummy_to_best_path)
Example #5
from tts import TTS
from asr import ASR

TTS.speak(ASR.listen())
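# A minimal loop sketch expanding the one-liner above (assumes the same
# TTS.speak / ASR.listen interfaces; the "stop" phrase is hypothetical):
while True:
    heard = ASR.listen()
    if heard.strip().lower() == "stop":
        break
    TTS.speak(heard)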
Example #6
# ********* EXAMPLE SCENARIO ***************
S0 = 45  #------->EUR
sigma = 0.6  #--->EUR*day^(-1/2), which corresponds to an annual volatility of approximately 21%.
T = 63  #-------->trading days
N = [22, 62]  #-->The set of possible dates for delivery before expiry
V = 4000000  #--->stocks*day^(-1)
Q = 20000000  #-->stocks
eta = 0.1  #----->EUR*stock^(-1)*day^(-1)
fi = 0.75
gamma = 2.5e-7  #>EUR^(-1)

NQ = 10  # the computational grid for q
INF = 1e9  # value for the infinity

scenario = ASR(S0, sigma, T, N, V, Q, eta, fi, gamma)
scenario.initialize(NQ, INF)

# uncomment 2 of the 3 following lines to calculate and save TETAs
# - use 'save_TETAs()' to save results to a text file
# - use 'save_gzip_TETAs()' to save results to a gzip file
scenario.get_TETAs()
#scenario.save_TETAs()
scenario.save_gzip_TETAs()

# uncomment 2 of the 4 following lines to read TETAs from a file:
# - use 'read_TETAs' to read from a text file
# - use 'read_gzip_TETAs' to read from a gzip file
#filename = 'teta_qgrid_50_gamma_2.5e-07.txt' # define a filename to read TETAs
#scenario.read_TETAs(filename)
#filename = 'teta_qgrid_50_gamma_2.5e-07.gzip' # define a filename to read TETAs
#scenario.read_gzip_TETAs(filename)
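# Sanity check on the volatility comment above (a sketch assuming 252 trading
# days per year): the relative annual volatility is sigma * sqrt(252) / S0.
import math
print(0.6 * math.sqrt(252) / 45)  # ~0.21, i.e. roughly 21% annual volatility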
Example #7
import requests
from flask import Flask, request

from asr import ASR

app = Flask(__name__)

# ==============================================================================
# Parameters
# ==============================================================================

FB_API_URL = 'https://graph.facebook.com/v5.0/me/messages'
VERIFY_TOKEN = 'REPLACE_WITH_YOUR_VERIFY_TOKEN'  # secret redacted; load from env/config
PAGE_ACCESS_TOKEN = 'REPLACE_WITH_YOUR_PAGE_ACCESS_TOKEN'  # secret redacted

asr_model = ASR()

# ==============================================================================
# Messenger
# ==============================================================================


def triggers(text):

    text = ' ' + text.lower() + ' '

    if ' no ' in text:
        response = ' -- Can you please take your medication'

    elif ' not yet ' in text:
        response = ' -- Can you please take your medication'
Example #8
    def fromScratch(cls):
        self = cls()
        self.asr = ASR()
        self.sentMatcher = SentMatcher(corpus="../quran-simple-clean.txt")
        return self  # the alternative constructor must return the instance
Example #9
class QuranChaptersFinder:
    def __init__(self, basedir=None):
        # basedir is optional: fromScratch passes nothing, while
        # fromAudioLabeling passes audioLabeling.basedir
        self.basedir = basedir

    @classmethod
    def fromScratch(cls):
        self = cls()
        self.asr = ASR()
        self.sentMatcher = SentMatcher(corpus="../quran-simple-clean.txt")
        return self

    @classmethod
    def fromAudioLabeling(cls, audioLabeling):
        self = cls(audioLabeling.basedir)
        self.asr = audioLabeling.asr
        self.sentMatcher = audioLabeling.sentMatcher
        return self

    def getChaptersPositions(self, filePath, fileLabels):
        '''
            filePath: audio file path
            fileLabels: a dictionary of labels containing the first and last
                indexes of verses in the file (usually generated by the
                audiolabeling class)
            This function finds where whole chapters lie within this audio.
        '''
        # validate the firstIndex and lastIndex first
        # get the list of chapters present between the two indexes
        # try to derive a formula that initialises the search window in the
        # region most likely to contain the beginning of the chapter
        # use QuranAsr with a corpus containing only the first verses of
        # chapters, to get results quickly and with good performance

        # for now, this function scans the file and checks whether it is noisy
        duration = 15  # window length in seconds
        # scan up to 10 hours in 15-second steps (assuming recognizeGoogle
        # takes start/duration in seconds)
        starts = range(0, 10 * 60 * 60, duration)
        unrecognizedTime = 0
        for s in starts:
            text = self.asr.recognizeGoogle(filePath,
                                            start=s,
                                            duration=duration)
            print("asr text:", text)
            # text = text[::-1]
            unrecognizedTime += text is None
            if text:
                print("text:", text)
                result = self.sentMatcher.match(text)
                print("sentMatcher result:", result)
            if unrecognizedTime == 7:  # give up after seven unrecognised windows
                print("file is so noisy, we can't find any word in it")
                return

    def _getChaptersInRegion(self, fromIndex=1, toIndex=6236):
        chapters = None
        with open("chapters_begin_end.txt", 'r') as chaptersPosFile:
            chapters = chaptersPosFile.read().splitlines(False)
        res = []
        for chapter in chapters:
            sura, start, end = [int(x) for x in chapter.split(" ")]
            if fromIndex <= start <= toIndex:
                # beginning of the chapter in this region
                res.append({
                    "sura": sura,
                    'actualLocation': start,
                    'type': 'start'
                })
            if fromIndex <= end <= toIndex:
                # end of the chapter in this region
                res.append({
                    "sura": sura,
                    'actualLocation': end,
                    'type': 'end'
                })
        return res
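# Usage sketch (the audio path and labels are hypothetical; assumes the
# classmethods above return the constructed instance):
finder = QuranChaptersFinder.fromScratch()
finder.getChaptersPositions("recitation.wav", fileLabels={})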
Example #10
def main(config):
    ### create Experiment Directory ###
    # combine all hyperparameters into a single file
    hparams = load_hparams(config.exp_config)
    hparams["model_config"] = load_hparams(config.model_config)

    # create exp dir
    sb.create_experiment_directory(experiment_directory=config.output_folder,
                                   hyperparams_to_save=config.exp_config,
                                   overrides=None)

    ### Datasets and Tokenizer ###
    train_data, valid_data, test_data, tokenizer = dataio_prepare(hparams)

    # Trainer initialization
    run_opts = {
        "device": "cuda:0"
    }  # certain args from the yaml file will automatically get picked up as run_opts
    # see https://github.com/speechbrain/speechbrain/blob/develop/recipes/LibriSpeech/ASR/transformer/train.py#L372
    # see https://github.com/speechbrain/speechbrain/blob/d6adc40e742107c34ae38dc63484171938b4d237/speechbrain/core.py#L124
    #print(type(hparams["model_config"]["modules"]))
    #print(type(hparams))
    #exit()
    asr_brain = ASR(
        modules=hparams["model_config"]["modules"],
        opt_class=hparams["model_config"]["Adam"],
        hparams=hparams["model_config"],
        run_opts=run_opts,
        checkpointer=hparams["model_config"]["checkpointer"],
    )

    # adding objects to trainer:
    asr_brain.tokenizer = tokenizer  # hparams["tokenizer"]

    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["model_config"]["train_dataloader_opts"],
        valid_loader_kwargs=hparams["model_config"]["valid_dataloader_opts"],
    )

    # NOTE: execution stops here; the tokenizer-preparation code below never runs
    raise NotImplementedError

    ### get Train Data ###
    # list of {'audio__file': str, 'transcript_all_file': str, 'transcript_uid': str, 'filter_criteria': str}
    # meaning that <audio__file>'s transcript is the one in the <transcript_all_file> with id <transcript_uid>
    train_corpus = get_utterance_manifest_from_data_config(
        config.train_data_config)
    for x in train_corpus:
        assert os.path.exists(
            x["transcript_all_file"]
        ), "data transcript file {} does not exist! Exiting!".format(
            x["transcript_all_file"])

    ### create json file for SpeechBrain-->SentencePiece ###
    selected_transcripts_json, annotation_read = create_transcripts_json(
        train_corpus)

    ### train custom SentencePiece Tokenizer ###
    with tempfile.NamedTemporaryFile(mode="w+", suffix=".json") as f:
        f.write(json.dumps(selected_transcripts_json))
        f.seek(0)

        SentencePiece(model_dir=config.output_folder,
                      vocab_size=config.vocab_size,
                      annotation_train=f.name,
                      annotation_read=annotation_read,
                      annotation_format="json",
                      model_type=config.model_type,
                      character_coverage=config.character_coverage,
                      annotation_list_to_check=config.annotation_list_to_check)
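# Invocation sketch (hypothetical argparse wiring; only a few of the config
# attributes read by main() above are wired up here):
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--exp_config", required=True)
    parser.add_argument("--model_config", required=True)
    parser.add_argument("--output_folder", required=True)
    main(parser.parse_args())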
Example #11
    # the snippet begins mid-call; the option flags below are reconstructed
    # from the surrounding code (options.model and the help text)
    parser.add_option("-m", "--model", dest="model", type="string",
                      default=DEFAULT_MODEL,
                      help="kaldi model, default: %s" % DEFAULT_MODEL)

    (options, args) = parser.parse_args()

    if options.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    kaldi_model_dir = options.model_dir
    kaldi_model = options.model

    asr = ASR(engine=ASR_ENGINE_NNET3,
              model_dir=kaldi_model_dir,
              kaldi_beam=DEFAULT_BEAM,
              kaldi_acoustic_scale=DEFAULT_ACOUSTIC_SCALE,
              kaldi_frame_subsampling_factor=DEFAULT_FRAME_SUBSAMPLING_FACTOR)

else:
    logging.basicConfig(level=logging.INFO)

    kaldi_model_dir = DEFAULT_MODEL_DIR
    kaldi_model = DEFAULT_MODEL

    asr = ASR(engine=ASR_ENGINE_NNET3,
              model_dir=DEFAULT_MODEL_DIR,
              kaldi_beam=DEFAULT_BEAM,
              kaldi_acoustic_scale=DEFAULT_ACOUSTIC_SCALE,
              kaldi_frame_subsampling_factor=DEFAULT_FRAME_SUBSAMPLING_FACTOR)
Example #12
	out = asr_model.transcribe(wav)
	toc = time.time()
	app.logger.info("ASR Model Transcription: "+out)
	app.logger.info("ASR Duration: {} seconds".format(toc-tic))
	# form response
	flask_response = app.response_class(response=flask.json.dumps({"text": out}),
	                                    status=200,
	                                    mimetype='application/json')
	return flask_response


if __name__ == '__main__':
	conf = parse_yaml("conf.yaml")
	
	# load ASR model
	app.logger.info("===== Loading ASR Model =====")
	asr_conf = conf["asr"]
	asr_model = ASR(asr_conf)
	
	# load TTS model
	app.logger.info("===== Loading TTS Model =====")
	tts_conf = conf["tts"]
	tts_model = TTS(tts_conf)

	# run server
	app.logger.info("===== Running the Server =====")
	app.run(host="0.0.0.0", port=5000)
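# Client sketch (the route path and upload field name are assumptions, since
# the route definition is truncated above, but the JSON response {"text": ...}
# matches the handler shown):
import requests

with open("test.wav", "rb") as f:
    resp = requests.post("http://localhost:5000/asr", files={"audio": f})
print(resp.json()["text"])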