def decodeStream(self, session: DialogSession) -> Optional[ASRResult]:
	"""
	Stream the session's recorded audio to the Google streaming recognition
	API and return the transcription.

	:param session: the dialog session to decode audio for
	:return: an ASRResult on success, None when nothing was decoded or the
		request failed
	"""
	super().decodeStream(session)

	recorder = Recorder(self._timeout, session.user, session.siteId)
	self.ASRManager.addRecorder(session.siteId, recorder)
	self._recorder = recorder

	# Initialize upfront: the original code left "result" unbound when the
	# request failed, so the return line raised NameError instead of
	# returning None.
	result = None
	with Stopwatch() as processingTime:
		with recorder as stream:
			audioStream = stream.audioStream()
			# noinspection PyUnresolvedReferences
			try:
				requests = (types.StreamingRecognizeRequest(audio_content=content) for content in audioStream)
				responses = self._client.streaming_recognize(self._streamingConfig, requests)
				result = self._checkResponses(session, responses)
			except Exception:
				# Narrowed from a bare "except:" so SystemExit and
				# KeyboardInterrupt still propagate.
				self.logWarning('Failed ASR request')

	self.end()

	return ASRResult(
		text=result[0],
		session=session,
		likelihood=result[1],
		processingTime=processingTime.time
	) if result else None
def decodeStream(self, session: DialogSession) -> Optional[ASRResult]:
	"""
	Feed recorded audio frames to the Pocketsphinx decoder until it reports
	the end of speech, then return the decoded hypothesis.

	:param session: the dialog session to decode audio for
	:return: an ASRResult when a hypothesis was produced, None otherwise
	"""
	super().decodeStream(session)

	hypothesis = None
	with Stopwatch() as processingTime:
		with Recorder(self._timeout) as recorder:
			self.ASRManager.addRecorder(session.siteId, recorder)
			self._decoder.start_utt()
			speaking = False
			for frame in recorder:
				if self._timeout.isSet():
					break

				self._decoder.process_raw(frame, False, False)

				# Only react to transitions of the decoder's in-speech flag
				if self._decoder.get_in_speech() == speaking:
					continue

				speaking = self._decoder.get_in_speech()
				if speaking:
					continue

				# Speech just ended: close the utterance and keep the result
				self._decoder.end_utt()
				hypothesis = self._decoder.hyp() or None
				break

		self.end(recorder, session)

	return ASRResult(
		text=hypothesis.hypstr.strip(),
		session=session,
		likelihood=self._decoder.hyp().prob,
		processingTime=processingTime.time
	) if hypothesis else None
def decodeStream(self, session: DialogSession) -> Optional[ASRResult]:
	"""
	Stream audio chunks into the DeepSpeech model, broadcasting partial
	results while decoding, and return the final transcription.

	:param session: the dialog session to decode audio for
	:return: an ASRResult when a transcription was produced, None otherwise
	"""
	super().decodeStream(session)

	previous = ''
	text = ''
	with Stopwatch() as processingTime:
		with Recorder(self._timeout) as recorder:
			self.ASRManager.addRecorder(session.siteId, recorder)
			self._recorder = recorder
			streamContext = self._model.createStream()
			for chunk in recorder:
				if not chunk:
					break

				self._model.feedAudioContent(streamContext, np.frombuffer(chunk, np.int16))
				result = self._model.intermediateDecode(streamContext)
				# Only notify when the partial actually changed, instead of
				# re-broadcasting the same text for every audio chunk.
				if result and result != previous:
					previous = result
					self.partialTextCaptured(session=session, text=result, likelihood=1, seconds=0)

			text = self._model.finishStream(streamContext)

	self.end(session)

	# Gate on the final text: the last intermediate decode can be empty even
	# though finishStream() returned a valid transcription.
	return ASRResult(
		text=text,
		session=session,
		likelihood=1.0,
		processingTime=processingTime.time
	) if text else None
def decodeStream(self, session: DialogSession) -> Optional[ASRResult]:
	"""
	Stream audio chunks into the DeepSpeech stream context, broadcasting
	changed partial results while decoding, and return the final transcription.

	:param session: the dialog session to decode audio for
	:return: an ASRResult when a transcription was produced, None otherwise
	"""
	super().decodeStream(session)

	previous = ''
	text = ''
	with Stopwatch() as processingTime:
		with Recorder(self._timeout, session.user, session.deviceUid) as recorder:
			self.ASRManager.addRecorder(session.deviceUid, recorder)
			self._recorder = recorder
			streamContext = self._model.createStream()
			for chunk in recorder:
				if not chunk:
					break

				streamContext.feedAudioContent(np.frombuffer(chunk, np.int16))
				partial = streamContext.intermediateDecode()
				# Only broadcast partials that actually changed
				if partial and partial != previous:
					previous = partial
					self.partialTextCaptured(session=session, text=partial, likelihood=1, seconds=0)

			text = streamContext.finishStream()

	self._triggerFlag.clear()
	self.end()

	# Gate on the final text rather than the last intermediate partial:
	# finishStream() can return a valid transcription even when the last
	# intermediate decode was empty, which previously lost the result.
	return ASRResult(
		text=text,
		session=session,
		likelihood=1.0,
		processingTime=processingTime.time
	) if text else None
def __init__(self, restartHandler: callable):
	"""
	Boot the Alice main unit: verify dependencies and, when satisfied,
	bring up the whole SuperManager stack; otherwise request a restart
	through the given handler.

	:param restartHandler: callable invoked to restart the main unit
	"""
	Singleton.__init__(self, self.NAME)
	self._logger = Logger(prepend='[Project Alice]')
	self._logger.logInfo('Starting Alice main unit')

	self._booted = False
	self._isUpdating = False
	self._shuttingDown = False
	self._restart = False
	self._restartHandler = restartHandler

	if not self.checkDependencies():
		# Missing dependencies: schedule a restart instead of booting
		self._restart = True
		self._restartHandler()
		return

	with Stopwatch() as stopWatch:
		self._superManager = SuperManager(self)
		self._superManager.initManagers()
		self._superManager.onStart()

		if self._superManager.configManager.getAliceConfigByName('useHLC'):
			self._superManager.commons.runRootSystemCommand(['systemctl', 'start', 'hermesledcontrol'])

		self._superManager.onBooted()

	self._logger.logInfo(f'Started in {stopWatch} seconds')
	self._booted = True
def nluTrainingThread(self, datasetFile: Path):
	"""
	Train the Snips NLU engine from datasetFile in a background thread and
	swap the freshly trained engine into the assistant directory.

	:param datasetFile: path to the generated Snips NLU training dataset
	"""
	try:
		with Stopwatch() as stopWatch:
			self.logInfo('Begin training...')
			self._timer = self.ThreadManager.newTimer(interval=0.25, func=self.trainingStatus)

			tempTrainingData = Path('/tmp/snipsNLU')
			if tempTrainingData.exists():
				shutil.rmtree(tempTrainingData)

			training: CompletedProcess = self.Commons.runSystemCommand(['./venv/bin/snips-nlu', 'train', str(datasetFile), str(tempTrainingData)])
			if training.returncode != 0:
				self.logError(f'Error while training Snips NLU: {training.stderr.decode()}')

			assistantPath = Path(self.Commons.rootDir(), f'trained/assistants/{self.LanguageManager.activeLanguage}/nlu_engine')

			if not tempTrainingData.exists():
				# Bail out before touching the deployed engine: continuing
				# here used to delete the working engine and then crash on
				# the move of the missing training output.
				self.trainingFailed()
				self._timer.cancel()
				return

			if not assistantPath.exists():
				self.logFatal('No NLU engine found, cannot start')
				self._timer.cancel()
				return

			# assistantPath is known to exist at this point
			shutil.rmtree(assistantPath)
			shutil.move(tempTrainingData, assistantPath)

			self._timer.cancel()
			self.MqttManager.publish(constants.TOPIC_NLU_TRAINING_STATUS, payload={'status': 'done'})
			self.WebUINotificationManager.newNotification(typ=UINotificationType.INFO, notification='nluTrainingDone', key='nluTraining')
			self.ThreadManager.getEvent('TrainAssistant').clear()
			self.logInfo(f'Snips NLU trained in {stopWatch} seconds')

			self.broadcast(method=constants.EVENT_NLU_TRAINED, exceptions=[constants.DUMMY], propagateToSkills=True)
			self.NluManager.restartEngine()
	except Exception:
		# Was a bare "except:": keep the best-effort recovery but let
		# SystemExit/KeyboardInterrupt propagate.
		self.trainingFailed()
	finally:
		self.NluManager.training = False
def nluTrainingThread(self, datasetFile: Path):
	"""
	Train the Snips NLU engine from datasetFile in a background thread,
	replace the deployed engine with the training output and restart the
	snips-nlu service.

	:param datasetFile: path to the generated Snips NLU training dataset
	"""
	with Stopwatch() as stopWatch:
		self.logInfo('Begin training...')
		self._timer = self.ThreadManager.newTimer(interval=10, func=self.trainingStatus)

		tempTrainingData = Path('/tmp/snipsNLU')
		if tempTrainingData.exists():
			shutil.rmtree(tempTrainingData)

		training: CompletedProcess = self.Commons.runSystemCommand(['./venv/bin/snips-nlu', 'train', str(datasetFile), str(tempTrainingData)])
		if training.returncode != 0:
			self.logError(f'Error while training Snips NLU: {training.stderr.decode()}')

		assistantPath = Path(self.Commons.rootDir(), f'trained/assistants/assistant_{self.LanguageManager.activeLanguage}/nlu_engine')

		if not tempTrainingData.exists():
			self.logError('Snips NLU training failed')

		if not assistantPath.exists():
			self.logFatal('No NLU engine found, cannot start')
			self._timer.cancel()
			return

		# assistantPath is known to exist at this point
		shutil.rmtree(assistantPath)
		# shutil.move works across filesystems; Path.rename raises OSError
		# (EXDEV) when /tmp is a different mount than the assistant directory.
		shutil.move(str(tempTrainingData), str(assistantPath))

		self.broadcast(method=constants.EVENT_NLU_TRAINED, exceptions=[constants.DUMMY], propagateToSkills=True)
		self.SnipsServicesManager.runCmd(cmd='restart', services=['snips-nlu'])

		self._timer.cancel()
		self.ThreadManager.getEvent('TrainAssistant').clear()
		self.logInfo(f'Snips NLU trained in {stopWatch} seconds')
def __init__(self, restartHandler: callable):
	"""
	Bootstrap Project Alice: initialize the singleton, start every manager
	through the SuperManager and optionally bring up snips led control.

	:param restartHandler: callable invoked to restart the main unit
	"""
	Singleton.__init__(self, self.NAME)
	self.logInfo('Starting up Project Alice')
	self._booted = False

	with Stopwatch() as bootWatch:
		self._restart = False
		self._restartHandler = restartHandler

		self._superManager = SuperManager(self)
		self._superManager.initManagers()
		self._superManager.onStart()

		useSLC = self._superManager.configManager.getAliceConfigByName('useSLC')
		if useSLC:
			subprocess.run(['sudo', 'systemctl', 'start', 'snipsledcontrol'])

		self._superManager.onBooted()

	self.logInfo(f'- Started Project Alice in {bootWatch} seconds')
	self._booted = True
def decodeStream(self, session: DialogSession) -> Optional[ASRResult]:
	"""
	Decode the session's audio with Pocketsphinx, broadcasting a partial
	hypothesis every 10th decoded frame that yields one, and return the
	final hypothesis once the decoder detects the end of speech.

	:param session: the dialog session to decode audio for
	:return: an ASRResult when a hypothesis was produced, None otherwise
	"""
	super().decodeStream(session)

	hypothesis = None
	partialCount = 0
	with Stopwatch() as processingTime:
		with Recorder(self._timeout, session.user, session.deviceUid) as recorder:
			self.ASRManager.addRecorder(session.deviceUid, recorder)
			self._recorder = recorder
			self._decoder.start_utt()
			speaking = False
			for frame in recorder:
				if self._timeout.isSet():
					break

				self._decoder.process_raw(frame, False, False)

				partial = self._decoder.hyp()
				if partial:
					partialCount = (partialCount + 1) % 10
					if partialCount == 0:
						# Every 10th frame with a hypothesis, push a partial
						self.partialTextCaptured(session, partial.hypstr, partial.prob, processingTime.time)

				# Only react to transitions of the decoder's in-speech flag
				if self._decoder.get_in_speech() == speaking:
					continue

				speaking = self._decoder.get_in_speech()
				if not speaking:
					# Speech just ended: close the utterance and keep the result
					self._decoder.end_utt()
					hypothesis = self._decoder.hyp() or None
					break

	self.end()

	return ASRResult(
		text=hypothesis.hypstr.strip(),
		session=session,
		likelihood=self._decoder.hyp().prob,
		processingTime=processingTime.time
	) if hypothesis else None
def nluTrainingThread(self, datasetFile: Path):
	"""
	Train the Snips NLU engine from datasetFile, replace the deployed
	engine with the training output and restart the snips-nlu service.

	:param datasetFile: path to the generated Snips NLU training dataset
	"""
	with Stopwatch() as stopWatch:
		self.logInfo('Begin training...')

		tempTrainingData = Path('/tmp/snipsNLU')
		if tempTrainingData.exists():
			shutil.rmtree(tempTrainingData)

		self.Commons.runSystemCommand(['./venv/bin/snips-nlu', 'train', str(datasetFile), str(tempTrainingData)])

		assistantPath = Path(self.Commons.rootDir(), f'trained/assistants/assistant_{self.LanguageManager.activeLanguage}/nlu_engine')

		if not tempTrainingData.exists():
			self.logError('Snips NLU training failed')

		if not assistantPath.exists():
			self.logFatal('No NLU engine found, cannot start')
			return

		# assistantPath is known to exist at this point
		shutil.rmtree(assistantPath)
		# shutil.move works across filesystems; Path.rename raises OSError
		# (EXDEV) when /tmp is a different mount than the assistant directory.
		shutil.move(str(tempTrainingData), str(assistantPath))

		self.broadcast(method=constants.EVENT_NLU_TRAINED, exceptions=[constants.DUMMY], propagateToSkills=True)
		self.SnipsServicesManager.runCmd(cmd='restart', services=['snips-nlu'])
		self.logInfo(f'Snips NLU trained in {stopWatch} seconds')