def remove_context(self, s, cxt):
    """Remove an adapt context (or several) from the skill under test.

    Arguments:
        s: skill instance whose context should be cleared
        cxt: a single context name, or a list of context names
    """
    # Normalize to a list so a single name and a list are handled uniformly.
    contexts = cxt if isinstance(cxt, list) else [cxt]
    for context in contexts:
        MycroftSkill.remove_context(s, context)
def run(self, loader):
    """Run a test for a skill.

    The skill, test_case_file and emitter is already set up in the
    __init__ method.

    Args:
        loader: object exposing ``skills`` (loaded skill instances) and
            ``load_log`` (captured load output per skill directory)

    Returns:
        bool: True if the evaluation rule fully succeeded.

    Raises:
        SkillTestError: if the skill was never loaded or the test case
            provides no input utterance.
    """
    # Locate the loaded skill instance matching this test's skill directory
    s = [s for s in loader.skills if s and s.root_dir == self.skill]
    if s:
        s = s[0]
    else:
        # The skill wasn't loaded, print the load log for the skill
        if self.skill in loader.load_log:
            print('\n {} Captured Logs from loading {}'.format('=' * 15,
                                                               '=' * 15))
            print(loader.load_log.pop(self.skill))
        raise SkillTestError('Skill couldn\'t be loaded')

    print("")
    print(color.HEADER + "=" * 20 + " RUNNING TEST " + "=" * 20 + color.RESET)
    print('Test file: ', self.test_case_file)
    with open(self.test_case_file, 'r') as f:
        test_case = json.load(f)
    print('Test:', json.dumps(test_case, indent=4, sort_keys=False))

    # Optionally replace the skill settings for the duration of this test.
    # The originals are restored before returning (success OR failure).
    original_settings = None
    if 'settings' in test_case:
        original_settings = s.settings
        s.settings = TestSettings('/tmp/', self.test_case_file)
        for key in test_case['settings']:
            s.settings[key] = test_case['settings'][key]
        print(color.YELLOW, 'will run test with custom settings:',
              '\n{}'.format(s.settings), color.RESET)

    # Feed scripted responses to any get_response() call the skill makes
    if 'responses' in test_case:
        def get_response(dialog='', data=None, announcement='',
                         validator=None, on_fail=None, num_retries=-1):
            data = data or {}
            utt = announcement or s.dialog_renderer.render(dialog, data)
            print(color.MYCROFT + ">> " + utt + color.RESET)
            s.speak(utt)
            # Consume the next canned response from the test case
            response = test_case['responses'].pop(0)
            print("SENDING RESPONSE:",
                  color.USER_UTT + response + color.RESET)
            return response
        s.get_response = get_response

    # If we keep track of test status for the entire skill, then
    # get all intents from the skill, and mark current intent
    # tested
    if self.test_status:
        self.test_status.append_intent(s)
        if 'intent_type' in test_case:
            self.test_status.set_tested(test_case['intent_type'])

    evaluation_rule = EvaluationRule(test_case, s)

    # Set up queue for emitted events. Because
    # the evaluation method expects events to be received in convoy,
    # and be handled one by one. We can't make assumptions about threading
    # in the core or the skill
    q = Queue()
    s.bus.q = q

    # Set up context before calling intent
    # This option makes it possible to better isolate (reduce dependence)
    # between test_cases
    cxt = test_case.get('remove_context', None)
    if cxt:
        if isinstance(cxt, list):
            for x in cxt:
                MycroftSkill.remove_context(s, x)
        else:
            MycroftSkill.remove_context(s, cxt)

    cxt = test_case.get('set_context', None)
    if cxt:
        for key, value in cxt.items():
            MycroftSkill.set_context(s, key, value)

    # Emit an utterance, just like the STT engine does. This sends the
    # provided text to the skill engine for intent matching and it then
    # invokes the skill.
    utt = test_case.get('utterance', None)
    play_utt = test_case.get('play_query', None)
    play_start = test_case.get('play_start', None)
    if utt:
        print("UTTERANCE:", color.USER_UTT + utt + color.RESET)
        self.emitter.emit(
            'recognizer_loop:utterance',
            Message('recognizer_loop:utterance', {'utterances': [utt]}))
    elif play_utt:
        print('PLAY QUERY', color.USER_UTT + play_utt + color.RESET)
        # NOTE(review): message type has a trailing colon ('play:query:')
        # while the emitted event name does not — confirm intentional
        self.emitter.emit('play:query',
                          Message('play:query:', {'phrase': play_utt}))
    elif play_start:
        print('PLAY START')
        callback_data = play_start
        callback_data['skill_id'] = s.skill_id
        self.emitter.emit('play:start',
                          Message('play:start', callback_data))
    else:
        raise SkillTestError('No input utterance provided')

    # Wait up to X seconds for the test_case to complete
    # (DEFAULT_EVALUAITON_TIMEOUT is the module constant's spelling)
    timeout = time.time() + int(test_case.get('evaluation_timeout')) \
        if test_case.get('evaluation_timeout', None) and \
        isinstance(test_case['evaluation_timeout'], int) \
        else time.time() + DEFAULT_EVALUAITON_TIMEOUT
    while not evaluation_rule.all_succeeded():
        try:
            event = q.get(timeout=1)
            # Expose the last component of the event type for rule matching
            if ':' in event.type:
                event.data['__type__'] = event.type.split(':')[1]
            else:
                event.data['__type__'] = event.type
            evaluation_rule.evaluate(event.data)
            if event.type == 'mycroft.skill.handler.complete':
                break
        except Empty:
            pass
        if time.time() > timeout:
            break

    # Stop emitter from sending on queue
    s.bus.q = None

    # remove the skill which is not responding
    self.emitter.remove_all_listeners('speak')
    self.emitter.remove_all_listeners('mycroft.skill.handler.complete')

    # Restore the skill's original settings unconditionally — previously
    # the failure path returned without restoring, leaking the temporary
    # TestSettings into subsequent tests.
    if original_settings:
        s.settings = original_settings

    # Report test result if failed
    if not evaluation_rule.all_succeeded():
        self.failure_msg = str(evaluation_rule.get_failure())
        print(color.FAIL + "Evaluation failed" + color.RESET)
        print(color.FAIL + "Failure:", self.failure_msg + color.RESET)
        return False
    return True
def run(self, loader):
    """Run a test for a skill.

    The skill, test_case_file and emitter is already set up in the
    __init__ method.

    Args:
        loader: object exposing ``skills`` (loaded skill instances)

    Returns:
        bool: True if the evaluation rule fully succeeded.

    Raises:
        Exception: if the skill was not loaded.
    """
    # Locate the loaded skill instance matching this test's skill directory
    s = [s for s in loader.skills if s and s.root_dir == self.skill]
    if s:
        s = s[0]
    else:
        raise Exception('Skill couldn\'t be loaded')

    print('Test case file: ', self.test_case_file)
    # Use a context manager so the file handle is closed; the original
    # json.load(open(...)) leaked the handle.
    with open(self.test_case_file, 'r') as f:
        test_case = json.load(f)
    print("Test case: ", test_case)

    # Feed scripted responses to any get_response() call the skill makes
    if 'responses' in test_case:
        def get_response(dialog='', data=None, announcement='',
                         validator=None, on_fail=None, num_retries=-1):
            data = data or {}
            utt = announcement or s.dialog_renderer.render(dialog, data)
            s.speak(utt)
            # Consume the next canned response from the test case
            response = test_case['responses'].pop(0)
            print(">" + utt)
            print("Responding with ", response)
            return response
        s.get_response = get_response

    # If we keep track of test status for the entire skill, then
    # get all intents from the skill, and mark current intent
    # tested
    if self.test_status:
        self.test_status.append_intent(s)
        if 'intent_type' in test_case:
            self.test_status.set_tested(test_case['intent_type'])

    evaluation_rule = EvaluationRule(test_case, s)

    # Set up queue for emitted events. Because
    # the evaluation method expects events to be received in convoy,
    # and be handled one by one. We can't make assumptions about threading
    # in the core or the skill
    q = Queue()
    s.emitter.q = q

    # Set up context before calling intent
    # This option makes it possible to better isolate (reduce dependence)
    # between test_cases
    cxt = test_case.get('remove_context', None)
    if cxt:
        if isinstance(cxt, list):
            for x in cxt:
                MycroftSkill.remove_context(s, x)
        else:
            MycroftSkill.remove_context(s, cxt)

    cxt = test_case.get('set_context', None)
    if cxt:
        for key, value in cxt.items():
            MycroftSkill.set_context(s, key, value)

    # Emit an utterance, just like the STT engine does. This sends the
    # provided text to the skill engine for intent matching and it then
    # invokes the skill.
    self.emitter.emit(
        'recognizer_loop:utterance',
        Message('recognizer_loop:utterance',
                {'utterances': [test_case.get('utterance', None)]}))

    # Wait up to X seconds for the test_case to complete
    # (DEFAULT_EVALUAITON_TIMEOUT is the module constant's spelling)
    timeout = time.time() + int(test_case.get('evaluation_timeout')) \
        if test_case.get('evaluation_timeout', None) and \
        isinstance(test_case['evaluation_timeout'], int) \
        else time.time() + DEFAULT_EVALUAITON_TIMEOUT
    while not evaluation_rule.all_succeeded():
        try:
            event = q.get(timeout=1)
            # Expose the last component of the event type for rule matching
            if ':' in event.type:
                event.data['__type__'] = event.type.split(':')[1]
            else:
                event.data['__type__'] = event.type
            evaluation_rule.evaluate(event.data)
            if event.type == 'mycroft.skill.handler.complete':
                break
        except Empty:
            pass
        if time.time() > timeout:
            break

    # Stop emitter from sending on queue
    s.emitter.q = None

    # remove the skill which is not responding
    self.emitter.remove_all_listeners('speak')
    self.emitter.remove_all_listeners('mycroft.skill.handler.complete')

    # Report test result if failed
    if not evaluation_rule.all_succeeded():
        print("Evaluation failed")
        print("Rule status: ", evaluation_rule.rule)
        return False
    return True
def execute_test(self, s):
    """ Execute test case.

    Arguments:
        s (MycroftSkill): mycroft skill to test

    Returns:
        (bool) True if the test succeeded completely.
    """
    print("")
    print(color.HEADER + "="*20 + " RUNNING TEST " + "="*20 + color.RESET)
    print('Test file: ', self.test_case_file)
    with open(self.test_case_file, 'r') as f:
        test_case = json.load(f)
    print('Test:', json.dumps(test_case, indent=4, sort_keys=False))

    # Override the skill's settings with test-case-specific values.
    # NOTE(review): unlike run(), the original settings are not captured
    # or restored afterwards — confirm this is intentional.
    if 'settings' in test_case:
        s.settings = TestSettings('/tmp/', self.test_case_file)
        for key in test_case['settings']:
            s.settings[key] = test_case['settings'][key]
        print(color.YELLOW, 'will run test with custom settings:',
              '\n{}'.format(s.settings), color.RESET)

    # Replace get_response so the skill receives scripted user replies
    if 'responses' in test_case:
        def get_response(dialog='', data=None, announcement='',
                         validator=None, on_fail=None, num_retries=-1):
            data = data or {}
            utt = announcement or s.dialog_renderer.render(dialog, data)
            print(color.MYCROFT + ">> " + utt + color.RESET)
            s.speak(utt)
            # Consume the next canned response from the test case
            response = test_case['responses'].pop(0)
            print("SENDING RESPONSE:",
                  color.USER_UTT + response + color.RESET)
            return response
        s.get_response = get_response

    # If we keep track of test status for the entire skill, then
    # get all intents from the skill, and mark current intent
    # tested
    if self.test_status:
        self.test_status.append_intent(s)
        if 'intent_type' in test_case:
            self.test_status.set_tested(test_case['intent_type'])

    evaluation_rule = EvaluationRule(test_case, s)

    # Set up queue for emitted events. Because
    # the evaluation method expects events to be received in convoy,
    # and be handled one by one. We cant make assumptions about threading
    # in the core or the skill
    q = Queue()
    s.bus.q = q

    # Set up context before calling intent
    # This option makes it possible to better isolate (reduce dependance)
    # between test_cases
    cxt = test_case.get('remove_context', None)
    if cxt:
        if isinstance(cxt, list):
            for x in cxt:
                MycroftSkill.remove_context(s, x)
        else:
            MycroftSkill.remove_context(s, cxt)

    cxt = test_case.get('set_context', None)
    if cxt:
        for key, value in cxt.items():
            MycroftSkill.set_context(s, key, value)

    # Emit an utterance, just like the STT engine does. This sends the
    # provided text to the skill engine for intent matching and it then
    # invokes the skill.
    utt = test_case.get('utterance', None)
    play_utt = test_case.get('play_query', None)
    play_start = test_case.get('play_start', None)
    if utt:
        print("UTTERANCE:", color.USER_UTT + utt + color.RESET)
        self.emitter.emit(
            'recognizer_loop:utterance',
            Message('recognizer_loop:utterance', {'utterances': [utt]}))
    elif play_utt:
        print('PLAY QUERY', color.USER_UTT + play_utt + color.RESET)
        # NOTE(review): message type has a trailing colon ('play:query:')
        # while the emitted event name does not — confirm intentional
        self.emitter.emit('play:query',
                          Message('play:query:', {'phrase': play_utt}))
    elif play_start:
        print('PLAY START')
        callback_data = play_start
        callback_data['skill_id'] = s.skill_id
        self.emitter.emit('play:start',
                          Message('play:start', callback_data))
    else:
        raise SkillTestError('No input utterance provided')

    # Wait up to X seconds for the test_case to complete; fall back to
    # the module default when no (integer) 'evaluation_timeout' is given.
    # (DEFAULT_EVALUAITON_TIMEOUT is the module constant's spelling.)
    timeout = time.time() + int(test_case.get('evaluation_timeout')) \
        if test_case.get('evaluation_timeout', None) and \
        isinstance(test_case['evaluation_timeout'], int) \
        else time.time() + DEFAULT_EVALUAITON_TIMEOUT
    while not evaluation_rule.all_succeeded():
        try:
            event = q.get(timeout=1)
            # Expose the last component of the event type for rule matching
            if ':' in event.type:
                event.data['__type__'] = event.type.split(':')[1]
            else:
                event.data['__type__'] = event.type
            evaluation_rule.evaluate(event.data)
            if event.type == 'mycroft.skill.handler.complete':
                break
        except Empty:
            # No event within 1s; loop again until success or timeout
            pass
        if time.time() > timeout:
            break

    # Stop emmiter from sending on queue
    s.bus.q = None

    # remove the skill which is not responding
    self.emitter.remove_all_listeners('speak')
    self.emitter.remove_all_listeners('mycroft.skill.handler.complete')

    # Report test result if failed
    if not evaluation_rule.all_succeeded():
        self.failure_msg = str(evaluation_rule.get_failure())
        print(color.FAIL + "Evaluation failed" + color.RESET)
        print(color.FAIL + "Failure:", self.failure_msg + color.RESET)
        return False
    return True