def _build_confusion_network(self, sampled_da_items):
    """Build a confusion network from sampled dialogue act items.

    Arguments:
        sampled_da_items -- iterable of (da_items, probs) pairs, where
            da_items is a sequence of DialogueActItem and probs the
            corresponding observation probabilities.

    Returns a DialogueActConfusionNetwork; repeated items are merged via
    add_merge.
    """
    confusion_net = DialogueActConfusionNetwork()
    for da_items, probs in sampled_da_items:
        for dai, prob in zip(da_items, probs):
            confusion_net.add_merge(prob, dai)
    return confusion_net
def _build_confusion_network(self, sampled_da_items):
    """Build confusion network from a list containing DialogueActItem and
    their observation probability.

    Arguments:
        sampled_da_items -- iterable of (da_items, probs) pairs; da_items
            is a sequence of DialogueActItem and probs the matching
            probabilities.

    Returns a DialogueActConfusionNetwork with duplicates merged via
    add_merge.
    """
    confusion_net = DialogueActConfusionNetwork()
    for da_items, probs in sampled_da_items:
        for dai, prob in zip(da_items, probs):
            confusion_net.add_merge(prob, dai)
    return confusion_net
def test_get_prob(self):
    """_get_prob multiplies per-item probabilities for a given index vector."""
    dacn = DialogueActConfusionNetwork()
    for prob, dai_str in ((0.2, 'inform(food=chinese)'),
                          (0.7, 'inform(food=czech)'),
                          (0.1, 'inform(food=russian)')):
        dacn.add(prob, DialogueActItem(dai=dai_str))
    # Index 0 selects the item's probability, index 1 its complement.
    self.assertAlmostEqual(dacn._get_prob([0, 1, 1]), 0.2 * 0.3 * 0.9)
    self.assertAlmostEqual(dacn._get_prob([0, 0, 0]), 0.2 * 0.7 * 0.1)
def test_prune(self):
    """prune() drops hypotheses with negligible probability."""
    dacn = DialogueActConfusionNetwork()
    for prob, dai_str in ((0.05, 'inform(food=chinese)'),
                          (0.9, 'inform(food=czech)'),
                          (0.00005, 'inform(food=russian)')):
        dacn.add(prob, DialogueActItem(dai=dai_str))
    self.assertEqual(len(dacn), 3)
    dacn.prune()
    # Russian food should be pruned.
    self.assertEqual(len(dacn), 2)
    self.assertTrue(
        DialogueActItem(dai='inform(food=russian)') not in dacn)
def context_resolution(self, user_da, system_da):
    """Resolves and converts meaning of some user dialogue acts given the context.

    Given the last system dialogue act, context-dependent user acts
    (bare affirm/negate answers, dontcare, bare values) are translated
    into explicit inform/deny acts and appended to a copy of the user's
    confusion network, which is returned.
    """
    old_user_da = deepcopy(user_da)
    new_user_da = DialogueActConfusionNetwork()
    if isinstance(system_da, DialogueAct):
        for system_dai in system_da:
            for prob, user_dai in user_da:
                new_user_dai = None
                # "Is it X?" -> "Yes." becomes inform(slot=X).
                if system_dai.dat == "confirm" and user_dai.dat == "affirm":
                    new_user_dai = DialogueActItem("inform", system_dai.name, system_dai.value)
                # "Is it X?" -> "No." becomes deny(slot=X).
                elif system_dai.dat == "confirm" and user_dai.dat == "negate":
                    new_user_dai = DialogueActItem("deny", system_dai.name, system_dai.value)
                # Requested slot answered with "dontcare": carry over the
                # system's slot name/value (slots must be linked in the
                # ontology's context_resolution map).
                elif system_dai.dat == "request" and user_dai.dat == "inform" and \
                        user_dai.name in self.ontology['context_resolution'] and \
                        system_dai.name in self.ontology['context_resolution'][user_dai.name] and \
                        user_dai.value == "dontcare":
                    new_user_dai = DialogueActItem("inform", system_dai.name, system_dai.value)
                # Requested slot answered with a bare value that is valid
                # for the requested slot: re-bind the value to that slot.
                elif system_dai.dat == "request" and user_dai.dat == "inform" and \
                        user_dai.name in self.ontology['context_resolution'] and \
                        system_dai.name in self.ontology['context_resolution'][user_dai.name] and \
                        self.ontology.slot_has_value(system_dai.name, user_dai.value):
                    new_user_dai = DialogueActItem("inform", system_dai.name, user_dai.value)
                # Yes/no answer to a request about a binary slot.
                elif system_dai.dat == "request" and system_dai.name != "" and \
                        user_dai.dat == "affirm" and self.ontology.slot_is_binary(system_dai.name):
                    new_user_dai = DialogueActItem("inform", system_dai.name, "true")
                elif system_dai.dat == "request" and system_dai.name != "" and \
                        user_dai.dat == "negate" and self.ontology.slot_is_binary(system_dai.name):
                    new_user_dai = DialogueActItem("inform", system_dai.name, "false")
                if new_user_dai:
                    # Resolved act inherits the original user act's probability.
                    new_user_da.add(prob, new_user_dai)
    old_user_da.extend(new_user_da)
    return old_user_da
def test_sort(self):
    """sort() orders hypotheses by descending probability."""
    dacn = DialogueActConfusionNetwork()
    dacn.add(0.05, DialogueActItem(dai='inform(food=chinese)'))
    dacn.add(1.0, DialogueActItem(dai='inform(food=czech)'))
    dacn.add(0.00005, DialogueActItem(dai='inform(food=russian)'))
    dacn.sort()
    expected_order = ('inform(food=czech)',
                      'inform(food=chinese)',
                      'inform(food=russian)')
    for (prob, dai), dai_str in zip(list(dacn), expected_order):
        self.assertEqual(dai, DialogueActItem(dai=dai_str))
def test_prune(self):
    """Duplicate prune test: near-zero hypotheses are removed."""
    dacn = DialogueActConfusionNetwork()
    dacn.add(0.05, DialogueActItem(dai='inform(food=chinese)'))
    dacn.add(0.9, DialogueActItem(dai='inform(food=czech)'))
    dacn.add(0.00005, DialogueActItem(dai='inform(food=russian)'))
    self.assertEqual(len(dacn), 3)
    dacn.prune()
    # The very unlikely Russian hypothesis should have been dropped.
    self.assertEqual(len(dacn), 2)
    pruned = DialogueActItem(dai='inform(food=russian)')
    self.assertTrue(pruned not in dacn)
def test_add_merge(self):
    """add_merge with combine='add' sums probabilities of a repeated item."""
    chinese = DialogueActItem(dai='inform(food=chinese)')
    dacn = DialogueActConfusionNetwork()
    dacn.add_merge(0.5, chinese, combine='add')
    self.assertEqual(dacn._get_prob([0]), 0.5)
    # Adding the same item again accumulates to 1.0.
    dacn.add_merge(0.5, chinese, combine='add')
    self.assertEqual(dacn._get_prob([0]), 1.0)
def _resolve_user_da_in_context(self, user_da, system_da):
    """Resolves and converts meaning of some user dialogue acts given the context.

    Like context_resolution, but the resolved acts are merged into a copy
    of the user's confusion network with combine='max' instead of being
    appended, and the merged copy is returned.
    """
    old_user_da = deepcopy(user_da)
    new_user_da = DialogueActConfusionNetwork()
    if isinstance(system_da, DialogueAct):
        for system_dai in system_da:
            for prob, user_dai in user_da:
                new_user_dai = None
                # "Is it X?" -> "Yes." becomes inform(slot=X).
                if system_dai.dat == "confirm" and user_dai.dat == "affirm":
                    new_user_dai = DialogueActItem("inform", system_dai.name, system_dai.value)
                # "Is it X?" -> "No." becomes deny(slot=X).
                elif system_dai.dat == "confirm" and user_dai.dat == "negate":
                    new_user_dai = DialogueActItem("deny", system_dai.name, system_dai.value)
                # Requested slot answered with "dontcare" (slots must be
                # linked via the ontology's context_resolution map).
                elif system_dai.dat == "request" and user_dai.dat == "inform" and \
                        user_dai.name in self.ontology['context_resolution'] and \
                        system_dai.name in self.ontology['context_resolution'][user_dai.name] and \
                        user_dai.value == "dontcare":
                    new_user_dai = DialogueActItem("inform", system_dai.name, system_dai.value)
                # Requested slot answered with a bare value valid for it.
                elif system_dai.dat == "request" and user_dai.dat == "inform" and \
                        user_dai.name in self.ontology['context_resolution'] and \
                        system_dai.name in self.ontology['context_resolution'][user_dai.name] and \
                        self.ontology.slot_has_value(system_dai.name, user_dai.value):
                    new_user_dai = DialogueActItem("inform", system_dai.name, user_dai.value)
                # Yes/no answer to a request about a binary slot.
                elif system_dai.dat == "request" and system_dai.name != "" and \
                        user_dai.dat == "affirm" and self.ontology.slot_is_binary(system_dai.name):
                    new_user_dai = DialogueActItem("inform", system_dai.name, "true")
                elif system_dai.dat == "request" and system_dai.name != "" and \
                        user_dai.dat == "negate" and self.ontology.slot_is_binary(system_dai.name):
                    new_user_dai = DialogueActItem("inform", system_dai.name, "false")
                if new_user_dai:
                    # Resolved act inherits the original user act's probability.
                    new_user_da.add(prob, new_user_dai)
    old_user_da.merge(new_user_da, combine='max')
    return old_user_da
def _infer_last_talked_about_slots(self, user_da, system_da):
    """This adds dialogue act items to support inference of the last slots
    the user talked about.

    For every user DAI the ontology is asked which "last talked about"
    (lta) slot/value pairs it implies; those are added as inform acts.
    When two different values collide on the same lta slot, the current
    state's most probable value decides which (if any) survive.
    """
    old_user_da = deepcopy(user_da)
    new_user_da = DialogueActConfusionNetwork()
    colliding_slots = {}
    done_slots = set()
    for prob, user_dai in user_da:
        new_user_dais = []
        lta_tsvs = self.ontology.last_talked_about(user_dai.dat, user_dai.name, user_dai.value)
        for name, value in lta_tsvs:
            new_user_dais.append(DialogueActItem("inform", name, value))
            # Track slots that receive more than one distinct value.
            if name in done_slots:
                if not name in colliding_slots:
                    colliding_slots[name] = set()
                colliding_slots[name].add(value)
            else:
                done_slots.add(name)
        if new_user_dais:
            for nudai in new_user_dais:
                if not nudai in new_user_da:
                    new_user_da.add(prob, nudai)
    # In case of collisions, prefer the current last talked about values if it is one of the colliding values.
    # If there is a collision and the current last talked about value is not among the colliding values, do not
    # consider the colliding DA's at all.
    # NOTE(review): self[da.name].mpv() reads the enclosing dialogue
    # state's most probable value for the slot — confirm `self` is the
    # state object here.
    invalid_das = set()
    for prob, da in set(new_user_da):
        if da.name in colliding_slots and self[da.name].mpv() in colliding_slots[da.name]:
            if not da.value == self[da.name].mpv():
                invalid_das.add(da)
        elif da.name in colliding_slots:
            invalid_das.add(da)
    for invalid_da in invalid_das:
        new_user_da.remove(invalid_da)
    old_user_da.merge(new_user_da, combine='max')
    return old_user_da
def last_talked_about(self, user_da, system_da):
    """This adds dialogue act items to support inference of the last slots
    the user talked about.

    Returns a copy of user_da extended with inform acts for every
    slot/value pair the ontology derives from each user DAI.
    """
    extended_da = deepcopy(user_da)
    inferred = DialogueActConfusionNetwork()
    for prob, user_dai in user_da:
        lta_pairs = self.ontology.last_talked_about(
            user_dai.dat, user_dai.name, user_dai.value)
        # Each inferred act inherits the probability of the source DAI.
        for slot, slot_value in lta_pairs:
            inferred.add(prob, DialogueActItem("inform", slot, slot_value))
    extended_da.extend(inferred)
    return extended_da
def parse_nblist(self, obs, *args, **kwargs):
    """
    Parses an observation featuring an utterance n-best list using the
    parse_1_best method.

    Arguments:
        obs -- a dictionary of observations
            :: observation type -> observed value
            where observation type is one of values for `obs_type' used in
            `ft_props', and observed value is the corresponding observed
            value for the input
        args -- further positional arguments that should be passed to the
            `parse_1_best' method call
        kwargs -- further keyword arguments that should be passed to the
            `parse_1_best' method call

    Returns a pruned, sorted DialogueActConfusionNetwork merged over all
    n-best hypotheses (empty network for an empty n-best list).
    """
    nblist = obs['utt_nbl']
    if len(nblist) == 0:
        return DialogueActConfusionNetwork()
    # Each hypothesis is parsed individually, so drop the list itself
    # from the observations passed down to parse_1_best.
    obs_wo_nblist = copy.deepcopy(obs)
    del obs_wo_nblist['utt_nbl']
    dacn_list = []
    for prob, utt in nblist:
        if "_other_" == utt:
            # Non-speech token: map directly to other().
            dacn = DialogueActConfusionNetwork()
            dacn.add(1.0, DialogueActItem("other"))
        elif "_silence_" == utt:
            # Non-speech token: map directly to silence().
            dacn = DialogueActConfusionNetwork()
            dacn.add(1.0, DialogueActItem("silence"))
        else:
            obs_wo_nblist['utt'] = utt
            dacn = self.parse_1_best(obs_wo_nblist, *args, **kwargs)
        dacn_list.append((prob, dacn))
    # Combine per-hypothesis networks weighted by ASR probability.
    dacn = merge_slu_confnets(dacn_list)
    dacn.prune()
    dacn.sort()
    return dacn
def test_get_platform_res_da(self):
    """A platform query should be answered with inform(not_supported)."""
    hdc_policy = self._build_policy()
    state = DeterministicDiscriminativeDialogueState(self.cfg, self.ontology)
    system_input = DialogueActConfusionNetwork()
    res = hdc_policy.get_da(state)
    user_input = DialogueActConfusionNetwork()
    for dai_str in ('info(task=find_platform)',
                    'inform(from_stop=Praha)',
                    'inform(to_stop=Brno)'):
        user_input.add(1.0, DialogueActItem(dai=dai_str))
    state.update(user_input, system_input)
    res = hdc_policy.get_da(state)
    self.assert_('inform(not_supported)' in res)
def main():
    """Smoke-test the DSTC tracker: update a state with inform and deny acts
    and pretty-print the state after each step."""
    # initialize tracker and state
    slots = ["food", "location"]
    tr = DSTCTracker(slots)
    state = DSTCState(slots)
    state.pprint()

    # try to update state with some information
    print '---'
    cn = DialogueActConfusionNetwork()
    cn.add(0.3, DialogueActItem("inform", "food", "chinese"))
    cn.add(0.1, DialogueActItem("inform", "food", "indian"))
    tr.update_state(state, cn)
    state.pprint()

    # try to deny some information
    # (note: the deny acts are added to the same network as the informs)
    print '---'
    cn.add(0.9, DialogueActItem("deny", "food", "chinese"))
    cn.add(0.1, DialogueActItem("deny", "food", "indian"))
    tr.update_state(state, cn)
    state.pprint()
def _infer_last_talked_about_slots(self, user_da, system_da):
    """This adds dialogue act items to support inference of the last slots
    the user talked about.

    For every user DAI the ontology is asked which "last talked about"
    slot/value pairs it implies; those are added as inform acts. When two
    different values collide on the same slot, the current state's most
    probable value decides which (if any) survive.
    """
    old_user_da = deepcopy(user_da)
    new_user_da = DialogueActConfusionNetwork()
    colliding_slots = {}
    done_slots = set()
    for prob, user_dai in user_da:
        new_user_dais = []
        lta_tsvs = self.ontology.last_talked_about(user_dai.dat, user_dai.name, user_dai.value)
        for name, value in lta_tsvs:
            new_user_dais.append(DialogueActItem("inform", name, value))
            # Track slots that receive more than one distinct value.
            if name in done_slots:
                if not name in colliding_slots:
                    colliding_slots[name] = set()
                colliding_slots[name].add(value)
            else:
                done_slots.add(name)
        if new_user_dais:
            for nudai in new_user_dais:
                if not nudai in new_user_da:
                    new_user_da.add(prob, nudai)
    # In case of collisions, prefer the current last talked about values if it is one of the colliding values.
    # If there is a collision and the current last talked about value is not among the colliding values, do not
    # consider the colliding DA's at all.
    # NOTE(review): self[da.name].mpv() reads the enclosing dialogue
    # state's most probable value for the slot — confirm `self` is the
    # state object here.
    invalid_das = set()
    for prob, da in set(new_user_da):
        if da.name in colliding_slots and self[
                da.name].mpv() in colliding_slots[da.name]:
            if not da.value == self[da.name].mpv():
                invalid_das.add(da)
        elif da.name in colliding_slots:
            invalid_das.add(da)
    for invalid_da in invalid_das:
        new_user_da.remove(invalid_da)
    old_user_da.merge(new_user_da, combine='max')
    return old_user_da
def test_switching_tasks(self):
    """The policy's lta_task slot should follow the task the user last asked
    for (weather, then find_connection).

    External services are stubbed out with mox so no network calls happen.
    """
    hdc_policy = self._build_policy()
    # Stub the weather and directions back-ends; expectations below must be
    # consumed in this order during ReplayAll/VerifyAll.
    self.mox.StubOutWithMock(hdc_policy.weather, 'get_weather')
    self.mox.StubOutWithMock(hdc_policy, 'get_directions')
    hdc_policy.weather.get_weather(city=u'Praha', daily=False, lat=u'50.0755381', lon=u'14.4378005', time=None).AndReturn(None)
    hdc_policy.get_directions(mox.IgnoreArg(), check_conflict=True).AndReturn([DialogueActItem(dai="inform(time=10:00)")])
    self.mox.ReplayAll()
    state = DeterministicDiscriminativeDialogueState(self.cfg, self.ontology)
    system_input = DialogueActConfusionNetwork()
    res = hdc_policy.get_da(state)
    # User says she wants weather so the task should be weather.
    user_input = self._build_user_input("inform(task=weather)")
    state.update(user_input, system_input)
    res = hdc_policy.get_da(state)
    self.assertEqual(state['lta_task'].mpv(), 'weather')
    # User wants to find a connection so the task should be find_connection.
    user_input = self._build_user_input(u"inform(task=find_connection)", u"inform(to_stop=Malostranská)", u"inform(from_stop=Anděl)")
    state.update(user_input, system_input)
    res = hdc_policy.get_da(state)
    self.assertEqual(state['lta_task'].mpv(), 'find_connection')
    self.mox.VerifyAll()
def test_session_logger(self):
    """Exercise the whole SessionLogger API end-to-end for three sessions.

    Each iteration starts a session in its own directory and logs a short
    two-turn dialogue including recordings, an ASR confusion network and
    an SLU confusion network.
    """
    cfg = Config.load_configs(config=CONFIG_DICT, use_default=False)
    sl = SessionLogger()

    # test 3 calls at once
    for i in range(3):
        sess_dir = "./%d" % i
        if not os.path.isdir(sess_dir):
            os.mkdir(sess_dir)
        sl.session_start(sess_dir)
        sl.config('config = ' + unicode(cfg))
        sl.header(cfg['Logging']["system_name"], cfg['Logging']["version"])
        sl.input_source("voip")

        # whole-dialogue recordings for both channels
        sl.dialogue_rec_start(None, "both_complete_dialogue.wav")
        sl.dialogue_rec_start("system", "system_complete_dialogue.wav")
        sl.dialogue_rec_start("user", "user_complete_dialogue.wav")
        sl.dialogue_rec_end("both_complete_dialogue.wav")
        sl.dialogue_rec_end("system_complete_dialogue.wav")
        sl.dialogue_rec_end("user_complete_dialogue.wav")

        # first system turn
        sl.turn("system")
        sl.dialogue_act("system", "hello()")
        sl.text("system", "Hello.")
        sl.rec_start("system", "system1.wav")
        sl.rec_end("system1.wav")

        # first user turn
        sl.turn("user")
        sl.rec_start("user", "user1.wav")
        sl.rec_end("user1.wav")

        # build and log an ASR confusion network with three word slots
        A1, A2, A3 = 0.90, 0.05, 0.05
        B1, B2, B3 = 0.70, 0.20, 0.10
        C1, C2, C3 = 0.80, 0.10, 0.10
        asr_confnet = UtteranceConfusionNetwork()
        asr_confnet.add([[A1, "want"], [A2, "has"], [A3, 'ehm']])
        asr_confnet.add([[B1, "Chinese"], [B2, "English"], [B3, 'cheap']])
        asr_confnet.add([[C1, "restaurant"], [C2, "pub"], [C3, 'hotel']])
        asr_confnet.merge()
        asr_confnet.normalise()
        asr_confnet.sort()
        asr_nblist = asr_confnet.get_utterance_nblist()
        sl.asr("user", "user1.wav", asr_nblist, asr_confnet)

        # build and log an SLU confusion network
        slu_confnet = DialogueActConfusionNetwork()
        slu_confnet.add(0.7, DialogueActItem('hello'))
        slu_confnet.add(0.6, DialogueActItem('thankyou'))
        slu_confnet.add(0.4, DialogueActItem('restart'))
        slu_confnet.add(0.1, DialogueActItem('bye'))
        slu_confnet.merge()
        slu_confnet.normalise()
        slu_confnet.sort()
        slu_nblist = slu_confnet.get_da_nblist()
        sl.slu("user", "user1.wav", slu_nblist, slu_confnet)

        # second system turn, barged into by the user
        sl.turn("system")
        sl.dialogue_act("system", "thankyou()")
        sl.text("system", "Thank you.", cost = 1.0)
        sl.rec_start("system", "system2.wav")
        sl.rec_end("system2.wav")
        sl.barge_in("system", tts_time = True)

        # second user turn ends with a hangup
        sl.turn("user")
        sl.rec_start("user", "user2.wav")
        sl.rec_end("user2.wav")

        sl.hangup("user")
def test_session_logger(self):
    """Exercise the whole SessionLogger API end-to-end for three sessions.

    Duplicate of the variant above with PEP 8 keyword-argument spacing;
    logs a short two-turn dialogue per session including recordings, an
    ASR confusion network and an SLU confusion network.
    """
    cfg = Config.load_configs(config=CONFIG_DICT, use_default=False)
    sl = SessionLogger()

    # test 3 calls at once
    for i in range(3):
        sess_dir = "./%d" % i
        if not os.path.isdir(sess_dir):
            os.mkdir(sess_dir)
        sl.session_start(sess_dir)
        sl.config('config = ' + unicode(cfg))
        sl.header(cfg['Logging']["system_name"], cfg['Logging']["version"])
        sl.input_source("voip")

        # whole-dialogue recordings for both channels
        sl.dialogue_rec_start(None, "both_complete_dialogue.wav")
        sl.dialogue_rec_start("system", "system_complete_dialogue.wav")
        sl.dialogue_rec_start("user", "user_complete_dialogue.wav")
        sl.dialogue_rec_end("both_complete_dialogue.wav")
        sl.dialogue_rec_end("system_complete_dialogue.wav")
        sl.dialogue_rec_end("user_complete_dialogue.wav")

        # first system turn
        sl.turn("system")
        sl.dialogue_act("system", "hello()")
        sl.text("system", "Hello.")
        sl.rec_start("system", "system1.wav")
        sl.rec_end("system1.wav")

        # first user turn
        sl.turn("user")
        sl.rec_start("user", "user1.wav")
        sl.rec_end("user1.wav")

        # build and log an ASR confusion network with three word slots
        A1, A2, A3 = 0.90, 0.05, 0.05
        B1, B2, B3 = 0.70, 0.20, 0.10
        C1, C2, C3 = 0.80, 0.10, 0.10
        asr_confnet = UtteranceConfusionNetwork()
        asr_confnet.add([[A1, "want"], [A2, "has"], [A3, 'ehm']])
        asr_confnet.add([[B1, "Chinese"], [B2, "English"], [B3, 'cheap']])
        asr_confnet.add([[C1, "restaurant"], [C2, "pub"], [C3, 'hotel']])
        asr_confnet.merge()
        asr_confnet.normalise()
        asr_confnet.sort()
        asr_nblist = asr_confnet.get_utterance_nblist()
        sl.asr("user", "user1.wav", asr_nblist, asr_confnet)

        # build and log an SLU confusion network
        slu_confnet = DialogueActConfusionNetwork()
        slu_confnet.add(0.7, DialogueActItem('hello'))
        slu_confnet.add(0.6, DialogueActItem('thankyou'))
        slu_confnet.add(0.4, DialogueActItem('restart'))
        slu_confnet.add(0.1, DialogueActItem('bye'))
        slu_confnet.merge()
        slu_confnet.normalise()
        slu_confnet.sort()
        slu_nblist = slu_confnet.get_da_nblist()
        sl.slu("user", "user1.wav", slu_nblist, slu_confnet)

        # second system turn, barged into by the user
        sl.turn("system")
        sl.dialogue_act("system", "thankyou()")
        sl.text("system", "Thank you.", cost=1.0)
        sl.rec_start("system", "system2.wav")
        sl.rec_end("system2.wav")
        sl.barge_in("system", tts_time=True)

        # second user turn ends with a hangup
        sl.turn("user")
        sl.rec_start("user", "user2.wav")
        sl.rec_end("user2.wav")

        sl.hangup("user")
def test_merge_slu_confnets(self): confnet1 = DialogueActConfusionNetwork() confnet1.add(0.7, DialogueActItem('hello')) confnet1.add(0.2, DialogueActItem('bye')) confnet2 = DialogueActConfusionNetwork() confnet2.add(0.6, DialogueActItem('hello')) confnet2.add(0.3, DialogueActItem('restart')) confnets = [[0.7, confnet1], [0.3, confnet2]] merged_confnets = merge_slu_confnets(confnets) correct_merged_confnet = DialogueActConfusionNetwork() correct_merged_confnet.add_merge(0.7 * 0.7, DialogueActItem('hello'), combine='add') correct_merged_confnet.add_merge(0.7 * 0.2, DialogueActItem('bye'), combine='add') correct_merged_confnet.add_merge(0.3 * 0.6, DialogueActItem('hello'), combine='add') correct_merged_confnet.add_merge(0.3 * 0.3, DialogueActItem('restart'), combine='add') s = [] s.append("") s.append("Merged confnets:") s.append(unicode(merged_confnets)) s.append("") s.append("Correct merged results:") s.append(unicode(correct_merged_confnet)) s.append("") print '\n'.join(s) self.assertEqual(unicode(merged_confnets), unicode(correct_merged_confnet))
def main():
    """Drive a PRuleDM through a short scripted dialogue and print the
    system's output after each user input (manual smoke test)."""
    from alex.utils.config import Config
    from alex.utils.caminfodb import CamInfoDb

    # This implicitly loads also the default config.
    cfg = Config.load_configs(['resources/lz.cfg'], project_root=True)
    db_cfg = cfg['DM']["PUfalRuleDM"]['db_cfg']

    # database provider
    db = CamInfoDb(db_cfg)

    pdm = PRuleDM(cfg, db)
    pdm.new_dialogue()
    pdm.da_out()

    # user's input: a noisy mix of informs, confirms, requests and junk
    cn = DialogueActConfusionNetwork()
    cn.add(0.7, DialogueActItem(dai="inform(food=chinese)"))
    cn.add(0.2, DialogueActItem(dai="inform(food=indian)"))
    cn.add(0.5, DialogueActItem(dai="inform(food=chinese)"))
    cn.add(0.1, DialogueActItem(dai="inform(food=czech)"))
    cn.add(0.1, DialogueActItem(dai="confirm(food=czech)"))
    cn.add(0.6, DialogueActItem(dai="request(phone)"))
    cn.add(0.3, DialogueActItem(dai="reset()"))
    cn.add(0.3, DialogueActItem(dai="asdf()"))
    cn.add(0.3, DialogueActItem(dai="reset()"))
    print cn
    pdm.da_in(cn)
    pdm.da_out()

    # a confident confirm
    cn = DialogueActConfusionNetwork()
    cn.add(0.99, DialogueActItem(dai="confirm(food=indian)"))
    print cn
    pdm.da_in(cn)
    pdm.da_out()

    # ask for alternatives twice in a row
    cn = DialogueActConfusionNetwork()
    cn.add(0.77, DialogueActItem(dai="reqalts()"))
    print cn
    pdm.da_in(cn)
    pdm.da_out()

    cn = DialogueActConfusionNetwork()
    cn.add(0.77, DialogueActItem(dai="reqalts()"))
    print cn
    pdm.da_in(cn)
    pdm.da_out()

    # confirm again after the alternatives
    cn = DialogueActConfusionNetwork()
    cn.add(0.99, DialogueActItem(dai="confirm(food=indian)"))
    print cn
    pdm.da_in(cn)
    pdm.da_out()

    # request two slots at once
    cn = DialogueActConfusionNetwork()
    cn.add(0.99, DialogueActItem(dai="request(name)"))
    cn.add(0.99, DialogueActItem(dai="request(food)"))
    print cn
    pdm.da_in(cn)
    pdm.da_out()

    # end the dialogue
    cn = DialogueActConfusionNetwork()
    cn.add(0.99, DialogueActItem(dai="bye()"))
    print cn
    pdm.da_in(cn)
    pdm.da_out()
def parse_X(self, utterance, verbose=False):
    """Parse an utterance into a dialogue act confusion network using the
    trained per-DAI classifiers.

    Abstracted classifiers (value starting with 'CL_') are evaluated once
    per matching (form, value, category) triple in the utterance;
    concrete classifiers are evaluated once. Each classifier's positive
    probability is merged into the network with combine='max'.

    Returns the sorted, pruned DialogueActConfusionNetwork.
    """
    if verbose:
        print '=' * 120
        print 'Parsing X'
        print '-' * 120
        print unicode(utterance)

    # NOTE(review): utterance_fvcs is only assigned under this branch but
    # is used below — confirm self.preprocessing is always set here.
    if self.preprocessing:
        utterance = self.preprocessing.normalise(utterance)
        utterance_fvcs = self.get_fvc(utterance)

    if verbose:
        print unicode(utterance)
        print unicode(utterance_fvcs)

    da_confnet = DialogueActConfusionNetwork()
    for clser in self.trained_classifiers:
        if verbose:
            print "Using classifier: ", unicode(clser)

        if self.parsed_classifiers[clser].value and self.parsed_classifiers[clser].value.startswith('CL_'):
            # process abstracted classifiers
            for f, v, c in utterance_fvcs:
                cc = "CL_" + c.upper()
                if self.parsed_classifiers[clser].value == cc:
                    #print clser, f, v, c
                    classifiers_features = self.get_features(utterance, (f, v, cc), utterance_fvcs)
                    classifiers_inputs = np.zeros((1, len(self.classifiers_features_mapping[clser])))
                    classifiers_inputs[0] = classifiers_features.get_feature_vector(self.classifiers_features_mapping[clser])

                    #if verbose:
                    #    print classifiers_features
                    #    print self.classifiers_features_mapping[clser]

                    p = self.trained_classifiers[clser].predict_proba(classifiers_inputs)

                    if verbose:
                        print ' Probability:', p

                    # Instantiate the abstract DAI with the concrete value v.
                    dai = DialogueActItem(self.parsed_classifiers[clser].dat, self.parsed_classifiers[clser].name, v)
                    da_confnet.add_merge(p[0][1], dai, combine='max')
        else:
            # process concrete classifiers
            classifiers_features = self.get_features(utterance, (None, None, None), utterance_fvcs)
            classifiers_inputs = np.zeros((1, len(self.classifiers_features_mapping[clser])))
            classifiers_inputs[0] = classifiers_features.get_feature_vector(self.classifiers_features_mapping[clser])

            #if verbose:
            #    print classifiers_features
            #    print self.classifiers_features_mapping[clser]

            p = self.trained_classifiers[clser].predict_proba(classifiers_inputs)

            if verbose:
                print ' Probability:', p

            dai = self.parsed_classifiers[clser]
            da_confnet.add_merge(p[0][1], dai, combine='max')

    da_confnet.sort().prune()
    return da_confnet
def parse_1_best(self, obs, verbose=False, *args, **kwargs):
    """Parse an utterance into a dialogue act.

    First tries an exact dictionary lookup (utt2da); otherwise normalises
    and abstracts the utterance, handles non-speech events, and runs the
    per-category rule parsers.

    :rtype DialogueActConfusionNetwork
    """
    utterance = obs['utt']

    if isinstance(utterance, UtteranceHyp):
        # Parse just the utterance and ignore the confidence score.
        utterance = utterance.utterance

    if verbose:
        print 'Parsing utterance "{utt}".'.format(utt=utterance)

    res_cn = DialogueActConfusionNetwork()

    # Exact match against the hand-crafted utterance -> DA dictionary.
    dict_da = self.utt2da.get(unicode(utterance), None)
    if dict_da:
        for dai in DialogueAct(dict_da):
            res_cn.add(1.0, dai)
        return res_cn

    utterance = self.preprocessing.normalise_utterance(utterance)
    abutterance, category_labels = self.abstract_utterance(utterance)

    if verbose:
        print 'After preprocessing: "{utt}".'.format(utt=abutterance)
        print category_labels

    self.parse_non_speech_events(utterance, res_cn)

    # Strip noise tokens before rule parsing.
    utterance = utterance.replace_all(['_noise_'], '').replace_all(['_laugh_'], '').replace_all(['_ehm_hmm_'], '').replace_all(['_inhale_'], '')
    abutterance = abutterance.replace_all(['_noise_'], '').replace_all(['_laugh_'], '').replace_all(['_ehm_hmm_'], '').replace_all(['_inhale_'], '')

    abutterance = self.handle_false_abstractions(abutterance)
    # These parsers are always tried regardless of abstraction output.
    category_labels.add('CITY')
    category_labels.add('VEHICLE')
    category_labels.add('NUMBER')

    # Only run rule parsers if no non-speech event was detected.
    if len(res_cn) == 0:
        if 'STOP' in category_labels:
            self.parse_stop(abutterance, res_cn)
        if 'CITY' in category_labels:
            self.parse_city(abutterance, res_cn)
        if 'NUMBER' in category_labels:
            # NOTE(review): unlike the other parse_* calls, parse_number is
            # not passed res_cn — confirm its signature and that this is
            # intentional.
            self.parse_number(abutterance)
        if any([word.startswith("TIME") for word in abutterance]):
            category_labels.add('TIME')
        if 'TIME' in category_labels:
            self.parse_time(abutterance, res_cn)
        if 'DATE_REL' in category_labels:
            self.parse_date_rel(abutterance, res_cn)
        if 'AMPM' in category_labels:
            self.parse_ampm(abutterance, res_cn)
        if 'VEHICLE' in category_labels:
            self.parse_vehicle(abutterance, res_cn)
        if 'TASK' in category_labels:
            self.parse_task(abutterance, res_cn)

        self.parse_meta(utterance, res_cn)

    res_cn.merge()

    return res_cn
def test_get_best_nonnull_da(self):
    """get_best_nonnull_da returns the best hypothesis that is not null()."""
    # Without a null() item the most probable item simply wins.
    dacn = DialogueActConfusionNetwork()
    for prob, dai_str in ((0.2, 'inform(food=chinese)'),
                          (0.7, 'inform(food=czech)'),
                          (0.1, 'inform(food=russian)')):
        dacn.add(prob, DialogueActItem(dai=dai_str))
    da_nn = dacn.get_best_nonnull_da()
    self.assertEqual(len(da_nn), 1)
    self.assertEqual(da_nn.dais[0], DialogueActItem(dai='inform(food=czech)'))

    # A dominant null() must be skipped in favour of the best real item.
    dacn = DialogueActConfusionNetwork()
    for prob, dai_str in ((0.075, 'inform(food=chinese)'),
                          (0.7, 'null()'),
                          (0.15, 'inform(food=czech)'),
                          (0.075, 'inform(food=russian)')):
        dacn.add(prob, DialogueActItem(dai=dai_str))
    da_nn = dacn.get_best_nonnull_da()
    self.assertEqual(len(da_nn), 1)
    self.assertEqual(da_nn.dais[0], DialogueActItem(dai='inform(food=czech)'))
def _build_user_input(self, *args):
    """Build a confusion network containing each given DAI string with
    probability 1.0."""
    network = DialogueActConfusionNetwork()
    for dai_string in args:
        network.add(1.0, DialogueActItem(dai=dai_string))
    return network
def process_pending_commands(self):
    """Process all pending commands.

    Available commands:
        stop() - stop processing and exit the process
        flush() - flush input buffers.
            Now it only flushes the input connection.

    Return True if the process should terminate.
    """
    while self.commands.poll():
        command = self.commands.recv()
        if self.cfg['DM']['debug']:
            self.cfg['Logging']['system_logger'].debug(command)

        if isinstance(command, Command):
            if command.parsed['__name__'] == 'stop':
                # Caller should terminate the process.
                return True

            if command.parsed['__name__'] == 'flush':
                # discard all data in in input buffers
                while self.slu_hypotheses_in.poll():
                    data_in = self.slu_hypotheses_in.recv()

                self.dm.end_dialogue()

                self.commands.send(Command("flushed()", 'DM', 'HUB'))

                return False

            if command.parsed['__name__'] == 'new_dialogue':
                self.epilogue_state = None
                self.dm.new_dialogue()

                self.cfg['Logging']['session_logger'].turn("system")
                self.dm.log_state()

                # I should generate the first DM output
                da = self.dm.da_out()

                if self.cfg['DM']['debug']:
                    s = []
                    s.append("DM Output")
                    s.append("-"*60)
                    s.append(unicode(da))
                    s.append("")
                    s = '\n'.join(s)
                    self.cfg['Logging']['system_logger'].debug(s)

                self.cfg['Logging']['session_logger'].dialogue_act("system", da)

                self.commands.send(DMDA(da, 'DM', 'HUB'))

                return False

            if command.parsed['__name__'] == 'end_dialogue':
                self.dm.end_dialogue()

                return False

            if command.parsed['__name__'] == 'timeout':
                # check whether there is a looong silence
                # if yes then inform the DM
                silence_time = command.parsed['silence_time']

                cn = DialogueActConfusionNetwork()
                cn.add(1.0, DialogueActItem('silence','time', silence_time))

                # process the input DA
                self.dm.da_in(cn)

                self.cfg['Logging']['session_logger'].turn("system")
                self.dm.log_state()

                if self.epilogue_state and float(silence_time) > 5.0:
                    # a user was silent for too long, therefore hung up
                    self.cfg['Logging']['session_logger'].dialogue_act("system", self.epilogue_da)
                    self.commands.send(DMDA(self.epilogue_da, 'DM', 'HUB'))
                    self.commands.send(Command('hangup()', 'DM', 'HUB'))
                else:
                    da = self.dm.da_out()

                    if self.cfg['DM']['debug']:
                        s = []
                        s.append("DM Output")
                        s.append("-"*60)
                        s.append(unicode(da))
                        s.append("")
                        s = '\n'.join(s)
                        self.cfg['Logging']['system_logger'].debug(s)

                    self.cfg['Logging']['session_logger'].dialogue_act("system", da)

                    self.commands.send(DMDA(da, 'DM', 'HUB'))

                    # Hang up when the DM says goodbye.
                    if da.has_dat("bye"):
                        self.commands.send(Command('hangup()', 'DM', 'HUB'))

                return False

    return False
def test_get_da_nblist(self):
    """N-best list extraction from a DA confusion network."""
    # Simple case with one good hypothesis.
    dacn = DialogueActConfusionNetwork()
    for prob, dai_str in ((0.05, 'inform(food=chinese)'),
                          (0.9, 'inform(food=czech)'),
                          (0.05, 'inform(food=russian)')):
        dacn.add(prob, DialogueActItem(dai=dai_str))
    best_da = dacn.get_da_nblist().get_best_da()
    self.assertEqual(best_da, DialogueAct(da_str='inform(food=czech)'))

    # More good hypotheses: both strong items end up in the best DA.
    dacn = DialogueActConfusionNetwork()
    for prob, dai_str in ((0.05, 'inform(food=chinese)'),
                          (0.9, 'inform(food=czech)'),
                          (0.9, 'inform(food=russian)')):
        dacn.add(prob, DialogueActItem(dai=dai_str))
    best_da = dacn.get_da_nblist().get_best_da()
    self.assertEqual(best_da,
                     DialogueAct(da_str='inform(food=czech)&inform(food=russian)'))
def test_get_da_nblist(self):
    """Duplicate n-best extraction test, driven through a local builder."""
    def build(p_chinese, p_czech, p_russian):
        net = DialogueActConfusionNetwork()
        net.add(p_chinese, DialogueActItem(dai='inform(food=chinese)'))
        net.add(p_czech, DialogueActItem(dai='inform(food=czech)'))
        net.add(p_russian, DialogueActItem(dai='inform(food=russian)'))
        return net

    # A single clearly-best hypothesis wins on its own.
    best_da = build(0.05, 0.9, 0.05).get_da_nblist().get_best_da()
    self.assertEqual(best_da, DialogueAct(da_str='inform(food=czech)'))

    # Two strong hypotheses both make it into the best dialogue act.
    best_da = build(0.05, 0.9, 0.9).get_da_nblist().get_best_da()
    self.assertEqual(
        best_da,
        DialogueAct(da_str='inform(food=czech)&inform(food=russian)'))
def test_get_best_da_hyp(self):
    """get_best_da_hyp selects items above threshold and multiplies
    include/exclude probabilities into the hypothesis probability."""
    # Test case when only one dai should be included in the hyp.
    dacn = DialogueActConfusionNetwork()
    dacn.add(0.2, DialogueActItem(dai='inform(food=chinese)'))
    dacn.add(0.7, DialogueActItem(dai='inform(food=czech)'))
    dacn.add(0.1, DialogueActItem(dai='inform(food=russian)'))
    best_hyp = dacn.get_best_da_hyp(use_log=False)
    # P(not chinese) * P(czech) * P(not russian)
    self.assertAlmostEqual(best_hyp.prob, 0.8 * 0.7 * 0.9)
    self.assertEqual(len(best_hyp.da), 1)

    # Test case when 2 dais should be included in the hyp.
    dacn = DialogueActConfusionNetwork()
    dacn.add(0.1, DialogueActItem(dai='inform(food=chinese)'))
    dacn.add(0.7, DialogueActItem(dai='inform(food=czech)'))
    dacn.add(0.9, DialogueActItem(dai='inform(food=russian)'))
    best_hyp = dacn.get_best_da_hyp(use_log=False)
    self.assertAlmostEqual(best_hyp.prob, 0.9 * 0.7 * 0.9)
    self.assertEqual(len(best_hyp.da), 2)

    # Test the case with logarithms.
    dacn = DialogueActConfusionNetwork()
    dacn.add(0.1, DialogueActItem(dai='inform(food=chinese)'))
    dacn.add(0.7, DialogueActItem(dai='inform(food=czech)'))
    dacn.add(0.9, DialogueActItem(dai='inform(food=russian)'))
    best_hyp = dacn.get_best_da_hyp(use_log=True)
    self.assertAlmostEqual(best_hyp.prob, math.log(0.9 * 0.7 * 0.9))
    self.assertEqual(len(best_hyp.da), 2)

    # Test the case with manual thresholds.
    dacn = DialogueActConfusionNetwork()
    dacn.add(0.1, DialogueActItem(dai='inform(food=chinese)'))
    dacn.add(0.7, DialogueActItem(dai='inform(food=czech)'))
    dacn.add(0.9, DialogueActItem(dai='inform(food=russian)'))
    # Per-item threshold 0.9 is above czech's 0.7 probability.
    best_hyp = dacn.get_best_da_hyp(
        use_log=True, threshold=0.1,
        thresholds={
            DialogueActItem(dai='inform(food=chinese)'): 0.5,
            DialogueActItem(dai='inform(food=czech)'): 0.9,
            DialogueActItem(dai='inform(food=russian)'): 0.5
        })
    # Test food=czech should NOT be included.
    self.assertAlmostEqual(best_hyp.prob, math.log(0.9 * 0.3 * 0.9))
    self.assertEqual(len(best_hyp.da), 1)
    self.assertTrue(not DialogueActItem(dai='inform(food=czech)') in best_hyp.da)

    dacn = DialogueActConfusionNetwork()
    dacn.add(0.1, DialogueActItem(dai='inform(food=chinese)'))
    dacn.add(0.7, DialogueActItem(dai='inform(food=czech)'))
    dacn.add(0.9, DialogueActItem(dai='inform(food=russian)'))
    # With the czech threshold lowered to 0.5, czech passes again.
    best_hyp = dacn.get_best_da_hyp(
        use_log=True, threshold=0.1,
        thresholds={
            DialogueActItem(dai='inform(food=chinese)'): 0.5,
            DialogueActItem(dai='inform(food=czech)'): 0.5,
            DialogueActItem(dai='inform(food=russian)'): 0.5
        })
    # Test food=czech should be included.
    self.assertAlmostEqual(best_hyp.prob, math.log(0.9 * 0.7 * 0.9))
    self.assertEqual(len(best_hyp.da), 2)
    self.assertTrue(DialogueActItem(dai='inform(food=czech)') in best_hyp.da)
def test_merge(self):
    """Merging a network with itself keeps probabilities prunable."""
    dacn = DialogueActConfusionNetwork()
    for prob, dai_str in ((0.05, 'inform(food=chinese)'),
                          (0.9, 'inform(food=czech)'),
                          (0.00005, 'inform(food=russian)')):
        dacn.add(prob, DialogueActItem(dai=dai_str))
    dacn.merge(dacn, combine='max')
    # Russian food should be pruned.
    dacn.sort().prune()
    russian = DialogueActItem(dai='inform(food=russian)')
    self.assertTrue(russian not in dacn)
def test_merge_slu_confnets(self):
    """Duplicate merge test without the debug print."""
    confnet1 = DialogueActConfusionNetwork()
    confnet1.add(0.7, DialogueActItem('hello'))
    confnet1.add(0.2, DialogueActItem('bye'))
    confnet2 = DialogueActConfusionNetwork()
    confnet2.add(0.6, DialogueActItem('hello'))
    confnet2.add(0.3, DialogueActItem('restart'))

    merged_confnets = merge_slu_confnets([[0.7, confnet1], [0.3, confnet2]])

    # Expected: weight * item prob, summed over both nets for 'hello'.
    correct_merged_confnet = DialogueActConfusionNetwork()
    for weight, act_name in ((0.7 * 0.7, 'hello'),
                             (0.7 * 0.2, 'bye'),
                             (0.3 * 0.6, 'hello'),
                             (0.3 * 0.3, 'restart')):
        correct_merged_confnet.add_merge(weight, DialogueActItem(act_name),
                                         combine='add')

    s = ["",
         "Merged confnets:",
         unicode(merged_confnets),
         "",
         "Correct merged results:",
         unicode(correct_merged_confnet),
         ""]

    self.assertEqual(unicode(merged_confnets), unicode(correct_merged_confnet))
def test_get_best_da_hyp(self):
    """Exercise get_best_da_hyp over several probability configurations,
    in linear and log domains, with and without per-item thresholds."""

    def build_confnet(p_chinese, p_czech, p_russian):
        # Helper: three competing food hypotheses with the given scores.
        net = DialogueActConfusionNetwork()
        net.add(p_chinese, DialogueActItem(dai='inform(food=chinese)'))
        net.add(p_czech, DialogueActItem(dai='inform(food=czech)'))
        net.add(p_russian, DialogueActItem(dai='inform(food=russian)'))
        return net

    # Test case when only one dai should be included in the hyp.
    hyp = build_confnet(0.2, 0.7, 0.1).get_best_da_hyp(use_log=False)
    self.assertAlmostEqual(hyp.prob, 0.8 * 0.7 * 0.9)
    self.assertEqual(len(hyp.da), 1)

    # Test case when 2 dais should be included in the hyp.
    hyp = build_confnet(0.1, 0.7, 0.9).get_best_da_hyp(use_log=False)
    self.assertAlmostEqual(hyp.prob, 0.9 * 0.7 * 0.9)
    self.assertEqual(len(hyp.da), 2)

    # Test the case with logarithms.
    hyp = build_confnet(0.1, 0.7, 0.9).get_best_da_hyp(use_log=True)
    self.assertAlmostEqual(hyp.prob, math.log(0.9 * 0.7 * 0.9))
    self.assertEqual(len(hyp.da), 2)

    # Test the case with manual thresholds.
    hyp = build_confnet(0.1, 0.7, 0.9).get_best_da_hyp(
        use_log=True, threshold=0.1,
        thresholds={
            DialogueActItem(dai='inform(food=chinese)'): 0.5,
            DialogueActItem(dai='inform(food=czech)'): 0.9,
            DialogueActItem(dai='inform(food=russian)'): 0.5})
    # Test food=czech should NOT be included.
    self.assertAlmostEqual(hyp.prob, math.log(0.9 * 0.3 * 0.9))
    self.assertEqual(len(hyp.da), 1)
    self.assertFalse(DialogueActItem(dai='inform(food=czech)') in hyp.da)

    hyp = build_confnet(0.1, 0.7, 0.9).get_best_da_hyp(
        use_log=True, threshold=0.1,
        thresholds={
            DialogueActItem(dai='inform(food=chinese)'): 0.5,
            DialogueActItem(dai='inform(food=czech)'): 0.5,
            DialogueActItem(dai='inform(food=russian)'): 0.5})
    # Test food=czech should be included.
    self.assertAlmostEqual(hyp.prob, math.log(0.9 * 0.7 * 0.9))
    self.assertEqual(len(hyp.da), 2)
    self.assertTrue(DialogueActItem(dai='inform(food=czech)') in hyp.da)
def parse_1_best(self, obs, verbose=False): """Parse an utterance into a dialogue act.""" utterance = obs['utt'] if isinstance(utterance, UtteranceHyp): # Parse just the utterance and ignore the confidence score. utterance = utterance.utterance # print 'Parsing utterance "{utt}".'.format(utt=utterance) if verbose: print 'Parsing utterance "{utt}".'.format(utt=utterance) if self.preprocessing: # the text normalisation utterance = self.preprocessing.normalise_utterance(utterance) abutterance, category_labels = self.abstract_utterance(utterance) if verbose: print 'After preprocessing: "{utt}".'.format(utt=abutterance) print category_labels else: category_labels = dict() # handle false positive alarms of abstraction abutterance = abutterance.replace(('STOP=Metra',), ('metra',)) abutterance = abutterance.replace(('STOP=Nádraží',), ('nádraží',)) abutterance = abutterance.replace(('STOP=SME',), ('sme',)) abutterance = abutterance.replace(('STOP=Bílá Hora', 'STOP=Železniční stanice',), ('STOP=Bílá Hora', 'železniční stanice',)) abutterance = abutterance.replace(('TIME=now','bych', 'chtěl'), ('teď', 'bych', 'chtěl')) abutterance = abutterance.replace(('STOP=Čím','se'), ('čím', 'se',)) abutterance = abutterance.replace(('STOP=Lužin','STOP=Na Chmelnici',), ('STOP=Lužin','na','STOP=Chmelnici',)) abutterance = abutterance.replace(('STOP=Konečná','zastávka'), ('konečná', 'zastávka',)) abutterance = abutterance.replace(('STOP=Konečná','STOP=Anděl'), ('konečná', 'STOP=Anděl',)) abutterance = abutterance.replace(('STOP=Konečná stanice','STOP=Ládví'), ('konečná', 'stanice', 'STOP=Ládví',)) abutterance = abutterance.replace(('STOP=Výstupní', 'stanice', 'je'), ('výstupní', 'stanice', 'je')) abutterance = abutterance.replace(('STOP=Nová','jiné'), ('nové', 'jiné',)) abutterance = abutterance.replace(('STOP=Nová','spojení'), ('nové', 'spojení',)) abutterance = abutterance.replace(('STOP=Nová','zadání'), ('nové', 'zadání',)) abutterance = 
abutterance.replace(('STOP=Nová','TASK=find_connection'), ('nový', 'TASK=find_connection',)) abutterance = abutterance.replace(('z','CITY=Liberk',), ('z', 'CITY=Liberec',)) abutterance = abutterance.replace(('do','CITY=Liberk',), ('do', 'CITY=Liberec',)) abutterance = abutterance.replace(('pauza','hrozně','STOP=Dlouhá',), ('pauza','hrozně','dlouhá',)) abutterance = abutterance.replace(('v','STOP=Praga',), ('v', 'CITY=Praha',)) abutterance = abutterance.replace(('na','STOP=Praga',), ('na', 'CITY=Praha',)) abutterance = abutterance.replace(('po','STOP=Praga', 'ale'), ('po', 'CITY=Praha',)) abutterance = abutterance.replace(('jsem','v','STOP=Metra',), ('jsem', 'v', 'VEHICLE=metro',)) category_labels.add('CITY') category_labels.add('VEHICLE') # print 'After preprocessing: "{utt}".'.format(utt=abutterance) # print category_labels res_cn = DialogueActConfusionNetwork() self.parse_non_speech_events(utterance, res_cn) if len(res_cn) == 0: # remove non speech events, they are not relevant for SLU abutterance = abutterance.replace_all('_noise_', '').replace_all('_laugh_', '').replace_all('_ehm_hmm_', '').replace_all('_inhale_', '') if 'STOP' in category_labels: self.parse_stop(abutterance, res_cn) if 'CITY' in category_labels: self.parse_city(abutterance, res_cn) if 'TIME' in category_labels: self.parse_time(abutterance, res_cn) if 'DATE_REL' in category_labels: self.parse_date_rel(abutterance, res_cn) if 'AMPM' in category_labels: self.parse_ampm(abutterance, res_cn) if 'VEHICLE' in category_labels: self.parse_vehicle(abutterance, res_cn) if 'TASK' in category_labels: self.parse_task(abutterance, res_cn) self.parse_meta(utterance, res_cn) res_cn.merge() return res_cn
def test_normalise(self):
    """normalise() must refuse a network whose entries cannot form a
    probability distribution (one entry exceeds 1.0)."""
    network = DialogueActConfusionNetwork()
    network.add(0.05, DialogueActItem(dai='inform(food=chinese)'))
    # Deliberately out-of-range probability.
    network.add(1.9, DialogueActItem(dai='inform(food=czech)'))
    network.add(0.00005, DialogueActItem(dai='inform(food=russian)'))

    self.assertRaises(ConfusionNetworkException, network.normalise)
def parse_1_best(self, obs, verbose=False): """Parse an utterance into a dialogue act.""" utterance = obs['utt'] if isinstance(utterance, UtteranceHyp): # Parse just the utterance and ignore the confidence score. utterance = utterance.utterance # print 'Parsing utterance "{utt}".'.format(utt=utterance) if verbose: print 'Parsing utterance "{utt}".'.format(utt=utterance) if self.preprocessing: # the text normalisation utterance = self.preprocessing.normalise_utterance(utterance) abutterance, category_labels = self.abstract_utterance(utterance) if verbose: print 'After preprocessing: "{utt}".'.format(utt=abutterance) print category_labels else: category_labels = dict() # handle false positive alarms of abstraction abutterance = abutterance.replace(('STOP=Metra', ), ('metra', )) abutterance = abutterance.replace(('STOP=Nádraží', ), ('nádraží', )) abutterance = abutterance.replace(('STOP=SME', ), ('sme', )) abutterance = abutterance.replace(( 'STOP=Bílá Hora', 'STOP=Železniční stanice', ), ( 'STOP=Bílá Hora', 'železniční stanice', )) abutterance = abutterance.replace(('TIME=now', 'bych', 'chtěl'), ('teď', 'bych', 'chtěl')) abutterance = abutterance.replace(('STOP=Čím', 'se'), ( 'čím', 'se', )) abutterance = abutterance.replace(( 'STOP=Lužin', 'STOP=Na Chmelnici', ), ( 'STOP=Lužin', 'na', 'STOP=Chmelnici', )) abutterance = abutterance.replace(('STOP=Konečná', 'zastávka'), ( 'konečná', 'zastávka', )) abutterance = abutterance.replace(('STOP=Konečná', 'STOP=Anděl'), ( 'konečná', 'STOP=Anděl', )) abutterance = abutterance.replace( ('STOP=Konečná stanice', 'STOP=Ládví'), ( 'konečná', 'stanice', 'STOP=Ládví', )) abutterance = abutterance.replace(('STOP=Výstupní', 'stanice', 'je'), ('výstupní', 'stanice', 'je')) abutterance = abutterance.replace(('STOP=Nová', 'jiné'), ( 'nové', 'jiné', )) abutterance = abutterance.replace(('STOP=Nová', 'spojení'), ( 'nové', 'spojení', )) abutterance = abutterance.replace(('STOP=Nová', 'zadání'), ( 'nové', 'zadání', )) abutterance = 
abutterance.replace( ('STOP=Nová', 'TASK=find_connection'), ( 'nový', 'TASK=find_connection', )) abutterance = abutterance.replace(( 'z', 'CITY=Liberk', ), ( 'z', 'CITY=Liberec', )) abutterance = abutterance.replace(( 'do', 'CITY=Liberk', ), ( 'do', 'CITY=Liberec', )) abutterance = abutterance.replace(( 'pauza', 'hrozně', 'STOP=Dlouhá', ), ( 'pauza', 'hrozně', 'dlouhá', )) abutterance = abutterance.replace(( 'v', 'STOP=Praga', ), ( 'v', 'CITY=Praha', )) abutterance = abutterance.replace(( 'na', 'STOP=Praga', ), ( 'na', 'CITY=Praha', )) abutterance = abutterance.replace(('po', 'STOP=Praga', 'ale'), ( 'po', 'CITY=Praha', )) abutterance = abutterance.replace(( 'jsem', 'v', 'STOP=Metra', ), ( 'jsem', 'v', 'VEHICLE=metro', )) category_labels.add('CITY') category_labels.add('VEHICLE') # print 'After preprocessing: "{utt}".'.format(utt=abutterance) # print category_labels res_cn = DialogueActConfusionNetwork() self.parse_non_speech_events(utterance, res_cn) if len(res_cn) == 0: # remove non speech events, they are not relevant for SLU abutterance = abutterance.replace_all('_noise_', '').replace_all( '_laugh_', '').replace_all('_ehm_hmm_', '').replace_all('_inhale_', '') if 'STOP' in category_labels: self.parse_stop(abutterance, res_cn) if 'CITY' in category_labels: self.parse_city(abutterance, res_cn) if 'TIME' in category_labels: self.parse_time(abutterance, res_cn) if 'DATE_REL' in category_labels: self.parse_date_rel(abutterance, res_cn) if 'AMPM' in category_labels: self.parse_ampm(abutterance, res_cn) if 'VEHICLE' in category_labels: self.parse_vehicle(abutterance, res_cn) if 'TASK' in category_labels: self.parse_task(abutterance, res_cn) self.parse_meta(utterance, res_cn) res_cn.merge() return res_cn
def parse_1_best(self, obs, verbose=False, *args, **kwargs):
    """Parse an utterance into a dialogue act.

    First consults the utt2da lookup table for an exact-match dialogue
    act; otherwise normalises and abstracts the utterance, repairs
    false-positive abstractions, and dispatches to the per-category
    parse_* handlers.

    :param obs: observation dict; obs['utt'] holds the utterance
        (an Utterance or an UtteranceHyp)
    :param verbose: if True, print intermediate processing stages
    :rtype DialogueActConfusionNetwork
    """
    utterance = obs['utt']

    if isinstance(utterance, UtteranceHyp):
        # Parse just the utterance and ignore the confidence score.
        utterance = utterance.utterance

    if verbose:
        print 'Parsing utterance "{utt}".'.format(utt=utterance)

    res_cn = DialogueActConfusionNetwork()

    # Shortcut: a verbatim match in the utterance-to-DA table wins outright
    # (every item gets probability 1.0 and no further parsing happens).
    dict_da = self.utt2da.get(unicode(utterance), None)
    if dict_da:
        for dai in DialogueAct(dict_da):
            res_cn.add(1.0, dai)
        return res_cn

    # the text normalisation
    utterance = self.preprocessing.normalise_utterance(utterance)

    abutterance, category_labels = self.abstract_utterance(utterance)

    if verbose:
        print 'After preprocessing: "{utt}".'.format(utt=abutterance)
        print category_labels

    self.parse_non_speech_events(utterance, res_cn)

    # Strip non-speech-event tokens; they are not relevant for SLU proper.
    utterance = utterance.replace_all(['_noise_'], '').replace_all(['_laugh_'], '').replace_all(['_ehm_hmm_'], '').replace_all(['_inhale_'], '')
    abutterance = abutterance.replace_all(['_noise_'], '').replace_all(['_laugh_'], '').replace_all(['_ehm_hmm_'], '').replace_all(['_inhale_'], '')

    abutterance = self.handle_false_abstractions(abutterance)
    # handle_false_abstractions may introduce labels of these categories,
    # so always run their parsers.
    category_labels.add('CITY')
    category_labels.add('VEHICLE')
    category_labels.add('NUMBER')

    # Only run the detailed parsers when no non-speech event was recognised.
    if len(res_cn) == 0:
        if 'STOP' in category_labels:
            self.parse_stop(abutterance, res_cn)
        if 'CITY' in category_labels:
            self.parse_city(abutterance, res_cn)
        if 'NUMBER' in category_labels:
            # NOTE(review): unlike every other parse_* call here, res_cn is
            # not passed -- confirm parse_number's signature/side effects.
            self.parse_number(abutterance)
        # parse_number may rewrite tokens into TIME forms; re-detect TIME.
        if any([word.startswith("TIME") for word in abutterance]):
            category_labels.add('TIME')
        if 'TIME' in category_labels:
            self.parse_time(abutterance, res_cn)
        if 'DATE_REL' in category_labels:
            self.parse_date_rel(abutterance, res_cn)
        if 'AMPM' in category_labels:
            self.parse_ampm(abutterance, res_cn)
        if 'VEHICLE' in category_labels:
            self.parse_vehicle(abutterance, res_cn)
        if 'TASK' in category_labels:
            self.parse_task(abutterance, res_cn)

        self.parse_meta(utterance, res_cn)

    res_cn.merge()

    return res_cn
def process_pending_commands(self):
    """Process all pending commands received over the inter-process pipe.

    Available commands:
      stop()  - stop processing and exit the process
      flush() - flush input buffers.
        Now it only flushes the input connection.

    Also handled here: print_log_dir, new_dialogue, end_dialogue and
    timeout (long-silence) notifications.

    Return True if the process should terminate.
    """

    while self.commands.poll():
        command = self.commands.recv()
        if self.cfg['DM']['debug']:
            self.cfg['Logging']['system_logger'].debug(command)

        if isinstance(command, Command):
            #Thanh:
            if command.parsed['__name__'] == 'print_log_dir':
                print '===***===session-log-dir:', command.source

            if command.parsed['__name__'] == 'stop':
                # Caller should terminate this process.
                return True

            if command.parsed['__name__'] == 'flush':
                # discard all data in in input buffers
                while self.slu_hypotheses_in.poll():
                    data_in = self.slu_hypotheses_in.recv()

                self.dm.end_dialogue()

                self.commands.send(Command("flushed()", 'DM', 'HUB'))

                return False

            #if command.parsed['__name__'] == 'prepare_new_dialogue':
                #self.dm.new_dialogue()

            if command.parsed['__name__'] == 'new_dialogue':
                self.dm.new_dialogue()#thanh change???
                self.epilogue_state = None

                self.cfg['Logging']['session_logger'].turn("system")
                self.dm.log_state()

                # I should generate the first DM output
                da = self.dm.da_out()

                if self.cfg['DM']['debug']:
                    s = []
                    s.append("DM Output")
                    s.append("-"*60)
                    s.append(unicode(da))
                    s.append("")
                    s = '\n'.join(s)
                    self.cfg['Logging']['system_logger'].debug(s)

                self.cfg['Logging']['session_logger'].dialogue_act("system", da)
                self.commands.send(DMDA(da, 'DM', 'HUB'))

                return False

            if command.parsed['__name__'] == 'end_dialogue':
                self.dm.end_dialogue()
                return False

            if command.parsed['__name__'] == 'timeout':
                # check whether there is a looong silence
                # if yes then inform the DM
                silence_time = command.parsed['silence_time']

                # Feed the silence duration to the DM as a dialogue act.
                cn = DialogueActConfusionNetwork()
                cn.add(1.0, DialogueActItem('silence','time', silence_time))

                # process the input DA
                self.dm.da_in(cn)

                self.cfg['Logging']['session_logger'].turn("system")
                self.dm.log_state()
                print '----Time out: ', self.epilogue_state, silence_time
                '''Thanh
                if self.epilogue_state == 'give_code':
                    # an cant_apply act have been chosen
                    self.cfg['Logging']['session_logger'].dialogue_act("system", self.epilogue_da)
                    self.commands.send(DMDA(self.epilogue_da, 'DM', 'HUB'))
                    self.commands.send(Command('hangup()', 'DM', 'HUB'))
                    return False
                #'''
                if self.epilogue_state and float(silence_time) > 5.0:
                    if self.epilogue_state == 'final_question': # and self.final_question_repeated<16:
                        # Re-ask the wrap-up question instead of hanging up.
                        da = DialogueAct('say(text="{text}")'.format(text="Sorry, did you get the correct information?"))
                        #self.final_question_repeated += 1
                        self.cfg['Logging']['session_logger'].dialogue_act("system", da)
                        self.commands.send(DMDA(da, 'DM', 'HUB'))
                    else:
                        # a user was silent for too long, therefore hung up
                        self.cfg['Logging']['session_logger'].dialogue_act("system", self.epilogue_da)
                        self.commands.send(DMDA(self.epilogue_da, 'DM', 'HUB'))
                        self.commands.send(Command('hangup()', 'DM', 'HUB'))
                else:
                    # Normal case: let the DM react to the silence.
                    da = self.dm.da_out()

                    if self.cfg['DM']['debug']:
                        s = []
                        s.append("DM Output")
                        s.append("-"*60)
                        s.append(unicode(da))
                        s.append("")
                        s = '\n'.join(s)
                        self.cfg['Logging']['system_logger'].debug(s)

                    self.cfg['Logging']['session_logger'].dialogue_act("system", da)
                    self.commands.send(DMDA(da, 'DM', 'HUB'))

                    if da.has_dat("bye"):
                        self.commands.send(Command('hangup()', 'DM', 'HUB'))

                return False

    return False
def test_make_from_da(self):
    """A confusion network built from a dialogue act must decode back
    to that same dialogue act."""
    source_da = DialogueAct('inform(food=czech)&inform(area=north)')
    confnet = DialogueActConfusionNetwork.make_from_da(source_da)
    self.assertEqual(confnet.get_best_da(), source_da)
def test_merge(self):
    """Self-merge with the 'max' combiner keeps probabilities intact,
    so sort().prune() still removes the negligible hypothesis."""
    confnet = DialogueActConfusionNetwork()
    confnet.add(0.05, DialogueActItem(dai='inform(food=chinese)'))
    confnet.add(0.9, DialogueActItem(dai='inform(food=czech)'))
    confnet.add(0.00005, DialogueActItem(dai='inform(food=russian)'))

    confnet.merge(confnet, combine='max')

    # Russian food should be pruned.
    confnet.sort().prune()
    self.assertFalse(DialogueActItem(dai='inform(food=russian)') in confnet)
def parse_X(self, utterance, verbose=False):
    """Score the utterance with every trained classifier and collect the
    results into a dialogue act confusion network.

    Abstracted classifiers (value starting with 'CL_') are evaluated once
    per matching (form, value, category) triple extracted from the
    utterance; concrete classifiers are evaluated once on the utterance
    as a whole. Each classifier's positive-class probability becomes the
    score of the corresponding DialogueActItem (combined with 'max' on
    duplicates). The network is sorted and pruned before being returned.

    :param utterance: the input utterance (normalised here if a
        preprocessing object is configured)
    :param verbose: if True, print per-classifier diagnostics
    :return: DialogueActConfusionNetwork of scored dialogue act items
    """
    if verbose:
        print '='*120
        print 'Parsing X'
        print '-'*120
        print unicode(utterance)

    if self.preprocessing:
        utterance = self.preprocessing.normalise(utterance)
        # (form, value, category) triples found in the utterance.
        utterance_fvcs = self.get_fvc(utterance)
        # NOTE(review): if preprocessing is unset, utterance_fvcs is never
        # assigned and the loop below raises NameError -- confirm that
        # preprocessing is always configured for this parser.

    if verbose:
        print unicode(utterance)
        print unicode(utterance_fvcs)

    da_confnet = DialogueActConfusionNetwork()
    for clser in self.trained_classifiers:
        if verbose:
            print "Using classifier: ", unicode(clser)

        if self.parsed_classifiers[clser].value and self.parsed_classifiers[clser].value.startswith('CL_'):
            # process abstracted classifiers
            for f, v, c in utterance_fvcs:
                cc = "CL_" + c.upper()

                if self.parsed_classifiers[clser].value == cc:
                    #print clser, f, v, c

                    # Feature vector for this particular (form, value, category).
                    classifiers_features = self.get_features(utterance, (f, v, cc), utterance_fvcs)
                    classifiers_inputs = np.zeros((1, len(self.classifiers_features_mapping[clser])))
                    classifiers_inputs[0] = classifiers_features.get_feature_vector(self.classifiers_features_mapping[clser])

                    #if verbose:
                    #    print classifiers_features
                    #    print self.classifiers_features_mapping[clser]

                    # p[0][1] is the probability of the positive class.
                    p = self.trained_classifiers[clser].predict_proba(classifiers_inputs)

                    if verbose:
                        print ' Probability:', p

                    # Instantiate the abstract slot value with the concrete one.
                    dai = DialogueActItem(self.parsed_classifiers[clser].dat, self.parsed_classifiers[clser].name, v)
                    da_confnet.add_merge(p[0][1], dai, combine='max')
        else:
            # process concrete classifiers
            classifiers_features = self.get_features(utterance, (None, None, None), utterance_fvcs)
            classifiers_inputs = np.zeros((1, len(self.classifiers_features_mapping[clser])))
            classifiers_inputs[0] = classifiers_features.get_feature_vector(self.classifiers_features_mapping[clser])

            #if verbose:
            #    print classifiers_features
            #    print self.classifiers_features_mapping[clser]

            # p[0][1] is the probability of the positive class.
            p = self.trained_classifiers[clser].predict_proba(classifiers_inputs)

            if verbose:
                print ' Probability:', p

            dai = self.parsed_classifiers[clser]
            da_confnet.add_merge(p[0][1], dai, combine='max')

    da_confnet.sort().prune()

    return da_confnet