Example #1
    def __init__(self):
        self.bow_enc = BoW_encoder()
        self.emb = UtteranceEmbed()
        self.at = ActionTracker(interact=True)
        print(self.at.dialog_acts)
        _, _, self.other_da, self.other_da_id = self.at.get_data()
        self.action_templates = self.at.action_templates
        self.action_size = self.at.action_size
        #self.responses = [r[0] for r in self.at.responses]
        #print(self.emb.dim , self.bow_enc.vocab_size , self.action_size)
        self.obs_size = self.emb.dim + self.bow_enc.vocab_size + self.action_size
        nb_hidden = 128
        self.exercises = list(self.at.action_templates.keys())
        self.net = LSTM_net(obs_size=self.obs_size,
                            action_size=self.action_size,
                            nb_hidden=nb_hidden)
        # restore checkpoint
        self.net.restore()

        # setup remote interaction via zeromq
        context = zmq.Context.instance()
        # PUB socket for sending system-output
        self.pubSocket: zmq.Socket = context.socket(zmq.PUB)
        self.pubSocket.bind(Config.SYSTEM_OUTPUT_ADDR)
        # SUB socket for receiving user-input
        self.subSocket: zmq.Socket = context.socket(zmq.SUB)
        self.subSocket.connect(Config.USER_INPUT_ADDR)
        self.subSocket.setsockopt_string(zmq.SUBSCRIBE,
                                         Config.USER_INPUT_SUBJECT)
        self.lastOutputMessage: str = None
        self.requestHandler: threading.Thread = None
        self.requestInterrupted: threading.Lock = None
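
The PUB/SUB sockets set up above imply a simple publish/receive loop. Below is a minimal sketch of one interaction step, assuming plain string messages prefixed with the subscription subject; interaction_step, the message layout, and the placeholder reply are illustrative and not part of the original class.

import zmq

def interaction_step(bot, timeout_ms=100):
    # Poll the SUB socket briefly for a user-input message.
    poller = zmq.Poller()
    poller.register(bot.subSocket, zmq.POLLIN)
    if poller.poll(timeout_ms):
        message = bot.subSocket.recv_string()
        # Strip the subscription subject to recover the raw utterance (assumed layout).
        utterance = message.split(' ', 1)[-1]
        reply = 'system reply for: ' + utterance   # placeholder for the dialog policy
        bot.lastOutputMessage = reply
        bot.pubSocket.send_string(reply)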
Example #2
    def __init__(self):
        import os
        # entity tracker
        et = EntityTracker()
        # bag-of-words encoder
        self.bow_enc = BoW_encoder()
        # load the word2vec embedding
        self.emb = UtteranceEmbed()
        # attach the entity tracker to the action tracker
        at = ActionTracker(et)
        # get the dataset and the start/end indices of each dialog
        self.dataset, dialog_indices = Data(et, at).trainset
        # dataset split: 200 dialogs for training, 50 for testing
        self.dialog_indices_tr = dialog_indices
        self.dialog_indices_dev = dialog_indices
        # obs_size = 300-dim word vector + 85 bag-of-words features + 4 slot features
        obs_size = self.emb.dim + self.bow_enc.vocab_size + et.num_features

        # action/response templates
        self.action_templates = at.get_action_templates()
        # number of actions
        action_size = at.action_size
        # number of hidden units
        nb_hidden = 128

        self.net = LSTM_net(obs_size=obs_size,
                            action_size=action_size,
                            nb_hidden=nb_hidden)
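
The dimensions mentioned in the comments above add up as follows; the concrete numbers (300, 85, 4) come from that comment, not from querying the real encoders.

# Observation size implied by the comment above (values assumed from the comment):
emb_dim = 300       # word2vec utterance embedding (self.emb.dim)
vocab_size = 85     # bag-of-words vocabulary (self.bow_enc.vocab_size)
num_features = 4    # entity/slot features (et.num_features)
obs_size = emb_dim + vocab_size + num_features
print(obs_size)     # 389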
Example #3
    def __init__(self):
    
        et = EntityTracker()
        self.bow_enc = BoW_encoder()
        self.emb = UtteranceEmbed()
        
        at = ActionTracker(et)
        
        self.train_dataset, train_dialog_indices = Data(et, at).train_set
        self.test_dataset, test_dialog_indices = Data(et, at).test_set
        
        print('=========================\n')
        print('length of Train dialog indices : ', len(train_dialog_indices))
        print('=========================\n')

        print('=========================\n')
        print('length of Test dialog indices : ', len(test_dialog_indices))
        print('=========================\n')
        
        # Shuffle Training Dataset
        random.shuffle(train_dialog_indices)
        
        self.dialog_indices_tr = train_dialog_indices
        self.dialog_indices_dev = test_dialog_indices

        obs_size = self.emb.dim + self.bow_enc.vocab_size + et.num_features
        self.action_templates = at.get_action_templates()
        action_size = at.action_size
        
        # nb_hidden = 128
        nb_hidden = 150
        
        print('=========================\n')
        print('Action_templates: ', action_size)
        print('=========================\n')

        self.net = LSTM_net(obs_size=obs_size,
                            action_size=action_size,
                            nb_hidden=nb_hidden)
        
        self.et = et
        self.at = at
        
        action_projection = []
        for action in self.action_templates:
            action_projection.append(self.emb.encode(action))
        self.action_projection = np.transpose(action_projection)
        self.action_size = action_size
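
Example #3 additionally embeds every action template and stacks the vectors into action_projection, which after the transpose has shape (emb.dim, action_size). The snippet never shows how it is consumed; a plausible use, sketched below purely as an assumption, is ranking actions by similarity to an utterance embedding.

import numpy as np

def rank_actions(utterance_vec, action_projection):
    # utterance_vec: (emb_dim,) vector, e.g. self.emb.encode(utterance)
    # action_projection: (emb_dim, action_size) matrix built in Example #3
    # Returns action indices ordered from best to worst match (illustrative only).
    scores = utterance_vec @ action_projection    # (action_size,)
    return np.argsort(scores)[::-1]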
Example #4
def main(in_dataset_folder, in_model_folder, in_no_ood_evaluation):
    rev_vocab, kb, action_templates, config = load_model(in_model_folder)
    test_dialogs, test_indices = read_dialogs(
        os.path.join(in_dataset_folder, 'dialog-babi-task6-dstc2-tst.txt'),
        with_indices=True)

    et = EntityTracker(kb)
    at = ActionTracker(None, et)
    at.set_action_templates(action_templates)

    vocab = {word: idx for idx, word in enumerate(rev_vocab)}
    X, action_masks, sequence_masks, y = make_dataset(
        test_dialogs, test_indices, vocab, et, at, config['max_input_length'])

    net = LSTM_net(config, X.shape[-1], action_masks.shape[-1])
    net.restore(in_model_folder)
    eval_stats_full_dataset = evaluate_advanced(
        net, (X, action_masks, sequence_masks, y), test_indices,
        at.action_templates)
    print(
        'Full dataset: {} turns overall, {} turns after the first OOD'.format(
            eval_stats_full_dataset['total_turns'],
            eval_stats_full_dataset['total_turns_after_ood']))
    print('Accuracy:')
    accuracy = (eval_stats_full_dataset['correct_turns']
                / eval_stats_full_dataset['total_turns'])
    accuracy_after_ood = (eval_stats_full_dataset['correct_turns_after_ood']
                          / eval_stats_full_dataset['total_turns_after_ood']
                          if eval_stats_full_dataset['total_turns_after_ood'] != 0
                          else 0)
    print('overall: {:.3f}; after first OOD: {:.3f}'.format(
        accuracy, accuracy_after_ood))
    print('Loss : {:.3f}'.format(eval_stats_full_dataset['avg_loss']))

    if in_no_ood_evaluation:
        eval_stats_no_ood = evaluate_advanced(
            net, (X, action_masks, sequence_masks, y),
            test_indices,
            at.action_templates,
            ignore_ood_accuracy=True)
        print('Accuracy (OOD turns ignored):')
        accuracy = (eval_stats_no_ood['correct_turns']
                    / eval_stats_no_ood['total_turns'])
        accuracy_after_ood = (eval_stats_no_ood['correct_turns_after_ood']
                              / eval_stats_no_ood['total_turns_after_ood']
                              if eval_stats_no_ood['total_turns_after_ood'] != 0
                              else 0)
        print('overall: {:.3f}; after first OOD: {:.3f}'.format(
            accuracy, accuracy_after_ood))
        print('Loss : {:.3f}'.format(eval_stats_no_ood['avg_loss']))
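
A command-line wrapper along the following lines would drive main(); the argument and flag names here are assumptions rather than the original script's interface.

import argparse

if __name__ == '__main__':
    # Hypothetical CLI for Example #4.
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_folder', help='folder containing dialog-babi-task6-dstc2-tst.txt')
    parser.add_argument('model_folder', help='folder with the saved LSTM_net checkpoint')
    parser.add_argument('--no-ood-evaluation', action='store_true',
                        help='also report accuracy with OOD turns ignored')
    args = parser.parse_args()
    main(args.dataset_folder, args.model_folder, args.no_ood_evaluation)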
Example #5
    def __init__(self):

        et = EntityTracker()
        self.bow_enc = BoW_encoder()
        self.emb = UtteranceEmbed()
        at = ActionTracker(et)

        obs_size = self.emb.dim + self.bow_enc.vocab_size + et.num_features
        self.action_templates = at.get_action_templates()
        action_size = at.action_size
        nb_hidden = 128

        self.net = LSTM_net(obs_size=obs_size, action_size=action_size, nb_hidden=nb_hidden)

        # restore checkpoint
        self.net.restore()
Example #6
    def __init__(self):

        et = EntityTracker()
        self.bow_enc = BoW_encoder()
        self.emb = UtteranceEmbed()
        at = ActionTracker(et)

        self.dataset, dialog_indices = Data(et, at).trainset
        self.dialog_indices_tr = dialog_indices[:200]
        self.dialog_indices_dev = dialog_indices[200:250]

        obs_size = self.emb.dim + self.bow_enc.vocab_size + et.num_features
        self.action_templates = at.get_action_templates()
        action_size = at.action_size
        nb_hidden = 128

        self.net = LSTM_net(obs_size=obs_size,
                            action_size=action_size,
                            nb_hidden=nb_hidden)
Example #7
    def __init__(self, train_ratio=15/20, epochs=100, train_whole=True):

        self.train_ratio = train_ratio
        self.epochs = epochs

        # flag for error analysis
        self.startCountingErrors = False
        self.count_all_one_eval = 0
        self.count_all_one_train = 0
        self.count_all_train = 0
        self.count_all_eval = 0
        # data.trainset = [(u, da_indx), ...]

        # HERE: action templates and size
        self.at = ActionTracker()
        self.action_templates = self.at.action_templates
        self.at.dialog_acts = self.at.dialog_acts
        self.at.dialog_act_ids = self.at.dialog_act_ids
        self.dataset, self.dialog_indices, self.other_da, self.other_da_id = self.at.get_data()
        self.action_size = self.at.action_size
        self.accs = []
        self.loss = []
        self.dev_accs = []
        self.dev_loss = []
        self.countErrors = Counter()
        self.countPredicitions = 0
        self.errorLog = {}
        self.highest_accuracy = 0
        #pickle.load(open("data/errors/errorLog.p", "rb"))
        if train_whole:
            self.bow_enc = BoW_encoder()
            self.emb = UtteranceEmbed()
            # HERE: num_features
            obs_size = self.emb.dim + self.bow_enc.vocab_size + self.action_size  # + self.et.size_context_features
            nb_hidden = 128
            self.net = LSTM_net(obs_size,
                                self.action_size,
                                nb_hidden=nb_hidden)
Example #8
    def __init__(self):

        et = EntityTracker()
        self.bow_enc = BoW_encoder()
        self.emb = UtteranceEmbed()
        at = ActionTracker(et)
        '''
        ['any preference on a type of cuisine', 'api_call <cuisine> <location> <party_size> <rest_type>', 'great let me do the reservation', 'hello what can i help you with today', 'here it is <info_address>', 'here it is <info_phone>', 'how many people would be in your party', "i'm on it", 'is there anything i can help you with', 'ok let me look into some options for you', 'sure is there anything else to update', 'sure let me find an other option for you', 'what do you think of this option: <restaurant>', 'where should it be', 'which price range are looking for', "you're welcome"]

        '''
        self.dataset, dialog_indices = Data(et, at).trainset
        self.dialog_indices_tr = dialog_indices[:200]
        self.dialog_indices_dev = dialog_indices[200:250]

        obs_size = self.emb.dim + self.bow_enc.vocab_size + et.num_features
        self.action_templates = at.get_action_templates()
        action_size = at.action_size
        nb_hidden = 128

        self.net = LSTM_net(obs_size=obs_size,
                            action_size=action_size,
                            nb_hidden=nb_hidden)
    def __init__(self):

        et = EntityTracker()
        self.bow_enc = BoW_encoder()
        self.emb = UtteranceEmbed()
        at = ActionTracker(et)

        self.dataset, dialog_indices = Data(et, at).trainset
        
        train_indices = joblib.load('data/train_test_list/train_indices_759')
        test_indices = joblib.load('data/train_test_list/test_indices_759_949')
        
        self.dialog_indices_tr = train_indices
        self.dialog_indices_dev = test_indices

        obs_size = self.emb.dim + self.bow_enc.vocab_size + et.num_features
        self.action_templates = at.get_action_templates()
        action_size = at.action_size
        nb_hidden = 128

        self.net = LSTM_net(obs_size=obs_size,
                            action_size=action_size,
                            nb_hidden=nb_hidden)
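
The index lists loaded with joblib.load above could have been produced by a one-off split like the sketch below; the 759/949 boundaries mirror the file names, but the splitting code itself is an assumption.

import joblib

# Hypothetical script for creating the index files loaded in the snippet above.
et = EntityTracker()
at = ActionTracker(et)
dataset, dialog_indices = Data(et, at).trainset

joblib.dump(dialog_indices[:759], 'data/train_test_list/train_indices_759')
joblib.dump(dialog_indices[759:949], 'data/train_test_list/test_indices_759_949')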