Example #1
def init(data_dir, task_id, OOV=False):
    # load candidates
    candidates, candid2indx = load_candidates(
        data_dir, task_id)
    n_cand = len(candidates)
    print("Candidate Size", n_cand)
    indx2candid = dict(
        (candid2indx[key], key) for key in candid2indx)

    # load task data
    train_data, test_data, val_data = load_dialog_task(
        data_dir, task_id, candid2indx, OOV)
    data = train_data + test_data + val_data

    # build parameters
    (word_idx, sentence_size, candidate_sentence_size,
     memory_size, vocab_size) = build_vocab(data, candidates)

    # Variable(torch.from_numpy(candidates_vec)).view(len(candidates), sentence_size)
    candidates_vec = vectorize_candidates(
        candidates, word_idx, candidate_sentence_size)

    return (candid2indx, indx2candid, candidates_vec, word_idx,
            sentence_size, candidate_sentence_size, memory_size,
            vocab_size, train_data, test_data, val_data)
Example #2
    def __init__(self, data_dir, model_dir, task_id, isInteractive=True, OOV=False,
                 memory_size=50, random_state=None, batch_size=32, learning_rate=0.001,
                 epsilon=1e-8, max_grad_norm=40.0, evaluation_interval=10, hops=3,
                 epochs=200, embedding_size=20, intro_times=20):
        self.data_dir = data_dir
        self.task_id = task_id
        self.model_dir = model_dir
        # self.isTrain=isTrain
        self.isInteractive = isInteractive
        self.OOV = OOV
        self.memory_size = memory_size
        self.random_state = random_state
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.max_grad_norm = max_grad_norm
        self.evaluation_interval = evaluation_interval
        self.hops = hops
        self.epochs = epochs
        self.embedding_size = embedding_size
        self.intro_times = intro_times

        candidates, self.candid2indx = load_candidates(
            self.data_dir, self.task_id)
        self.n_cand = len(candidates)
        print("Candidate Size", self.n_cand)
        self.indx2candid = dict(
            (self.candid2indx[key], key) for key in self.candid2indx)
        # task data
        self.trainData, self.testData, self.valData = load_dialog_task(
            self.data_dir, self.task_id, self.candid2indx, self.OOV)
        data = self.trainData + self.testData + self.valData

        self.build_vocab(data, candidates)
        # build the set of training/validation words
        # pdb.set_trace()
        self.train_val_wordset = self.words_set(self.valData + self.trainData)
        all_wordset = self.words_set(data)
        no_oov_word = len(self.train_val_wordset)
        with_oov_word = len(all_wordset)
        print('oov words', with_oov_word - no_oov_word)
        # new_words=[]
        # for word in all_wordset:
        #     if word not in self.train_val_wordset:
        #         new_words.append(self.idx_word[word])
        # print('These words are new:',new_words)
        # pdb.set_trace()
        # self.candidates_vec=vectorize_candidates_sparse(candidates,self.word_idx)
        self.candidates_vec = vectorize_candidates(
            candidates, self.word_idx, self.candidate_sentence_size)
        optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate, epsilon=self.epsilon)
        self.sess = tf.Session()
        self.model = MemN2NDialog(self.batch_size, self.vocab_size, self.n_cand, self.sentence_size,
                                  self.embedding_size, self.candidates_vec, session=self.sess,
                                  hops=self.hops, max_grad_norm=self.max_grad_norm, optimizer=optimizer,
                                  task_id=task_id,introspection_times=self.intro_times)
        self.saver = tf.train.Saver(max_to_keep=1)

        self.summary_writer = tf.summary.FileWriter(
            self.model.root_dir, self.model.graph_output.graph)
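
The wrapper keeps a tf.train.Saver and a live tf.Session; a hedged sketch of checkpointing after training, where `chatbot` stands for an instance of this (unnamed) wrapper class and the checkpoint path is a placeholder:

# TF1-style checkpointing; 'chatbot' and the path are placeholders.
ckpt_path = chatbot.saver.save(chatbot.sess, chatbot.model_dir + 'memn2n.ckpt')
print('saved checkpoint to', ckpt_path)
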
Example #3
    def __init__(self,
                 data_dir,
                 model_dir,
                 task_id,
                 isInteractive=True,
                 OOV=False,
                 memory_size=50,
                 random_state=None,
                 batch_size=32,
                 learning_rate=0.001,
                 epsilon=1e-8,
                 max_grad_norm=40.0,
                 evaluation_interval=10,
                 hops=3,
                 epochs=200,
                 embedding_size=20):

        self.data_dir = data_dir
        self.task_id = task_id
        self.model_dir = model_dir
        self.isInteractive = isInteractive
        self.OOV = OOV
        self.memory_size = memory_size
        self.random_state = random_state
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.max_grad_norm = max_grad_norm
        self.evaluation_interval = evaluation_interval
        self.hops = hops
        self.epochs = epochs
        self.embedding_size = embedding_size

        candidates, self.candid2indx = load_candidates(self.data_dir,
                                                       self.task_id)
        self.n_cand = len(candidates)
        print("Candidate Size", self.n_cand)
        self.indx2candid = dict(
            (self.candid2indx[key], key) for key in self.candid2indx)
        # task data
        self.trainData, self.testData, self.valData = load_dialog_task(
            self.data_dir, self.task_id, self.candid2indx, self.OOV)
        data = self.trainData + self.testData + self.valData
        self.build_vocab(data, candidates)
        # self.candidates_vec=vectorize_candidates_sparse(candidates,self.word_idx)
        self.candidates_vec = vectorize_candidates(
            candidates, self.word_idx, self.candidate_sentence_size)
        self.model = MemN2NDialog(self.batch_size,
                                  self.vocab_size,
                                  self.n_cand,
                                  self.sentence_size,
                                  self.embedding_size,
                                  self.candidates_vec,
                                  hops=self.hops,
                                  max_grad_norm=self.max_grad_norm,
                                  task_id=task_id)

    def test_ds(self, dataset_dir):
        _, testData, _ = load_dialog_task(dataset_dir, self.task_id,
                                          self.candid2indx, self.OOV)
        testS, testQ, testA = vectorize_data(testData, self.word_idx,
                                             self.sentence_size,
                                             self.batch_size, self.n_cand,
                                             self.memory_size)
        n_test = len(testS)
        test_preds = self.batch_predict(testS, testQ, n_test)
        test_acc = metrics.accuracy_score(testA, test_preds)  # (y_true, y_pred)

        print('{}: {:.2%}'.format(dataset_dir, test_acc))
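
test_ds can then be pointed at any directory holding a compatible test split; a hypothetical call, where `bot` and the directory name are placeholders:

# Prints e.g. 'data/dialog-bAbI-tasks-oov: 84.21%'.
bot.test_ds('data/dialog-bAbI-tasks-oov')
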
    def __init__(self, data_dir, task_id):
        self.data_dir = data_dir
        self.task_id = task_id

        candidates, self.candid2indx = load_candidates(
            self.data_dir, self.task_id)
        self.n_cand = len(candidates)
        print("Candidate Size", self.n_cand)
        self.indx2candid = dict(
            (self.candid2indx[key], key) for key in self.candid2indx)
        # task data
        self.trainData, self.testData, self.valData = load_dialog_task(
            self.data_dir, self.task_id, self.candid2indx, False)
        self.data = self.testData
        self.banned_words = ["i", "the"]
        self.pyD = PyDictionary()
    def test_accuracy(self, test_data_dir):
        """
        Compute and return the testing accuracy for the data directory given in argument.
        It is a more general method than `Chatbot.test` as it can be used on different
        datasets than the one given at initialisation.

        :param test_data_dir: Directory's path where to find the testing dataset
        :return: The accuracy score for the testing file
        """
        _, testData, _ = load_dialog_task(test_data_dir, self.task_id,
                                          self.candid2indx, self.OOV)
        testP, testS, testQ, testA = vectorize_data(
            testData, self.word_idx, self.sentence_size, self.batch_size,
            self.n_cand, self.memory_size, self._profiles_mapping)
        test_preds = self.model.batch_predict(testP, testS, testQ)
        test_acc = metrics.accuracy_score(testA, test_preds)  # (y_true, y_pred)

        return test_acc
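
As the docstring notes, this works on datasets other than the training one; a hypothetical call, where `chatbot` and the directory are placeholders:

# Returns a float in [0, 1].
acc = chatbot.test_accuracy('data/personalized-dialog-tasks')
print('held-out accuracy: {:.2%}'.format(acc))
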
    def __init__(self, data_dir, model_dir, task_id, isInteractive=True, OOV=False,
                 memory_size=50, random_state=None, batch_size=32, learning_rate=0.001,
                 epsilon=1e-8, max_grad_norm=40.0, evaluation_interval=10, hops=3,
                 epochs=200, embedding_size=20):
        self.data_dir = data_dir
        self.task_id = task_id
        self.model_dir = model_dir
        # self.isTrain=isTrain
        self.isInteractive = isInteractive
        self.OOV = OOV
        self.memory_size = memory_size
        self.random_state = random_state
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.max_grad_norm = max_grad_norm
        self.evaluation_interval = evaluation_interval
        self.hops = hops
        self.epochs = epochs
        self.embedding_size = embedding_size

        candidates, self.candid2indx = load_candidates(
            self.data_dir, self.task_id)
        self.n_cand = len(candidates)
        print("Candidate Size", self.n_cand)
        self.indx2candid = dict(
            (self.candid2indx[key], key) for key in self.candid2indx)
        # task data
        self.trainData, self.testData, self.valData = load_dialog_task(
            self.data_dir, self.task_id, self.candid2indx, self.OOV)
        data = self.trainData + self.testData + self.valData
        self.build_vocab(data, candidates)
        # self.candidates_vec=vectorize_candidates_sparse(candidates,self.word_idx)
        self.candidates_vec = vectorize_candidates(
            candidates, self.word_idx, self.candidate_sentence_size)
        optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate, epsilon=self.epsilon)
        self.sess = tf.Session()
        self.model = MemN2NDialog(self.batch_size, self.vocab_size, self.n_cand, self.sentence_size, self.embedding_size, self.candidates_vec, session=self.sess,
                                  hops=self.hops, max_grad_norm=self.max_grad_norm, optimizer=optimizer, task_id=task_id)
        self.saver = tf.train.Saver(max_to_keep=50)

        self.summary_writer = tf.summary.FileWriter(
            self.model.root_dir, self.model.graph_output.graph)
Example #8
def prepare_data(task_id, is_oov=False):
    # get candidates (restaurants)
    candidates, candid2idx, idx2candid = data_utils.load_candidates(
        task_id=task_id, candidates_f=DATA_DIR + 'dialog-babi-candidates.txt')
    # get data
    train, test, val = data_utils.load_dialog_task(data_dir=DATA_DIR,
                                                   task_id=task_id,
                                                   candid_dic=candid2idx,
                                                   isOOV=is_oov)
    ##
    # get metadata
    metadata = data_utils.build_vocab(train + test + val, candidates)

    ###
    # write data to file
    data_ = {
        'candidates': candidates,
        'train': train,
        'test': test,
        'val': val
    }
    suffix = '_oov' if is_oov else ''
    with open(P_DATA_DIR + str(task_id) + suffix + '.data.pkl', 'wb') as f:
        pkl.dump(data_, f)

    ###
    # save metadata to disk
    metadata['candid2idx'] = candid2idx
    metadata['idx2candid'] = idx2candid

    with open(P_DATA_DIR + str(task_id) + suffix + '.metadata.pkl', 'wb') as f:
        pkl.dump(metadata, f)
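
A companion sketch for reading back what prepare_data wrote; it only assumes the same P_DATA_DIR constant and file-naming scheme used above:

import pickle as pkl

def load_prepared(task_id, is_oov=False):
    # Mirror the suffix scheme prepare_data uses when writing.
    suffix = '_oov' if is_oov else ''
    with open(P_DATA_DIR + str(task_id) + suffix + '.data.pkl', 'rb') as f:
        data_ = pkl.load(f)
    with open(P_DATA_DIR + str(task_id) + suffix + '.metadata.pkl', 'rb') as f:
        metadata = pkl.load(f)
    return data_, metadata
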
Example #9
    def __init__(self, data_dir, model_dir, task_id, isInteractive=True, OOV=False,
                 memory_size=250, random_state=None, batch_size=32, learning_rate=0.001,
                 epsilon=1e-8, max_grad_norm=40.0, evaluation_interval=10, hops=3,
                 epochs=200, embedding_size=20, save_vocab=False, load_vocab=False):
        self.data_dir = data_dir
        self.task_id = task_id
        self.model_dir = model_dir
        # self.isTrain=isTrain
        self.isInteractive = isInteractive
        self.OOV = OOV
        self.memory_size = memory_size
        self.random_state = random_state
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.max_grad_norm = max_grad_norm
        self.evaluation_interval = evaluation_interval
        self.hops = hops
        self.epochs = epochs
        self.embedding_size = embedding_size
        self.save_vocab = save_vocab
        self.load_vocab = load_vocab

        candidates, self.candid2indx = load_candidates(self.data_dir, self.task_id)
        self.n_cand = len(candidates)
        print("Candidate Size", self.n_cand)
        self.indx2candid = dict(
            (self.candid2indx[key], key) for key in self.candid2indx)
        # task data
        self.trainData, self.testData, self.valData = load_dialog_task(
            self.data_dir, self.task_id, self.candid2indx, self.OOV)
        data = self.trainData + self.testData + self.valData
        self.build_vocab(data, candidates, self.save_vocab, self.load_vocab)
        # self.candidates_vec=vectorize_candidates_sparse(candidates,self.word_idx)
        self.candidates_vec = vectorize_candidates(
            candidates, self.word_idx, self.candidate_sentence_size)
        optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate, epsilon=self.epsilon)
        self.sess = tf.Session()
        self.model = MemN2NDialog(self.batch_size, self.vocab_size, self.n_cand,
                                  self.sentence_size, self.embedding_size,
                                  self.candidates_vec, session=self.sess,
                                  hops=self.hops, max_grad_norm=self.max_grad_norm,
                                  optimizer=optimizer, task_id=task_id)
        self.saver = tf.train.Saver(max_to_keep=50)

        # self.summary_writer = tf.train.SummaryWriter(self.model.root_dir, self.model.graph_output.graph)
        self.summary_writer = tf.summary.FileWriter(
            self.model.root_dir, self.model.graph_output.graph)
Example #10
    def __init__(self,
                 data_dir,
                 model_dir,
                 task_id,
                 OOV=False,
                 memory_size=250,
                 random_state=None,
                 batch_size=32,
                 learning_rate=0.001,
                 epsilon=1e-8,
                 max_grad_norm=40.0,
                 evaluation_interval=10,
                 hops=3,
                 epochs=10,
                 embedding_size=20,
                 save_vocab=False,
                 load_vocab=False):
        """Creates wrapper for training and testing a chatbot model.

        Args:
            data_dir: Directory containing personalized dialog tasks.

            model_dir: Directory containing memn2n model checkpoints.

            task_id: Personalized dialog task id, 1 <= id <= 5.

            OOV: If `True`, use OOV test set. Defaults to `False`.

            memory_size: The max size of the memory. Defaults to `250`.

            random_state: Random state to set graph-level random seed. Defaults to `None`.

            batch_size: Size of the batch for training. Defaults to `32`.

            learning_rate: Learning rate for Adam Optimizer. Defaults to `0.001`.

            epsilon: Epsilon value for Adam Optimizer. Defaults to `1e-8`.

            max_grad_norm: Maximum L2 norm clipping value. Defaults to `40.0`.

            evaluation_interval: Evaluate and print results every
            `evaluation_interval` epochs. Defaults to `10`.

            hops: The number of hops over memory for responding. A hop consists
            of reading and addressing a memory slot. Defaults to `3`.

            epochs: Number of training epochs. Defaults to `10`.

            embedding_size: The size of the word embedding. Defaults to `20`.

            save_vocab: If `True`, save vocabulary file. Defaults to `False`.

            load_vocab: If `True`, load vocabulary from file. Defaults to `False`.
        """

        self.data_dir = data_dir
        self.task_id = task_id
        self.model_dir = model_dir
        self.OOV = OOV
        self.memory_size = memory_size
        self.random_state = random_state
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.max_grad_norm = max_grad_norm
        self.evaluation_interval = evaluation_interval
        self.hops = hops
        self.epochs = epochs
        self.embedding_size = embedding_size
        self.save_vocab = save_vocab
        self.load_vocab = load_vocab

        candidates, self.candid2indx = load_candidates(self.data_dir,
                                                       self.task_id)
        self.n_cand = len(candidates)
        # print("Candidate Size", self.n_cand)
        self.indx2candid = dict(
            (self.candid2indx[key], key) for key in self.candid2indx)

        # Task data
        self.trainData, self.testData, self.valData = load_dialog_task(
            self.data_dir, self.task_id, self.candid2indx, self.OOV)

        # print(self.testData)
        data = self.trainData + self.testData + self.valData

        self.build_vocab(data, candidates, self.save_vocab, self.load_vocab)
        print("Vocab size", self.vocab_size)
        self.candidates_vec = vectorize_candidates(
            candidates, self.word_idx, self.candidate_sentence_size)
        print("candidates_vec", self.candidates_vec)
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate,
                                           epsilon=self.epsilon)

        self.sess = tf.Session()

        self.model = MemN2NDialog(self.batch_size,
                                  self.vocab_size,
                                  self.n_cand,
                                  self.sentence_size,
                                  self.embedding_size,
                                  self.candidates_vec,
                                  session=self.sess,
                                  hops=self.hops,
                                  max_grad_norm=self.max_grad_norm,
                                  optimizer=optimizer,
                                  task_id=task_id)

        self.saver = tf.train.Saver(max_to_keep=50)
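
Given the docstring above, a hypothetical instantiation; the wrapper's class name is not visible in this snippet, so ChatBotWrapper and the paths are placeholders:

# Placeholder class name and paths; keyword arguments follow the documented defaults.
bot = ChatBotWrapper('data/personalized-dialog-dataset/',  # placeholder data_dir
                     'checkpoints/',                       # placeholder model_dir
                     task_id=3,
                     memory_size=250,
                     epochs=10)
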
Example #11
    def __init__(self,
                 data_dir,
                 model_dir,
                 task_id,
                 source,
                 resFlag,
                 wrong_conversations,
                 error,
                 acc_each_epoch,
                 acc_ten_epoch,
                 conv_wrong_right,
                 epochs,
                 OOV=False,
                 memory_size=50,
                 random_state=None,
                 batch_size=32,
                 learning_rate=0.001,
                 epsilon=1e-8,
                 max_grad_norm=40.0,
                 evaluation_interval=10,
                 hops=3,
                 embedding_size=20):
        self.data_dir = data_dir
        self.task_id = task_id
        self.model_dir = model_dir
        self.OOV = OOV
        self.memory_size = memory_size
        self.random_state = random_state
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.max_grad_norm = max_grad_norm
        self.evaluation_interval = evaluation_interval
        self.hops = hops
        self.epochs = epochs
        self.embedding_size = embedding_size
        self.source = source
        self.resFlag = resFlag
        self.wrong_conversations = wrong_conversations
        self.error = error
        self.acc_each_epoch = acc_each_epoch
        self.acc_ten_epoch = acc_ten_epoch
        candidates, self.candid2indx = load_candidates(self.data_dir,
                                                       self.task_id)
        self.n_cand = len(candidates)
        print("Candidate Size", self.n_cand)
        self.indx2candid = dict(
            (self.candid2indx[key], key) for key in self.candid2indx)

        # create train, test and validation data
        self.trainData, self.testData, self.valData = load_dialog_task(
            self.data_dir, self.task_id, self.candid2indx, self.OOV)
        data = self.trainData + self.testData + self.valData
        self.build_vocab(data, candidates)

        self.test_acc_list = []
        self.candidates_vec = vectorize_candidates(
            candidates, self.word_idx, self.candidate_sentence_size)
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate,
                                           epsilon=self.epsilon)
        self.sess = tf.Session()
        self.model = MemN2NDialog(self.batch_size,
                                  self.vocab_size,
                                  self.n_cand,
                                  self.sentence_size,
                                  self.embedding_size,
                                  self.candidates_vec,
                                  session=self.sess,
                                  hops=self.hops,
                                  max_grad_norm=self.max_grad_norm,
                                  optimizer=optimizer,
                                  task_id=task_id,
                                  source=self.source,
                                  resFlag=self.resFlag,
                                  oov=self.OOV)
        self.saver = tf.train.Saver(max_to_keep=50)
        self.summary_writer = tf.summary.FileWriter(
            self.model.root_dir, self.model.graph_output.graph)
Example #12
    def __init__(self,
                 data_dir,
                 model_dir,
                 task_id,
                 isInteractive=True,
                 OOV=False,
                 memory_size=50,
                 random_state=None,
                 batch_size=32,
                 learning_rate=0.001,
                 epsilon=1e-8,
                 max_grad_norm=40.0,
                 evaluation_interval=10,
                 hops=3,
                 epochs=200,
                 embedding_size=100):
        self.data_dir = data_dir
        self.task_id = task_id
        self.model_dir = model_dir
        # self.isTrain=isTrain
        self.isInteractive = isInteractive
        self.OOV = OOV
        self.memory_size = memory_size
        self.random_state = random_state
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.max_grad_norm = max_grad_norm
        self.evaluation_interval = evaluation_interval
        self.hops = hops
        self.epochs = epochs
        self.embedding_size = embedding_size
        self.vocab = {}
        self.ivocab = {}
        self.word2vec = {}
        self.word2vec_init = True

        if self.word2vec_init:
            # assert config.embed_size == 100
            self.word2vec = load_glove(self.embedding_size)

        process_word(word="<eos>",
                     word2vec=self.word2vec,
                     vocab=self.vocab,
                     ivocab=self.ivocab,
                     word_vector_size=self.embedding_size,
                     to_return="index")

        # Define an index and vector for uncertain/unknown words, used later
        # when training on out-of-context data.
        self.uncertain_word_index = process_word(
            word="sdfsssdf",  # nonsense token, guaranteed absent from the vocabulary
            word2vec=self.word2vec,
            vocab=self.vocab,
            ivocab=self.ivocab,
            word_vector_size=self.embedding_size,
            to_return="index")

        candidates, self.candid2indx = load_candidates(self.data_dir,
                                                       self.task_id)
        self.n_cand = len(candidates)
        print("Candidate Size", self.n_cand)
        self.indx2candid = dict(
            (self.candid2indx[key], key) for key in self.candid2indx)
        # task data
        self.trainData, self.testData, self.valData = load_dialog_task(
            self.data_dir, self.task_id, self.candid2indx, self.OOV)
        data = self.trainData + self.testData + self.valData

        self.build_vocab(data, candidates)
        self.set_max_sentence_length()
        # self.candidates_vec=vectorize_candidates_sparse(candidates,self.word_idx)
        self.trainS, self.trainQ, self.trainA = vectorize_data_match(
            self.trainData,
            self.word2vec,
            self.max_sentence_size,
            self.batch_size,
            self.n_cand,
            self.memory_size,
            self.vocab,
            self.ivocab,
            self.embedding_size,
            uncertain=self.uncertain_word_index)
        self.valS, self.valQ, self.valA = vectorize_data_match(
            self.valData,
            self.word2vec,
            self.max_sentence_size,
            self.batch_size,
            self.n_cand,
            self.memory_size,
            self.vocab,
            self.ivocab,
            self.embedding_size,
            uncertain_word=True,
            uncertain=self.uncertain_word_index)

        self.candidates_vec = vectorize_candidates(
            candidates, self.word2vec, self.candidate_sentence_size,
            self.vocab, self.ivocab, self.embedding_size)
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate,
                                           epsilon=self.epsilon)
        self.sess = tf.Session()
        # Set max sentence vector size.
        # NOTE: build_vocab was already called above; this call rebuilds the
        # same vocabulary and appears redundant.
        self.build_vocab(data, candidates)

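        # Build an n-hot (bag-of-words) encoding over the vocabulary for each
        # candidate answer, one column per candidate.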
        answer_n_hot = np.zeros((self.vocab_size, len(self.candid2indx)))
        for ans_it in range(len(self.indx2candid)):
            ans = self.indx2candid[ans_it]
            n_hot = np.zeros((self.vocab_size, ))
            for w in tokenize(ans):
                assert w in self.word_idx
                n_hot[self.word_idx[w]] = 1
            answer_n_hot[:, ans_it] = n_hot

        # TODO: understand sentence size better; the model may fail when
        # sentence size > candidate_sentence_size (answers longer than queries?).
        self.model = MemN2NDialogHybridMatch(self.batch_size,
                                             self.vocab_size,
                                             self.max_sentence_size,
                                             self.memory_size,
                                             self.embedding_size,
                                             answer_n_hot,
                                             match=FLAGS.match,
                                             session=self.sess,
                                             hops=self.hops,
                                             max_grad_norm=self.max_grad_norm,
                                             optimizer=optimizer,
                                             task_id=self.task_id)
        # self.model = MemN2NDialogHybrid(self.batch_size, self.vocab_size, self.n_cand, self.max_sentence_size, self.embedding_size, self.candidates_vec, session=self.sess,
        #                           hops=self.hops, max_grad_norm=self.max_grad_norm, optimizer=optimizer, task_id=task_id)
        self.saver = tf.train.Saver(max_to_keep=50)

        self.summary_writer = tf.summary.FileWriter(
            self.model.root_dir, self.model.graph_output.graph)

        self.kb = parse_kb(FLAGS.kb_file)
    def __init__(self,
                 data_dir,
                 task_id,
                 OOV=False,
                 memory_size=50,
                 train=0,
                 batch_size=32,
                 nn=False):
        self.data_dir = data_dir
        self.task_id = task_id
        self.OOV = OOV
        self.memory_size = memory_size
        self.train = train
        self.batch_size = batch_size
        self.nn = nn
        candidates, self.candid2indx = load_candidates(self.data_dir,
                                                       self.task_id)
        self.n_cand = len(candidates)
        print("Candidate Size", self.n_cand)
        self.indx2candid = dict(
            (self.candid2indx[key], key) for key in self.candid2indx)
        self.trainData, self.testData, self.valData = load_dialog_task(
            self.data_dir, self.task_id, self.candid2indx, self.OOV)
        data = self.trainData + self.testData + self.valData
        self.build_vocab(data, candidates)
        self.candidates_vec = vectorize_candidates(
            candidates, self.word_idx, self.candidate_sentence_size)
        self.params = {
            'n_cand': self.n_cand,
            'indx2candid': self.indx2candid,
            'candid2indx': self.candid2indx,
            'candidates_vec': self.candidates_vec,
            'word_idx': self.word_idx,
            'sentence_size': self.sentence_size,
            'candidate_sentence_size': self.candidate_sentence_size,
            'vocab_size': self.vocab_size
        }

        # self.train selects the split to vectorize: 0 = train, 1 = val, 2 = test
        if self.train == 0:
            split = self.trainData
        elif self.train == 1:
            split = self.valData
        elif self.train == 2:
            split = self.testData

        if self.nn:
            self.S, self.Q, self.A = vectorize_data(split, self.word_idx,
                                                    self.sentence_size,
                                                    self.batch_size, self.n_cand,
                                                    self.memory_size, nn=self.nn)
        else:
            self.S, self.Q, self.A = vectorize_data(split, self.word_idx,
                                                    self.sentence_size,
                                                    self.batch_size, self.n_cand,
                                                    self.memory_size)
    def __init__(self,
                 data_dir,
                 model_dir,
                 task_id,
                 isInteractive=True,
                 OOV=False,
                 memory_size=250,
                 random_state=None,
                 batch_size=32,
                 learning_rate=0.001,
                 epsilon=1e-8,
                 max_grad_norm=40.0,
                 evaluation_interval=10,
                 hops=3,
                 epochs=200,
                 embedding_size=20,
                 alpha=0.5,
                 save_vocab=None,
                 load_vocab=None,
                 verbose=False,
                 load_profiles=None,
                 save_profiles=None):

        self.data_dir = data_dir
        self.task_id = task_id
        self.model_dir = model_dir
        # self.isTrain=isTrain
        self.isInteractive = isInteractive
        self.OOV = OOV
        self.memory_size = memory_size
        self.random_state = random_state
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.max_grad_norm = max_grad_norm
        self.evaluation_interval = evaluation_interval
        self.hops = hops
        self.epochs = epochs
        self.embedding_size = embedding_size
        self.save_vocab = save_vocab
        self.load_vocab = load_vocab
        self.verbose = verbose
        self.alpha = alpha

        # Loading possible answers
        self.candidates, self.candid2indx = load_candidates(
            self.data_dir, self.task_id)
        self.n_cand = len(self.candidates)
        print("Candidate Size", self.n_cand)
        self.indx2candid = dict(
            (self.candid2indx[key], key) for key in self.candid2indx)

        # task data
        self.trainData, self.testData, self.valData = load_dialog_task(
            self.data_dir, self.task_id, self.candid2indx, self.OOV)
        data = self.trainData + self.testData + self.valData

        # Find profiles types
        if load_profiles:
            with open(load_profiles, 'rb') as f:
                self._profiles_mapping = pickle.load(f)
        else:
            self._profiles_mapping = generate_profile_encoding(self.trainData)
            if save_profiles:
                with open(save_profiles, 'wb') as f:
                    pickle.dump(self._profiles_mapping, f)

        profiles_idx_set = set(self._profiles_mapping.values())

        print("Profiles:", self._profiles_mapping)

        # Vocabulary
        self.build_vocab(data, self.candidates, self.save_vocab,
                         self.load_vocab)
        # self.candidates_vec=vectorize_candidates_sparse(self.candidates,self.word_idx)
        self.candidates_vec = vectorize_candidates(
            self.candidates, self.word_idx, self.candidate_sentence_size)

        # Model initialisation
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate,
                                           epsilon=self.epsilon)
        self.sess = tf.Session()
        self.model = MemN2NDialog(self.batch_size,
                                  self.vocab_size,
                                  self.n_cand,
                                  self.sentence_size,
                                  self.embedding_size,
                                  self.candidates_vec,
                                  profiles_idx_set,
                                  session=self.sess,
                                  hops=self.hops,
                                  max_grad_norm=self.max_grad_norm,
                                  alpha=alpha,
                                  optimizer=optimizer,
                                  task_id=task_id,
                                  verbose=verbose)
        self.saver = tf.train.Saver(max_to_keep=50)

        # self.summary_writer = tf.train.SummaryWriter(self.model.root_dir, self.model.graph_output.graph)
        self.summary_writer = tf.summary.FileWriter(
            self.model.root_dir, self.model.graph_output.graph)
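
A hypothetical instantiation of this profile-aware wrapper, reusing a profile mapping saved by an earlier run; the class name and all paths are placeholders:

# 'ProfileChatBot' and the paths are placeholders; load_profiles points at a
# pickle previously written via save_profiles.
chatbot = ProfileChatBot('data/personalized-dialog-dataset/',  # placeholder
                         'checkpoints/',                       # placeholder
                         task_id=1,
                         alpha=0.5,
                         load_profiles='profiles.pkl')         # placeholder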