def test(self):
    """
    Call the Attention Dialogue Test
    """
    trace('initializing ...')
    encoderDecoderModel = EncoderDecoderModelAttention(self.parameter_dict)
    encoderDecoderModel.test()
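# --- Hedged example --------------------------------------------------------
# A sketch of the kind of parameter_dict the entry points in this section
# forward to EncoderDecoderModelAttention. Only the keys that actually appear
# in this code ("minibatch", "source", "target", "model", "x", "first_word")
# are taken from it; the EXAMPLE_PARAMETER_DICT name and all values are
# illustrative placeholders, not the project's real configuration.
EXAMPLE_PARAMETER_DICT = {
    "minibatch": 64,                                            # batch size used in train(use_gpu, gpu_id)
    "source": "twitter/data/sample_source_twitter_data.txt",    # set per word class in train_mulit_model
    "target": "twitter/data/sample_replay_twitter_data.txt",
    "model": "ChainerDialogue",
    # "x" and "first_word" are filled in at runtime with chainer.Variable
    # objects holding the image minibatch and the first target word ids.
}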
def train(self, use_gpu, gpu_id):
    if use_gpu:
        cuda.check_cuda_available()
    # Use CuPy when a GPU is requested and available, otherwise NumPy.
    xp = cuda.cupy if use_gpu and gpu_id >= 0 else np
    batch_count = 0
    for k, v in self.read_data.total_words_ids.items():
        if k in self.read_data.images_ids:
            # Load the image, reorder to CHW, swap RGB to BGR,
            # crop, and subtract the mean image.
            image = np.asarray(Image.open(APP_ROOT + "/" + self.read_data.images_ids[k])).transpose(2, 0, 1)[::-1]
            image = image[:, self.start:self.stop, self.start:self.stop].astype(np.float32)
            image -= self.mean_image

            self.x_batch[batch_count] = image
            # Id of the first target word for this image.
            self.y_batch[batch_count] = self.trg_vocab.stoi(v.split()[0])
            batch_count += 1

            # Train only once a full minibatch has been collected, then reset.
            if batch_count == self.parameter_dict["minibatch"]:
                x_data = xp.asarray(self.x_batch)
                y_data = xp.asarray(self.y_batch)

                # Training needs the computational graph, so volatile is off.
                x = chainer.Variable(x_data, volatile=False)
                t = chainer.Variable(y_data, volatile=False)
                self.parameter_dict["x"] = x
                self.parameter_dict["first_word"] = t
                encoderDecoderModel = EncoderDecoderModelAttention(self.parameter_dict)
                encoderDecoderModel.train()
                batch_count = 0
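# --- Hedged example --------------------------------------------------------
# Self-contained sketch (NumPy only) of the fill-a-minibatch-then-train loop
# used above. The fill_minibatches/toy_train names are hypothetical, and the
# random arrays stand in for the cropped, mean-subtracted CHW images.
import numpy as np

def fill_minibatches(samples, minibatch_size, train_fn):
    x_batch = np.zeros((minibatch_size,) + samples[0][0].shape, dtype=np.float32)
    y_batch = np.zeros((minibatch_size,), dtype=np.int32)
    batch_count = 0
    for image, label in samples:
        x_batch[batch_count] = image
        y_batch[batch_count] = label
        batch_count += 1
        if batch_count == minibatch_size:      # train only on full minibatches
            train_fn(x_batch.copy(), y_batch.copy())
            batch_count = 0                    # start filling the next batch

def toy_train(x, y):
    print("training on", x.shape, "labels", y[:4], "...")

samples = [(np.random.rand(3, 224, 224).astype(np.float32), i % 10) for i in range(10)]
fill_minibatches(samples, minibatch_size=4, train_fn=toy_train)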
def train(self):
    """
    Call the Dialogue Training
    """
    trace('initializing ...')
    encoderDecoderModel = EncoderDecoderModelAttention(self.parameter_dict)
    encoderDecoderModel.train()
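# --- Hedged example --------------------------------------------------------
# Minimal stand-in showing the delegation pattern shared by test() and
# train() above: build the model from parameter_dict and forward the call.
# DummyAttentionModel and DialogueRunner are hypothetical names, not the
# project's classes.
class DummyAttentionModel:
    def __init__(self, parameter_dict):
        self.parameter_dict = parameter_dict

    def train(self):
        print("training with keys:", sorted(self.parameter_dict))

    def test(self):
        print("testing with keys:", sorted(self.parameter_dict))

class DialogueRunner:
    def __init__(self, parameter_dict, model_cls=DummyAttentionModel):
        self.parameter_dict = parameter_dict
        self.model_cls = model_cls

    def train(self):
        self.model_cls(self.parameter_dict).train()

    def test(self):
        self.model_cls(self.parameter_dict).test()

DialogueRunner({"minibatch": 64}).train()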
def __judge_print(self):
    """
    Judge whether a Slack message is addressed to chainer and dispatch it.
    Example:
        chainer:{your sentence}
            chainer returns a response sentence
        chainer_train:{your sentence}
            start training
    """
    if len(self.data) >= 1 and "text" in self.data[0]:
        print(self.data[0]["text"])
        if "chainer:" in self.data[0]["text"]:
            # input sentence
            src_batch = self.__input_sentence()
            # predict
            hyp_batch = self.__predict_sentence(src_batch)
            # show the predicted words
            word = ''.join(hyp_batch[0]).replace("</s>", "")
            print(self.slack_channel.api_call("chat.postMessage",
                                              user=self.usr,
                                              channel=self.chan,
                                              text=word))
        if "chainer_train" in self.data[0]["text"]:
            self.__setting_parameter()
            model = AttentionDialogue.load_spec(self.model_name + '.spec',
                                                self.XP)
            dialogue = EncoderDecoderModelAttention(self.parameter)
            serializers.load_hdf5(self.model_name + '.weights', model)
            dialogue.attention_dialogue = model
            dialogue.word2vecFlag = False
            dialogue.train()
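# --- Hedged example --------------------------------------------------------
# Self-contained sketch of the message routing done in __judge_print above:
# a Slack RTM-style event with a "text" field either gets a predicted reply
# ("chainer:...") or kicks off training ("chainer_train:..."). The
# route_message/fake_predict names and the event dicts are illustrative.
def fake_predict(sentence):
    return "echo: " + sentence            # stands in for __predict_sentence

def route_message(event):
    if "text" not in event:
        return None
    text = event["text"]
    if text.startswith("chainer_train"):
        return "start training"           # stands in for reloading the model and train()
    if text.startswith("chainer:"):
        sentence = text[len("chainer:"):].strip()
        return fake_predict(sentence)     # reply posted back to the channel
    return None

print(route_message({"text": "chainer: hello"}))         # -> echo: hello
print(route_message({"text": "chainer_train: corpus"}))  # -> start training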
def __predict_sentence(self, src_batch):
    """
    Predict a response sentence.
    :param src_batch: the source sentence batch
    :return: the predicted (hypothesis) batch
    """
    dialogue = EncoderDecoderModelAttention(self.parameter)
    src_vocab = Vocabulary.load(self.model_name + '.srcvocab')
    trg_vocab = Vocabulary.load(self.model_name + '.trgvocab')
    model = AttentionDialogue.load_spec(self.model_name + '.spec', self.XP)
    serializers.load_hdf5(self.model_name + '.weights', model)
    hyp_batch = dialogue.forward_implement(src_batch, None, src_vocab,
                                           trg_vocab, model, False,
                                           self.generation_limit)
    return hyp_batch
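# --- Hedged example --------------------------------------------------------
# The artifacts __predict_sentence loads all share the model_name prefix.
# Sketch of that naming convention only; build_artifact_paths is a
# hypothetical helper and "ChainerDialogue" is just an example prefix.
def build_artifact_paths(model_name):
    return {
        "source_vocabulary": model_name + ".srcvocab",
        "target_vocabulary": model_name + ".trgvocab",
        "model_spec": model_name + ".spec",
        "model_weights": model_name + ".weights",
    }

print(build_artifact_paths("ChainerDialogue"))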
def train_mulit_model(self):
    """
    Call the dialogue training once per word-class model.
    """
    trace('initializing ...')
    train_path = APP_ROOT + "/../twitter/data/"
    file_list = os.listdir(train_path)
    twitter_source_dict = {}
    twitter_replay_dict = {}
    for file_name in file_list:
        file_name = file_name.strip()
        word_class = re.sub(r"_replay_twitter_data\.txt|_source_twitter_data\.txt", "", file_name)
        # Register each file under its word class, keeping source and replay files apart.
        if "_source_twitter_data" in file_name and word_class not in twitter_source_dict:
            twitter_source_dict.update({word_class: file_name})
        if "_replay_twitter_data" in file_name and word_class not in twitter_replay_dict:
            twitter_replay_dict.update({word_class: file_name})
    for word_class in twitter_source_dict.keys():
        self.parameter_dict["source"] = train_path + word_class + "_source_twitter_data.txt"
        print(self.parameter_dict["source"])
        self.parameter_dict["target"] = train_path + word_class + "_replay_twitter_data.txt"
        print(self.parameter_dict["target"])
        self.parameter_dict["model"] = "ChainerDialogue_" + word_class
        encoderDecoderModel = EncoderDecoderModelAttention(self.parameter_dict)
        encoderDecoderModel.train()
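# --- Hedged example --------------------------------------------------------
# Self-contained sketch of the pairing logic in train_mulit_model above:
# derive the word class from paired *_source_twitter_data.txt /
# *_replay_twitter_data.txt file names. group_by_word_class and the sample
# file names are illustrative, not part of the project.
import re

def group_by_word_class(file_names):
    source_files, replay_files = {}, {}
    for name in file_names:
        name = name.strip()
        word_class = re.sub(r"_replay_twitter_data\.txt|_source_twitter_data\.txt", "", name)
        if name.endswith("_source_twitter_data.txt"):
            source_files.setdefault(word_class, name)
        elif name.endswith("_replay_twitter_data.txt"):
            replay_files.setdefault(word_class, name)
    return source_files, replay_files

sources, replays = group_by_word_class([
    "greeting_source_twitter_data.txt", "greeting_replay_twitter_data.txt",
    "weather_source_twitter_data.txt", "weather_replay_twitter_data.txt",
])
for word_class in sources:
    print(word_class, "->", sources[word_class], "+", replays.get(word_class))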