# Example 1 (score: 0)
    def train(self):
        """Run the full training loop.

        For each of ``self.max_epoch`` epochs, feeds every batch produced by
        ``BatchManager`` through ``self.model.train_op``, records the loss,
        prints progress every ``self.show_batch`` batches, and saves a
        checkpoint once per epoch (the original saved after *every* batch,
        which is pure redundant disk I/O since ``global_step`` advances the
        same either way).
        """
        batch_manager = BatchManager(self.encoder_vec, self.decoder_vec, self.batch_size)

        # NOTE(review): `config` is built but never passed to a Session in
        # this method — presumably the session was created elsewhere with
        # these options; confirm before removing.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        loss_track = []  # one cross-entropy loss value per trained batch
        nums_batch = len(batch_manager.batch_data)
        # Checkpoint path is loop-invariant: compute it once, outside the loops.
        checkpoint_path = self.model_path + "chatbot_seq2seq.ckpt"
        for epoch in range(self.max_epoch):
            print("[->] epoch {}".format(epoch))
            batch_index = 0
            for batch in batch_manager.batch():
                batch_index += 1
                # Build the feed dict [time_steps, batch_size]
                fd = self.get_fd(batch, self.model)
                _, loss, logits, labels = self.sess.run(
                    [self.model.train_op,
                     self.model.loss,
                     self.model.logits,
                     self.model.decoder_labels], fd)
                loss_track.append(loss)
                if batch_index % self.show_batch == 0:
                    print("\tstep: {}/{}".format(batch_index, nums_batch))
                    print('\tloss: {}'.format(loss))
                    print("\t" + "-" * 50)
            # Save the model once per epoch; global_step keeps files distinct.
            self.model.saver.save(self.sess, checkpoint_path,
                                  global_step=self.model.global_step)
# Example 2 (score: 0)
    def train(self):
        """Run the full training loop.

        For each of ``self.max_epoch`` epochs, feeds every batch produced by
        ``BatchManager`` through ``self.model.train_op``, records the loss,
        prints progress every ``self.show_batch`` batches, and saves a
        checkpoint once per epoch (the original saved after *every* batch,
        which is pure redundant disk I/O since ``global_step`` advances the
        same either way).
        """
        print("++++++++train+++++++")
        batch_manager = BatchManager(self.encoder_vec, self.decoder_vec,
                                     self.batch_size)

        # NOTE(review): `config` is built but never passed to a Session in
        # this method — presumably the session was created elsewhere with
        # these options; confirm before removing.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        loss_track = []  # one cross-entropy loss value per trained batch
        nums_batch = len(batch_manager.batch_data)
        # Checkpoint path is loop-invariant: compute it once, outside the loops.
        checkpoint_path = self.model_path + "chatbot_seq2seq.ckpt"
        for epoch in range(self.max_epoch):
            print("[->] epoch {}".format(epoch))
            batch_index = 0
            for batch in batch_manager.batch():
                batch_index += 1
                # Build the feed dict [time_steps, batch_size]
                fd = self.get_fd(batch, self.model)
                # One optimizer step; also fetch loss/logits/labels for logging.
                _, loss, logits, labels = self.sess.run([
                    self.model.train_op, self.model.loss, self.model.logits,
                    self.model.decoder_labels
                ], fd)
                loss_track.append(loss)
                if batch_index % self.show_batch == 0:
                    print("\tstep: {}/{}".format(batch_index, nums_batch))
                    print('\tloss: {}'.format(loss))
                    print("\t" + "-" * 50)
            # Save the model once per epoch; global_step keeps files distinct.
            self.model.saver.save(self.sess,
                                  checkpoint_path,
                                  global_step=self.model.global_step)