Example #1
    def inference(self,
                  request: str,
                  beam_size: int,
                  start_sign: str = "<start>",
                  end_sign: str = "<end>") -> AnyStr:
        """ 对话推断模块

        :param request: 输入句子
        :param beam_size: beam大小
        :param start_sign: 句子开始标记
        :param end_sign: 句子结束标记
        :return: 返回历史指标数据
        """
        tokenizer = load_tokenizer(self.dict_path)

        enc_input = preprocess_request(sentence=request,
                                       tokenizer=tokenizer,
                                       max_length=self.max_sentence,
                                       start_sign=start_sign,
                                       end_sign=end_sign)
        enc_output, padding_mask = self.encoder(inputs=enc_input)
        dec_input = tf.expand_dims([tokenizer.word_index.get(start_sign)], 0)

        beam_search_container = BeamSearch(beam_size=beam_size,
                                           max_length=self.max_sentence,
                                           worst_score=0)
        beam_search_container.reset(enc_output=enc_output,
                                    dec_input=dec_input,
                                    remain=padding_mask)
        enc_output, dec_input, padding_mask = beam_search_container.get_search_inputs()

        for t in range(self.max_sentence):
            predictions = self._inference_one_step(dec_input=dec_input,
                                                   enc_output=enc_output,
                                                   padding_mask=padding_mask)

            beam_search_container.expand(
                predictions=predictions,
                end_sign=tokenizer.word_index.get(end_sign))
            # Note: if the beam_size inside the BeamSearch container has dropped to 0, enough results have been found, so break out of the loop
            if beam_search_container.beam_size == 0:
                break
            enc_output, dec_input, padding_mask = beam_search_container.get_search_inputs()

        beam_search_result = beam_search_container.get_result(top_k=3)
        result = ""
        # Extract the sequences from the container and assemble the final result
        for i in range(len(beam_search_result)):
            temp = beam_search_result[i].numpy()
            text = tokenizer.sequences_to_texts(temp)
            text[0] = text[0].replace(start_sign, "").replace(end_sign, "").replace(" ", "")
            result = "<" + text[0] + ">" + result
        return result
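
The helper preprocess_request used above is not shown in this listing. As a hedged, assumption-level sketch of what it presumably does (wrap the request with the start/end signs, map words to ids with the loaded Keras tokenizer, and pad to max_length), the following stand-alone snippet can be run on its own:

import tensorflow as tf

def preprocess_request_sketch(sentence: str, tokenizer, max_length: int,
                              start_sign: str = "<start>", end_sign: str = "<end>"):
    # Illustrative stand-in for the unshown preprocess_request helper (assumption, not the original).
    sentence = start_sign + " " + sentence + " " + end_sign
    sequence = tokenizer.texts_to_sequences([sentence])
    return tf.keras.preprocessing.sequence.pad_sequences(sequences=sequence,
                                                         maxlen=max_length, padding="post")

tokenizer = tf.keras.preprocessing.text.Tokenizer(filters="", oov_token="<unk>")
tokenizer.fit_on_texts(["<start> 你 好 <end>", "<start> 再 见 <end>"])
print(preprocess_request_sketch("你 好", tokenizer, max_length=10))
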
Example #2
    def inference(self,
                  request: list,
                  solr: pysolr.Solr,
                  max_utterance: int,
                  d_type: tf.dtypes.DType = tf.float32,
                  *args,
                  **kwargs) -> AnyStr:
        """ 对话推断模块

        :param request: 输入对话历史
        :param solr: solr服务
        :param max_utterance: 每轮最大语句数
        :param d_type: 运算精度
        :return: 返回历史指标数据
        """
        tokenizer = load_tokenizer(self.dict_path)

        history = request[-max_utterance:]
        pad_sequences = [0] * self.max_sentence
        utterance = tokenizer.texts_to_sequences(history)
        utterance_len = len(utterance)

        # If the current round has fewer than max_utterance history utterances, pad with empty sequences at the front
        if utterance_len != max_utterance:
            utterance = [pad_sequences] * (max_utterance - utterance_len) + utterance
        utterance = tf.keras.preprocessing.sequence.pad_sequences(
            sequences=utterance, maxlen=self.max_sentence, padding="post")

        tf_idf = get_tf_idf_top_k(history=history, k=5)
        # Build the Solr function query without a trailing comma inside sum(...)
        query = "{!func}sum("
        query += ",".join("product(idf(utterance," + key + "),tf(utterance," + key + "))" for key in tf_idf)
        query += ")"
        candidates = solr.search(q=query, start=0, rows=10).docs
        candidates = [candidate["utterance"][0] for candidate in candidates]

        if not candidates:
            return "Sorry! I didn't hear clearly, can you say it again?"
        else:
            utterances = [utterance] * len(candidates)
            responses = tokenizer.texts_to_sequences(candidates)
            responses = tf.keras.preprocessing.sequence.pad_sequences(
                sequences=responses, maxlen=self.max_sentence, padding="post")

            utterances = tf.convert_to_tensor(value=utterances)
            responses = tf.convert_to_tensor(value=responses)
            index = self._inference_one_step(utterances=utterances,
                                             responses=responses)

            return candidates[index]
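
The Solr retrieval step scores stored utterances with a function query summing tf*idf products over the top keywords of the history. A self-contained sketch of just the query string (hypothetical keywords standing in for the output of get_tf_idf_top_k; no Solr instance needed):

tf_idf = ["天气", "北京", "下雨"]  # hypothetical top-k keywords
query = "{!func}sum("
query += ",".join("product(idf(utterance," + key + "),tf(utterance," + key + "))" for key in tf_idf)
query += ")"
print(query)  # {!func}sum(product(idf(utterance,天气),tf(utterance,天气)),...)
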
Example #3
    def inference(self,
                  request: str,
                  beam_size: int,
                  start_sign: str = "<start>",
                  end_sign: str = "<end>") -> AnyStr:
        """ 对话推断模块

        :param request: 输入句子
        :param beam_size: beam大小
        :param start_sign: 句子开始标记
        :param end_sign: 句子结束标记
        :return: 返回历史指标数据
        """
        with torch.no_grad():
            tokenizer = load_tokenizer(self.dict_path)
            enc_input = preprocess_request(sentence=request,
                                           tokenizer=tokenizer,
                                           max_length=self.max_sentence,
                                           start_sign=start_sign,
                                           end_sign=end_sign)
            enc_input = torch.tensor(data=enc_input,
                                     dtype=torch.long).permute(1, 0)
            enc_output, states = self.encoder(inputs=enc_input)
            dec_input = torch.tensor(
                data=[[tokenizer.word_index.get(start_sign)]])

            beam_search_container = BeamSearch(beam_size=beam_size,
                                               max_length=self.max_sentence,
                                               worst_score=0)
            beam_search_container.reset(enc_output=enc_output.permute(1, 0, 2),
                                        dec_input=dec_input,
                                        remain=states)
            enc_output, dec_input, states = beam_search_container.get_search_inputs()
            enc_output = enc_output.permute(1, 0, 2)

            for t in range(self.max_sentence):
                predictions, dec_hidden = self.decoder(dec_input, states,
                                                       enc_output)
                predictions = F.softmax(input=predictions, dim=-1)

                beam_search_container.expand(
                    predictions=predictions[0],
                    end_sign=tokenizer.word_index.get(end_sign))
                if beam_search_container.beam_size == 0:
                    break

                enc_output, dec_input, states = beam_search_container.get_search_inputs()
                dec_input = dec_input[:, -1].unsqueeze(-1)
                enc_output = enc_output.permute(1, 0, 2)
                dec_input = dec_input.permute(1, 0)

            beam_search_result = beam_search_container.get_result(top_k=3)
            result = ""
            # Extract the sequences from the container and assemble the final result
            for i in range(len(beam_search_result)):
                temp = beam_search_result[i].numpy()
                text = tokenizer.sequences_to_texts(temp)
                text[0] = text[0].replace(start_sign, "").replace(end_sign, "").replace(" ", "")
                result = "<" + text[0] + ">" + result
            return result
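
The internals of the BeamSearch container are not shown here. As a rough illustration only (an assumption about what a single expansion step selects), the top beam_size continuations of the softmaxed predictions can be taken with torch.topk:

import torch
import torch.nn.functional as F

beam_size = 3
logits = torch.randn(1, 8)                    # decoder output for one step, vocabulary of 8
probs = F.softmax(logits, dim=-1)             # same normalization as in the example above
top_probs, top_ids = torch.topk(probs, k=beam_size, dim=-1)
# Each (token id, log-probability) pair would extend one beam candidate.
print(top_ids, torch.log(top_probs))
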
Example #4
def load_data(dict_path: str, buffer_size: int, batch_size: int, train_data_type: str, valid_data_type: str,
              max_sentence: int, valid_data_split: float = 0.0, train_data_path: str = "", valid_data_path: str = "",
              max_train_data_size: int = 0, max_valid_data_size: int = 0, **kwargs) -> Tuple:
    """ 数据加载方法

    :param dict_path: 字典路径
    :param buffer_size: Dataset加载缓存大小
    :param batch_size: Dataset加载批大小
    :param train_data_type: 读取训练数据类型,单轮/多轮...
    :param valid_data_type: 读取验证数据类型,单轮/多轮...
    :param max_sentence: 单个句子最大长度
    :param valid_data_split: 用于从训练数据中划分验证数据
    :param train_data_path: 文本数据路径
    :param valid_data_path: 验证数据文本路径
    :param max_train_data_size: 最大训练数据量
    :param max_valid_data_size: 最大验证数据量
    :return: 训练Dataset、验证Dataset、训练数据总共的步数、验证数据总共的步数和检查点前缀
    """
    tokenizer = load_tokenizer(dict_path=dict_path)

    train_flag = True  # flag indicating whether training is enabled
    train_steps_per_epoch = 0
    train_first, train_second, train_third = None, None, None

    valid_flag = True  # flag indicating whether validation is enabled
    valid_steps_per_epoch = 0
    valid_first, valid_second, valid_third = None, None, None

    if train_data_path != "":
        train_first, train_second, train_third = read_data(
            data_path=train_data_path, max_data_size=max_train_data_size,
            max_sentence=max_sentence, data_type=train_data_type, tokenizer=tokenizer, **kwargs
        )
    else:
        train_flag = False

    if valid_data_path != "":
        print("读取验证对话对...")
        valid_first, valid_second, valid_third = read_data(
            data_path=valid_data_path, max_data_size=max_valid_data_size,
            max_sentence=max_sentence, data_type=valid_data_type, tokenizer=tokenizer, **kwargs
        )
    elif valid_data_split != 0.0:
        train_size = int(len(train_first) * (1.0 - valid_data_split))
        valid_first = train_first[train_size:]
        valid_second = train_second[train_size:]
        valid_third = train_third[train_size:]
        train_first = train_first[:train_size]
        train_second = train_second[:train_size]
        train_third = train_third[:train_size]
    else:
        valid_flag = False

    if train_flag:
        train_dataset = tf.data.Dataset.from_tensor_slices((train_first, train_second, train_third)).cache().shuffle(
            buffer_size, reshuffle_each_iteration=True).prefetch(tf.data.experimental.AUTOTUNE)
        train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
        train_steps_per_epoch = len(train_first) // batch_size
    else:
        train_dataset = None

    if valid_flag:
        valid_dataset = tf.data.Dataset.from_tensor_slices((valid_first, valid_second, valid_third)) \
            .prefetch(tf.data.experimental.AUTOTUNE)
        valid_dataset = valid_dataset.batch(batch_size, drop_remainder=True)
        valid_steps_per_epoch = len(valid_first) // batch_size
    else:
        valid_dataset = None

    return train_dataset, valid_dataset, train_steps_per_epoch, valid_steps_per_epoch
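
The tf.data pipeline above (from_tensor_slices -> cache -> shuffle -> prefetch -> batch) can be tried in isolation; a minimal sketch with dummy arrays standing in for what read_data would return:

import numpy as np
import tensorflow as tf

first = np.random.randint(0, 100, size=(10, 5))   # dummy stand-ins for (first, second, third)
second = np.random.randint(0, 100, size=(10, 5))
third = np.random.randint(0, 100, size=(10, 5))

dataset = tf.data.Dataset.from_tensor_slices((first, second, third)).cache().shuffle(
    10, reshuffle_each_iteration=True).prefetch(tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(4, drop_remainder=True)

for batch_first, batch_second, batch_third in dataset:
    print(batch_first.shape)  # (4, 5); the last partial batch is dropped
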
Example #5
def load_data(dict_path: str,
              batch_size: int,
              train_data_type: str,
              valid_data_type: str,
              max_sentence: int,
              valid_data_split: float = 0.0,
              train_data_path: str = "",
              valid_data_path: str = "",
              max_train_data_size: int = 0,
              max_valid_data_size: int = 0,
              num_workers: int = 2,
              **kwargs) -> Tuple:
    """ 数据加载方法

    :param dict_path: 字典路径
    :param batch_size: Dataset加载批大小
    :param train_data_type: 读取训练数据类型,单轮/多轮...
    :param valid_data_type: 读取验证数据类型,单轮/多轮...
    :param max_sentence: 单个句子最大长度
    :param valid_data_split: 用于从训练数据中划分验证数据
    :param train_data_path: 文本数据路径
    :param valid_data_path: 验证数据文本路径
    :param max_train_data_size: 最大训练数据量
    :param max_valid_data_size: 最大验证数据量
    :param num_workers: 数据加载器的工作线程
    :return: 训练Dataset、验证Dataset、训练数据总共的步数、验证数据总共的步数和检查点前缀
    """
    tokenizer = load_tokenizer(dict_path=dict_path)

    train_flag = True  # flag indicating whether training is enabled
    train_steps_per_epoch = 0
    train_first, train_second, train_third = None, None, None

    valid_flag = True  # flag indicating whether validation is enabled
    valid_steps_per_epoch = 0
    valid_first, valid_second, valid_third = None, None, None

    if train_data_path != "":
        train_first, train_second, train_third = read_data(
            data_path=train_data_path,
            max_data_size=max_train_data_size,
            max_sentence=max_sentence,
            data_type=train_data_type,
            tokenizer=tokenizer,
            **kwargs)
    else:
        train_flag = False

    if valid_data_path != "":
        print("读取验证对话对...")
        valid_first, valid_second, valid_third = read_data(
            data_path=valid_data_path,
            max_data_size=max_valid_data_size,
            max_sentence=max_sentence,
            data_type=valid_data_type,
            tokenizer=tokenizer,
            **kwargs)
    elif valid_data_split != 0.0:
        train_size = int(len(train_first) * (1.0 - valid_data_split))
        valid_first = train_first[train_size:]
        valid_second = train_second[train_size:]
        valid_third = train_third[train_size:]
        train_first = train_first[:train_size]
        train_second = train_second[:train_size]
        train_third = train_third[:train_size]
    else:
        valid_flag = False

    if train_flag:
        train_dataset = PairDataset(train_first, train_second, train_third)
        train_loader = DataLoader(dataset=train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  drop_last=True,
                                  num_workers=num_workers)
        train_steps_per_epoch = len(train_first) // batch_size
    else:
        train_loader = None

    if valid_flag:
        valid_dataset = PairDataset(valid_first, valid_second, valid_third)
        valid_loader = DataLoader(dataset=valid_dataset,
                                  batch_size=batch_size,
                                  shuffle=False,
                                  drop_last=True,
                                  num_workers=num_workers)
        valid_steps_per_epoch = len(valid_first) // batch_size
    else:
        valid_loader = None

    return train_loader, valid_loader, train_steps_per_epoch, valid_steps_per_epoch
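
PairDataset is not defined in this listing. A minimal equivalent (an assumption about its behavior: index-aligned access to the three sequences) combined with the same DataLoader settings might look like this:

import torch
from torch.utils.data import Dataset, DataLoader

class PairDatasetSketch(Dataset):
    """ Hypothetical stand-in for the unshown PairDataset: returns aligned triples. """

    def __init__(self, first, second, third):
        self.first, self.second, self.third = map(torch.as_tensor, (first, second, third))

    def __len__(self):
        return len(self.first)

    def __getitem__(self, index):
        return self.first[index], self.second[index], self.third[index]

dataset = PairDatasetSketch(torch.randint(0, 100, (10, 5)),
                            torch.randint(0, 100, (10, 5)),
                            torch.randint(0, 100, (10, 5)))
loader = DataLoader(dataset=dataset, batch_size=4, shuffle=True, drop_last=True, num_workers=0)
for first, second, third in loader:
    print(first.shape)  # torch.Size([4, 5]); the last partial batch is dropped
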
Example #6
def tf_seq2seq() -> NoReturn:
    parser = ArgumentParser(description="seq2seq chatbot")
    parser.add_argument("--version", default="tf", type=str, required=True, help="执行版本")
    parser.add_argument("--model", default="transformer", type=str, required=True, help="执行模型")
    parser.add_argument("--config_file", default="", type=str, required=False, help="配置文件路径,为空则默认命令行,不为空则使用配置文件参数")
    parser.add_argument("--act", default="pre_treat", type=str, required=False, help="执行类型")
    parser.add_argument("--cell_type", default="lstm", type=str, required=False, help="rnn的cell类型")
    parser.add_argument("--if_bidirectional", default=True, type=bool, required=False, help="是否开启双向rnn")
    parser.add_argument("--units", default=1024, type=int, required=False, help="隐藏层单元数")
    parser.add_argument("--vocab_size", default=20000, type=int, required=False, help="词汇大小")
    parser.add_argument("--embedding_dim", default=512, type=int, required=False, help="嵌入层维度大小")
    parser.add_argument("--encoder_layers", default=6, type=int, required=False, help="encoder的层数")
    parser.add_argument("--decoder_layers", default=6, type=int, required=False, help="decoder的层数")
    parser.add_argument("--max_train_data_size", default=0, type=int, required=False, help="用于训练的最大数据大小")
    parser.add_argument("--max_valid_data_size", default=0, type=int, required=False, help="用于验证的最大数据大小")
    parser.add_argument("--max_sentence", default=40, type=int, required=False, help="单个序列的最大长度")
    parser.add_argument("--dict_path", default="data/preprocess/seq2seq_dict.json",
                        type=str, required=False, help="字典路径")
    parser.add_argument("--checkpoint_dir", default="checkpoints/tensorflow/seq2seq",
                        type=str, required=False, help="检查点路径")
    parser.add_argument("--resource_data_path", default="data/LCCC.json", type=str, required=False, help="原始数据集路径")
    parser.add_argument("--tokenized_data_path", default="data/preprocess/lccc_tokenized.txt",
                        type=str, required=False, help="处理好的多轮分词数据集路径")
    parser.add_argument("--preprocess_data_path", default="data/preprocess/single_tokenized.txt",
                        type=str, required=False, help="处理好的单轮分词数据集路径")
    parser.add_argument("--valid_data_path", default="data/preprocess/single_tokenized.txt", type=str,
                        required=False, help="处理好的单轮分词验证评估用数据集路径")
    parser.add_argument("--history_image_dir", default="data/history/seq2seq/", type=str, required=False,
                        help="数据指标图表保存路径")
    parser.add_argument("--valid_freq", default=5, type=int, required=False, help="验证频率")
    parser.add_argument("--checkpoint_save_freq", default=2, type=int, required=False, help="检查点保存频率")
    parser.add_argument("--checkpoint_save_size", default=1, type=int, required=False, help="单轮训练中检查点保存数量")
    parser.add_argument("--batch_size", default=64, type=int, required=False, help="batch大小")
    parser.add_argument("--buffer_size", default=20000, type=int, required=False, help="Dataset加载缓冲大小")
    parser.add_argument("--beam_size", default=3, type=int, required=False, help="BeamSearch的beam大小")
    parser.add_argument("--valid_data_split", default=0.2, type=float, required=False, help="从训练数据集中划分验证数据的比例")
    parser.add_argument("--epochs", default=5, type=int, required=False, help="训练步数")
    parser.add_argument("--start_sign", default="<start>", type=str, required=False, help="序列开始标记")
    parser.add_argument("--end_sign", default="<end>", type=str, required=False, help="序列结束标记")
    parser.add_argument("--unk_sign", default="<unk>", type=str, required=False, help="未登录词")
    parser.add_argument("--encoder_save_path", default="models/tensorflow/seq2seq/encoder", type=str,
                        required=False, help="Encoder的SaveModel格式保存路径")
    parser.add_argument("--decoder_save_path", default="models/tensorflow/seq2seq/decoder", type=str,
                        required=False, help="Decoder的SaveModel格式保存路径")

    options = parser.parse_args().__dict__
    execute_type = options["act"]
    if options["config_file"] != "":
        with open(options["config_file"], "r", encoding="utf-8") as config_file:
            options = json.load(config_file)

    # Note: path-related parameters are resolved relative to the tensorflow directory
    file_path = os.path.abspath(__file__)
    work_path = file_path[:file_path.find("tensorflow")]

    encoder = seq2seq.encoder(
        vocab_size=options["vocab_size"], embedding_dim=options["embedding_dim"], num_layers=options["encoder_layers"],
        enc_units=options["units"] // 2, cell_type=options["cell_type"], if_bidirectional=options["if_bidirectional"]
    )
    decoder = seq2seq.decoder(
        vocab_size=options["vocab_size"], embedding_dim=options["embedding_dim"], dec_units=options["units"],
        enc_units=options["units"], num_layers=options["decoder_layers"], cell_type=options["cell_type"]
    )

    loss_metric = tf.keras.metrics.Mean(name="loss_metric")
    accuracy_metric = tf.keras.metrics.SparseCategoricalAccuracy(name="accuracy_metric")
    optimizer = tf.optimizers.Adam(name="optimizer")
    checkpoint_manager = load_checkpoint(
        checkpoint_dir=work_path + options["checkpoint_dir"], execute_type=execute_type,
        encoder=encoder, decoder=decoder, checkpoint_save_size=options["checkpoint_save_size"]
    )

    modules = Seq2SeqModule(
        loss_metric=loss_metric, accuracy_metric=accuracy_metric, batch_size=options["batch_size"],
        buffer_size=options["buffer_size"], max_sentence=options["max_sentence"], train_data_type="read_single_data",
        valid_data_type="read_single_data", dict_path=work_path + options["dict_path"], encoder=encoder, decoder=decoder
    )

    if execute_type == "pre_treat":
        preprocess_dataset(dataset_name="lccc", raw_data_path=work_path + options["resource_data_path"],
                           tokenized_data_path=work_path + options["tokenized_data_path"], remove_tokenized=True)
        to_single_turn_dataset(tokenized_data_path=work_path + options["tokenized_data_path"],
                               dict_path=work_path + options["dict_path"], unk_sign=options["unk_sign"],
                               start_sign=options["start_sign"], end_sign=options["end_sign"],
                               max_data_size=options["max_train_data_size"], vocab_size=options["vocab_size"],
                               qa_data_path=work_path + options["preprocess_data_path"])
    elif execute_type == "train":
        history = {"train_accuracy": [], "train_loss": [], "valid_accuracy": [], "valid_loss": []}
        tokenizer = load_tokenizer(dict_path=work_path + options["dict_path"])
        history = modules.train(
            optimizer=optimizer, epochs=options["epochs"], checkpoint=checkpoint_manager, history=history,
            train_data_path=work_path + options["preprocess_data_path"], valid_data_path="",
            checkpoint_save_freq=options["checkpoint_save_freq"], max_valid_data_size=options["max_valid_data_size"],
            max_train_data_size=options["max_train_data_size"], valid_data_split=options["valid_data_split"],
            start_sign=tokenizer.word_index.get(options["start_sign"]),
            encoder_save_path=work_path + options["encoder_save_path"],
            decoder_save_path=work_path + options["decoder_save_path"]
        )
        show_history(history=history, valid_freq=options["checkpoint_save_freq"],
                     save_dir=work_path + options["history_image_dir"])
    elif execute_type == "evaluate":
        tokenizer = load_tokenizer(dict_path=work_path + options["dict_path"])
        modules.evaluate(
            max_valid_data_size=options["max_valid_data_size"], valid_data_path=work_path + options["valid_data_path"],
            start_sign=tokenizer.word_index.get(options["start_sign"])
        )
    elif execute_type == "chat":
        print("Agent: 你好!结束聊天请输入ESC。")
        while True:
            request = input("User: "******"ESC":
                print("Agent: 再见!")
                exit(0)
            response = modules.inference(request=request, beam_size=options["beam_size"],
                                         start_sign=options["start_sign"], end_sign=options["end_sign"])
            print("Agent: ", response)
    else:
        parser.error(message="")
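
Note that execute_type is read from the command line before the config file overrides options, and json.load replaces options wholesale, so a config file has to provide every key the script later reads. A minimal sketch of writing such a file from Python, with a hypothetical name and only a subset of the keys:

import json

config = {  # hypothetical subset; the real file would need every option key used above
    "vocab_size": 20000,
    "embedding_dim": 512,
    "units": 1024,
    "max_sentence": 40,
    "batch_size": 64,
    "beam_size": 3,
    "dict_path": "data/preprocess/seq2seq_dict.json",
    "checkpoint_dir": "checkpoints/tensorflow/seq2seq",
}

with open("seq2seq_config.json", "w", encoding="utf-8") as config_file:
    json.dump(config, config_file, ensure_ascii=False, indent=2)

# Invocation (values are illustrative): --version tf --model seq2seq --config_file seq2seq_config.json
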