Example #1
# Imports shared by all three examples below; `Category` is the
# project's category-tree class (not shown on this page).
import logging

import numpy as np
from tqdm import tqdm


def init_bayes_model(category_tree: Category, documents_size: int,
                     vocab_size: int):
    """
    初始化模型所用参数
    :param category_tree: 分类树根节点
    :param documents_size: 文档数
    :param vocab_size: 单词数
    :return: P(C) -> (category_size, )
             P(C|D) -> (category_size, documents_size)
             P(W|C) -> (vocab_size, category_size)
    """
    category_list = category_tree.get_category_list()
    category_size = len(category_list)
    category_prior_probability = np.zeros(category_size)  # category prior P(C)
    category_document_cond_probability = np.zeros(
        [documents_size, category_size])  # document-conditional probability P(C|D)

    # Initialize P(C) and P(C|D) from the pre-labeled documents
    logging.info("Initializing parameters")
    for c, category in tqdm(enumerate(category_list)):
        category_path = category.split("/")
        category_documents = category_tree.find_category(
            category_path).get_documents()
        for document_index in category_documents:
            category_document_cond_probability[document_index, c] = 1.0
        category_prior_probability[c] = (1.0 + len(category_documents)) / (
            category_size + documents_size)  # Laplace smoothing

    category_document_cond_probability = category_document_cond_probability.T  # transpose for easier matrix multiplication
    word_category_cond_probability = np.zeros([vocab_size, len(category_list)])
    logging.info("预标注比例: {}/{}".format(
        int(category_document_cond_probability.sum()), documents_size))

    return category_prior_probability, category_document_cond_probability, word_category_cond_probability
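
For intuition, the Laplace-smoothed prior above is plain counting arithmetic. The standalone sketch below reproduces it with made-up numbers (3 categories, 10 pre-labeled documents) instead of the project's `Category` tree:

import numpy as np

# Hypothetical counts: 3 categories, 10 documents in total.
category_size, documents_size = 3, 10
labeled_counts = np.array([5, 3, 2])  # pre-labeled documents per category

# Laplace-smoothed prior, as in init_bayes_model:
# P(c) = (1 + |D_c|) / (category_size + documents_size)
p_c = (1.0 + labeled_counts) / (category_size + documents_size)
print(p_c)        # [0.46153846 0.30769231 0.23076923]
print(p_c.sum())  # 1.0 -- the smoothed priors form a proper distribution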
Example #2
def hierarchical_shrinkage_init(category_tree: Category, document_vectors):
    """
    shrinkage步骤利用分类的层次关系来缓解特征稀疏的问题
    1/|V|(λ4) <- ROOT(λ3) <- 新闻(λ2) <- 国际新闻(λ1) <- 经济新闻(λ0)
    按层次关系将父分类词的概率加权后累加在子分类上
    :param category_tree: 分类树root节点
    :param document_vectors: 文档词频矩阵
    :return: λ -> (category_size, max_depth + 2)
             β -> (documents_size, category_size, max_depth + 2)
             P^{α}(W|C) -> (vocab_size, category_size, max_depth + 2)
    """
    logging.info("初始化shrinkage参数")
    max_depth = Category.get_max_depth(category_tree)
    category_list = category_tree.get_category_list()
    category_size = len(category_list)
    lambda_size = max_depth + 2
    lambda_matrix = np.zeros([category_size, lambda_size])
    for c, path in enumerate(category_list):
        category_node = category_tree.find_category(path.split("/"))
        depth = category_node.get_depth()
        # Uniform initialization: the category's own levels, the ROOT level
        # and the uniform 1/|V| level share depth + 2 equal weights
        init_lambda_val = 1.0 / (depth + 2)
        for k in range(depth):  # the category itself and its ancestors below ROOT
            lambda_matrix[c, k] = init_lambda_val
        lambda_matrix[c, max_depth] = init_lambda_val      # ROOT level
        lambda_matrix[c, max_depth + 1] = init_lambda_val  # uniform 1/|V| level
    # init β
    documents_size, vocab_size = document_vectors.shape
    beta_matrix = np.zeros([documents_size, category_size, lambda_size])
    # init P^{α}(W|C)
    p_w_c_k = np.zeros([vocab_size, category_size, lambda_size])
    return lambda_matrix, beta_matrix, p_w_c_k
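
The λ weights initialized here are later used to mix the per-level estimates P^{α}(W|C) into a single P(W|C) (see Example #3, Eq. 4). The following is a minimal standalone sketch of that mixture, with made-up shapes and illustrative names rather than the repo's code:

import numpy as np

# Hypothetical shapes: 4 words, 2 categories, max_depth = 2 -> 4 lambda levels.
vocab_size, category_size, lambda_size = 4, 2, 4

rng = np.random.default_rng(0)
# Per-level word distributions P^{α_k}(w|c): each (c, k) column sums to 1.
p_w_c_k = rng.random((vocab_size, category_size, lambda_size))
p_w_c_k /= p_w_c_k.sum(axis=0, keepdims=True)
# Mixture weights: each category's lambdas sum to 1, as in the init above.
lambda_matrix = np.full((category_size, lambda_size), 1.0 / lambda_size)

# Shrinkage mixture: P(w|c) = sum_k lambda[c, k] * P^{α_k}(w|c)
p_w_c = np.einsum('vck,ck->vc', p_w_c_k, lambda_matrix)
print(p_w_c.sum(axis=0))  # [1. 1.] -- each category still has a valid distribution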
Example #3
def maximization_step_with_shrinkage(category_tree: Category, document_vectors,
                                     p_c, p_c_d, p_w_c, p_w_c_k, lambda_matrix,
                                     beta_matrix, iter: int):
    # After the E-step has updated P(C|D), the M-step updates P(W|C) (Eq. 1) and P(C) (Eq. 2)
    documents_size, vocab_size = document_vectors.shape
    category_size, lambda_size = lambda_matrix.shape
    category_list = category_tree.get_category_list()
    # vertical M
    if iter > 0:
        shrinkage_maximization_step(lambda_matrix, beta_matrix, p_c_d)
    # horizontal M
    # update P^{α}(w|c)
    logging.info("Horizontal M-step")
    for c in tqdm(range(category_size)):
        category_path = category_list[c].split("/")
        dep_list = []
        category_depth = len(category_path)
        for k in range(category_depth):
            # Level 0 is the category itself; the loop then walks up the
            # hierarchy towards ROOT (ROOT itself is excluded)
            dep_list.append(category_list.index("/".join(category_path)))
            category_vectors = p_c_d[dep_list] @ document_vectors  # only the categories on the path are needed
            if category_vectors.ndim == 1:
                category_vectors = category_vectors.reshape(1, -1)
            category_vector_hierarchy = category_vectors.sum(
                axis=0)  # ancestor categories' documents also count towards the child
            category_vector_hierarchy_sum = category_vector_hierarchy.sum()
            for v in range(vocab_size):
                p_w_c_k[v, c, k] = (1.0 + category_vector_hierarchy[v]) / (
                    vocab_size + category_vector_hierarchy_sum)
            category_path = category_path[:-1]
    category_vector_root = document_vectors.sum(axis=0)
    category_vector_root_sum = document_vectors.sum()
    for v in range(vocab_size):
        p_w_c_k[v, :, -2] = (1.0 + category_vector_root[0, v]) / (
            vocab_size + category_vector_root_sum
        )  # category_vector_root.ndim=2
    p_w_c_k[:, :, -1] = 1.0 / vocab_size
    # update p_w_c (Eq. 4): shrinkage mixture of the per-level estimates
    for v in range(vocab_size):
        p_w_c[v] = (lambda_matrix * p_w_c_k[v]).sum(axis=1)
    # update p_c (Eq. 2)
    for c in range(category_size):
        p_c[c] = (1 + p_c_d[c].sum()) / (category_size + documents_size)
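
The comments above refer to an E-step that recomputes P(C|D) before each M-step; that function is not shown on this page. A minimal sketch of a standard naive-Bayes E-step matching these array shapes could look as follows (the function name and details are assumptions, not the repo's code; dense numpy arrays are assumed):

import numpy as np

def expectation_step(p_c, p_w_c, document_vectors):
    """Recompute P(C|D) from the current P(C) and P(W|C).

    log P(c|d) is proportional to log P(c) + sum_w n(w, d) * log P(w|c).
    Shapes: p_c (category_size,), p_w_c (vocab_size, category_size),
    document_vectors (documents_size, vocab_size).
    Returns P(C|D) with shape (category_size, documents_size).
    """
    log_scores = document_vectors @ np.log(p_w_c) + np.log(p_c)  # (D, C)
    log_scores -= log_scores.max(axis=1, keepdims=True)  # numerical stability
    p_c_d = np.exp(log_scores)
    p_c_d /= p_c_d.sum(axis=1, keepdims=True)  # normalize per document
    return p_c_d.T  # (category_size, documents_size)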