def summarize(self, text, num=320, title=None):
    """
    Summarize by word frequency, sentence position, sentence length and title.
    :param text: str or list
    :param num: int
    :param title: str
    :return: list
    """
    # split into sentences
    if isinstance(text, str):
        self.sentences = cut_sentence(text)
    elif isinstance(text, list):
        self.sentences = text
    else:
        raise RuntimeError("text type must be list or str")
    self.title = title
    if self.title:
        self.title = macropodus_cut(title)
    # tokenize
    sentences_cut = [[word for word in macropodus_cut(extract_chinese(sentence))
                      if word.strip()] for sentence in self.sentences]
    # remove stop words
    self.sentences_cut = [list(filter(lambda x: x not in self.stop_words, sc)) for sc in sentences_cut]
    # word frequency
    self.words = []
    for sen in self.sentences_cut:
        self.words = self.words + sen
    self.word_count = dict(Counter(self.words))
    # word_count_rank = sorted(word_count.items(), key=lambda f: f[1], reverse=True)
    # self.word_freqs = [{'word': wcr[0], 'freq': wcr[1]} for wcr in word_count_rank]
    # score each word by its frequency, stored in self.word_freqs
    self.word_freqs = {}
    self.len_words = len(self.words)
    for k, v in self.word_count.items():
        self.word_freqs[k] = v * 0.5 / self.len_words
    # sentence position scores
    scores_posi = self.score_position()
    res_rank = {}
    self.res_score = []
    for i in range(len(sentences_cut)):
        sen = self.sentences[i]          # sentence
        sen_cut = self.sentences_cut[i]  # words of the sentence
        score_sbs = self.score_sbs(sen_cut)                 # word score 1
        score_dbs = self.score_dbs(sen_cut)                 # word score 2
        score_word = (score_sbs + score_dbs) * 10.0 / 2.0   # combined word score
        score_length = self.score_length(sen)               # sentence length score
        score_posi = scores_posi[i]
        if self.title:  # combine scores when a title is given
            score_title = self.score_title(sen_cut)
            score_total = (score_title * 0.5 + score_word * 2.0 + score_length * 0.5 + score_posi * 1.0) / 4.0
            # keep per-component scores for inspection
            self.res_score.append(["score_total", "score_sbs", "score_dbs", "score_word",
                                   "score_length", "score_posi", "score_title", "sentences"])
            self.res_score.append([score_total, score_sbs, score_dbs, score_word,
                                   score_length, score_posi, score_title, self.sentences[i]])
        else:  # combine scores when no title is given
            score_total = (score_word * 2.0 + score_length * 0.5 + score_posi * 1.0) / 3.5
            self.res_score.append(["score_total", "score_sbs", "score_dbs", "score_word",
                                   "score_length", "score_posi", "sentences"])
            self.res_score.append([score_total, score_sbs, score_dbs, score_word,
                                   score_length, score_posi, self.sentences[i].strip()])
        res_rank[self.sentences[i].strip()] = score_total
    # cap the number of returned sentences
    num_min = min(num, int(len(self.word_count) * 0.6))
    score_sen = [(rc[1], rc[0]) for rc in
                 sorted(res_rank.items(), key=lambda d: d[1], reverse=True)][0:num_min]
    return score_sen
def sentence2idx(self, text):
    text = extract_chinese(str(text).upper())
    if self.level_type == 'char':
        text = list(text)
    elif self.level_type == 'word':
        text = macropodus_cut(text)
    else:
        raise RuntimeError("your input level_type is wrong, it must be 'word' or 'char'")
    text = [text_one for text_one in text]
    len_leave = self.len_max - len(text)
    if len_leave >= 0:
        # map tokens to ids and pad with [PAD] up to len_max
        text_index = [self.token2idx[text_char] if text_char in self.token2idx else self.token2idx['[UNK]']
                      for text_char in text] + [self.token2idx['[PAD]'] for i in range(len_leave)]
    else:
        # truncate to len_max
        text_index = [self.token2idx[text_char] if text_char in self.token2idx else self.token2idx['[UNK]']
                      for text_char in text[0:self.len_max]]
    input_mask = min(len(text), self.len_max)
    return [text_index, input_mask]
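# A minimal, self-contained sketch of the padding/truncation logic used by sentence2idx above,
# with a toy token2idx and len_max; the vocabulary and values are made up for illustration.
token2idx_demo = {'[PAD]': 0, '[UNK]': 1, '大': 2, '漠': 3}
len_max_demo = 6

def to_ids_demo(chars):
    # map each char to its id (or [UNK]), truncate to len_max, then pad with [PAD]
    ids = [token2idx_demo.get(c, token2idx_demo['[UNK]']) for c in chars[:len_max_demo]]
    ids += [token2idx_demo['[PAD]']] * (len_max_demo - len(ids))
    input_mask = min(len(chars), len_max_demo)  # count of real (non-pad) tokens
    return ids, input_mask

print(to_ids_demo(list("大漠帝国")))  # ([2, 3, 1, 1, 0, 0], 4)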
def deal_corpus(self):
    token2idx = self.ot_dict.copy()
    count = 3
    if 'term' in self.corpus_path:
        with open(file=self.corpus_path, mode='r', encoding='utf-8') as fd:
            while True:
                term_one = fd.readline()
                if not term_one:
                    break
                term_one = term_one.strip()
                if term_one not in token2idx:
                    count = count + 1
                    token2idx[term_one] = count
    elif 'corpus' in self.corpus_path:
        with open(file=self.corpus_path, mode='r', encoding='utf-8') as fd:
            terms = fd.readlines()
            for term_one in terms:
                if self.level_type == 'char':
                    text = list(term_one.replace(' ', '').strip())
                elif self.level_type == 'word':
                    text = macropodus_cut(term_one)
                else:
                    raise RuntimeError("your input level_type is wrong, it must be 'word' or 'char'")
                for text_one in text:
                    if text_one not in token2idx:  # check the token itself, not the whole line
                        count = count + 1
                        token2idx[text_one] = count
    else:
        raise RuntimeError("your input corpus_path is wrong, it must contain 'term' or 'corpus'")
    self.token2idx = token2idx
    self.idx2token = {}
    for key, value in self.token2idx.items():
        self.idx2token[value] = key
def _build_corpus(sentences):
    """Construct corpus from provided sentences.

    Parameters
    ----------
    sentences : list of :class:`~gensim.summarization.syntactic_unit.SyntacticUnit`
        Given sentences.

    Returns
    -------
    list of list of (int, int)
        Corpus built from sentences.

    """
    split_tokens = [macropodus_cut(sentence) for sentence in sentences]
    dictionary = Dictionary(split_tokens)
    return [dictionary.doc2bow(token) for token in split_tokens]
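# A minimal sketch of the (int, int) bag-of-words format that _build_corpus returns, using
# gensim's Dictionary directly with pre-split toy tokens instead of macropodus_cut; the
# sentences are made up for illustration.
from gensim.corpora import Dictionary

tokens_demo = [["hello", "world"], ["hello", "again", "hello"]]
dictionary_demo = Dictionary(tokens_demo)
corpus_demo = [dictionary_demo.doc2bow(t) for t in tokens_demo]
# each document becomes a list of (token_id, count) pairs, e.g. [(0, 1), (1, 1)]
print(corpus_demo)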
def summarize(self, text, num=320):
    """
    Select central sentences by word significance (frequency).
    :param text: str
    :param num: int
    :return: list
    """
    # split into sentences
    if isinstance(text, str):
        self.sentences = cut_sentence(text)
    elif isinstance(text, list):
        self.sentences = text
    else:
        raise RuntimeError("text type must be list or str")
    # tokenize
    sentences_cut = [[word for word in macropodus_cut(extract_chinese(sentence))
                      if word.strip()] for sentence in self.sentences]
    # remove stop words
    self.sentences_cut = [list(filter(lambda x: x not in self.stop_words, sc)) for sc in sentences_cut]
    # word frequency
    self.words = []
    for sen in self.sentences_cut:
        self.words = self.words + sen
    self.word_count = dict(Counter(self.words))
    self.word_count_rank = sorted(self.word_count.items(), key=lambda f: f[1], reverse=True)
    # cap the number of returned sentences
    num_min = min(num, int(len(self.word_count) * 0.6))
    # words ranked by frequency
    self.word_rank = [wcr[0] for wcr in self.word_count_rank][0:num_min]
    res_sentence = []
    # pick sentences in order: a sentence is selected if it contains a high-frequency word
    for word in self.word_rank:
        for i in range(0, len(self.sentences)):
            # stop adding once enough sentences have been collected
            if len(res_sentence) < num_min:
                added = False
                for sent in res_sentence:
                    if sent == self.sentences[i]:
                        added = True
                if not added and word in self.sentences[i]:
                    res_sentence.append(self.sentences[i])
                    break
    # attach a decreasing pseudo-score to each selected sentence (for ranking display only)
    len_sentence = len(self.sentences)
    res_sentence = [(1 - 1 / (len_sentence + len_sentence / (k + 1)), rs)
                    for k, rs in enumerate(res_sentence)]
    return res_sentence
def summarize(self, text, num=8, alpha=0.6):
    """
    MMR (Maximal Marginal Relevance) summarization.
    :param text: str
    :param num: int
    :param alpha: float, trade-off between relevance and redundancy
    :return: list
    """
    # split into sentences
    if isinstance(text, str):
        self.sentences = cut_sentence(text)
    elif isinstance(text, list):
        self.sentences = text
    else:
        raise RuntimeError("text type must be list or str")
    # tokenize
    sentences_cut = [[word for word in macropodus_cut(extract_chinese(sentence))
                      if word.strip()] for sentence in self.sentences]
    # remove stop words
    self.sentences_cut = [list(filter(lambda x: x not in self.stop_words, sc)) for sc in sentences_cut]
    self.sentences_cut = [" ".join(sc) for sc in self.sentences_cut]
    # # word count of each sentence (unused)
    # sen_word_len = [len(sc) + 1 for sc in sentences_cut]
    # tf-idf of each sentence
    sen_tfidf = tfidf_fit(self.sentences_cut)
    # pairwise sentence similarity matrix, e.g. SimMatrix[1, 3] is the similarity of sentence 2 and sentence 4
    SimMatrix = (sen_tfidf * sen_tfidf.T).A
    # number of input sentences
    len_sen = len(self.sentences)
    # sentence indices
    sen_idx = [i for i in range(len_sen)]
    summary_set = []
    mmr = {}
    for i in range(len_sen):
        if not self.sentences[i] in summary_set:
            sen_idx_pop = copy.deepcopy(sen_idx)
            sen_idx_pop.pop(i)
            # similarity of sentence i to every other sentence
            sim_i_j = [SimMatrix[i, j] for j in sen_idx_pop]
            # dividing by the word count (sen_word_len[i]) would distort this score
            score_tfidf = sen_tfidf[i].toarray()[0].sum()
            mmr[self.sentences[i]] = alpha * score_tfidf - (1 - alpha) * max(sim_i_j)
            summary_set.append(self.sentences[i])
    score_sen = [(rc[1], rc[0]) for rc in sorted(mmr.items(), key=lambda d: d[1], reverse=True)]
    if len(mmr) > num:
        score_sen = score_sen[0:num]
    return score_sen
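# A minimal sketch of the MMR score used above, on toy English sentences with sklearn's
# TfidfVectorizer standing in for tfidf_fit; names and data are illustrative only.
from sklearn.feature_extraction.text import TfidfVectorizer

sentences_demo = ["the cat sat on the mat", "the cat sat", "dogs bark loudly"]
tfidf_demo = TfidfVectorizer().fit_transform(sentences_demo)
sim_demo = (tfidf_demo * tfidf_demo.T).A          # pairwise similarity of L2-normalized tf-idf rows
alpha_demo = 0.6
mmr_demo = {}
for i, sent in enumerate(sentences_demo):
    others = [sim_demo[i, j] for j in range(len(sentences_demo)) if j != i]
    relevance = tfidf_demo[i].toarray()[0].sum()
    # reward relevance, penalize redundancy with already-similar sentences
    mmr_demo[sent] = alpha_demo * relevance - (1 - alpha_demo) * max(others)
print(sorted(mmr_demo.items(), key=lambda kv: kv[1], reverse=True))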
def deal_corpus(self):
    import json
    token2idx = self.ot_dict.copy()
    if 'term' in self.corpus_path:
        with open(file=self.corpus_path, mode='r', encoding='utf-8') as fd:
            while True:
                term_one = fd.readline()
                if not term_one:
                    break
                term_one = term_one.strip()  # drop the trailing newline before indexing
                if term_one not in token2idx:
                    token2idx[term_one] = len(token2idx)
    elif os.path.exists(self.corpus_path):
        with open(file=self.corpus_path, mode='r', encoding='utf-8') as fd:
            terms = fd.readlines()
            for line in terms:
                ques_label = json.loads(line.strip())
                term_one = ques_label["question"]
                term_one = "".join(term_one)
                if self.level_type == 'char':
                    text = list(term_one.replace(' ', '').strip())
                elif self.level_type == 'word':
                    text = macropodus_cut(term_one)
                elif self.level_type == 'ngram':
                    text = get_ngrams(term_one, ns=self.ngram_ns)
                else:
                    raise RuntimeError("your input level_type is wrong, it must be 'word', 'char' or 'ngram'")
                for text_one in text:
                    if text_one not in token2idx:
                        token2idx[text_one] = len(token2idx)
    else:
        raise RuntimeError("your input corpus_path is wrong, it must contain 'term' or point to an existing corpus file")
    self.token2idx = token2idx
    self.idx2token = {}
    for key, value in self.token2idx.items():
        self.idx2token[value] = key
def pinyin(self, text):
    """
    Convert Chinese (mainland) text to pinyin.
    :param text: str, like "大漠帝国"
    :return: list, like ["da", "mo", "di", "guo"]
    """
    res_pinyin = []
    # keep only the Chinese (zh) spans, filtered via split
    text_re = re_zh_cn.split(text)
    for tr in text_re:
        if re_zh_cn.match(tr):
            # tokenize the Chinese span
            tr_cut = macropodus_cut(tr)
            for trc in tr_cut:  # each word of the span
                # look the word up in the default pinyin dict
                trc_pinyin = self.dict_pinyin.get(trc)
                if trc_pinyin:
                    res_pinyin += trc_pinyin
                else:
                    # fall back to single characters
                    for trc_ in trc:
                        # look the character up in the default pinyin dict
                        trc_pinyin = self.dict_pinyin.get(trc_)
                        if trc_pinyin:
                            res_pinyin += trc_pinyin
    return res_pinyin
def summarize(self, text, num=8, topic_min=6, judge_topic=None):
    """
    LDA-based summarization.
    :param text: str
    :param num: int
    :param topic_min: int
    :param judge_topic: boolean
    :return: list
    """
    # split into sentences
    if isinstance(text, str):
        self.sentences = cut_sentence(text)
    elif isinstance(text, list):
        self.sentences = text
    else:
        raise RuntimeError("text type must be list or str")
    len_sentences_cut = len(self.sentences)
    # tokenize
    sentences_cut = [[word for word in macropodus_cut(extract_chinese(sentence))
                      if word.strip()] for sentence in self.sentences]
    # remove stop words
    self.sentences_cut = [list(filter(lambda x: x not in self.stop_words, sc)) for sc in sentences_cut]
    self.sentences_cut = [" ".join(sc) for sc in self.sentences_cut]
    # # term frequency of each sentence
    # vector_c = CountVectorizer(ngram_range=(1, 2), stop_words=self.stop_words)
    # tf_ngram = vector_c.fit_transform(self.sentences_cut)
    # tf-idf of each sentence
    tf_ngram = tfidf_fit(self.sentences_cut)
    # topic number, heuristic: at most topic_min, and at most half the sentence count
    topic_num = min(topic_min, int(len(sentences_cut) / 2))
    lda = LatentDirichletAllocation(n_components=topic_num, max_iter=32,
                                    learning_method='online',
                                    learning_offset=50., random_state=2019)
    res_lda_u = lda.fit_transform(tf_ngram.T)
    res_lda_v = lda.components_
    if judge_topic:
        ### option 1: take the top-k sentences of the dominant topic
        topic_t_score = np.sum(res_lda_v, axis=-1)
        # rank the topics of each column (one sentence, topic_num topics); row 0 holds the largest
        res_nmf_h_soft = res_lda_v.argsort(axis=0)[-topic_num:][::-1]
        # count, per topic, the sentences for which it is dominant
        exist = (res_nmf_h_soft <= 0) * 1.0
        factor = np.ones(res_nmf_h_soft.shape[1])
        topic_t_count = np.dot(exist, factor)
        # normalize
        topic_t_count /= np.sum(topic_t_count, axis=-1)
        topic_t_score /= np.sum(topic_t_score, axis=-1)
        # pick the topic maximizing dominant-sentence share plus total-score share
        topic_t_tc = topic_t_count + topic_t_score
        topic_t_tc_argmax = np.argmax(topic_t_tc)
        # final scores come from that dominant topic
        res_nmf_h_soft_argmax = res_lda_v[topic_t_tc_argmax].tolist()
        res_combine = {}
        for l in range(len_sentences_cut):
            res_combine[self.sentences[l]] = res_nmf_h_soft_argmax[l]
        score_sen = [(rc[1], rc[0]) for rc in
                     sorted(res_combine.items(), key=lambda d: d[1], reverse=True)]
    else:
        ### option 2: take each sentence's maximum topic probability, regardless of topic
        res_combine = {}
        for i in range(len_sentences_cut):
            res_row_i = res_lda_v[:, i]
            res_row_i_argmax = np.argmax(res_row_i)
            res_combine[self.sentences[i]] = res_row_i[res_row_i_argmax]
        score_sen = [(rc[1], rc[0]) for rc in
                     sorted(res_combine.items(), key=lambda d: d[1], reverse=True)]
    num_min = min(num, int(len_sentences_cut * 0.6))
    return score_sen[0:num_min]
def keyword(self, text, num=6, score_min=0.025, win_size=3, type_sim="total",
            type_encode="avg", config={"alpha": 0.86, "max_iter": 100}):
    """
    Keyword extraction: textrank over word2vec cosine similarity.
    :param text: str, doc. like "大漠帝国是历史上存在的国家吗?你知不知道?嗯。"
    :param num: int, number of keywords to return. like 6
    :param score_min: float, minimum keyword score to keep. like 0.025
    :param win_size: int, window size for word co-occurrence. like 2
    :param type_sim: str, type of similarity. like "total", "cosine"
    :param type_encode: str, type of word encoding. like "avg"
    :param config: dict, config of pagerank. like {"alpha": 0.86, "max_iter": 100}
    :return: list, result of keyword. like [(0.020411696169510562, '手机'), (0.016149784106276977, '夏普')]
    """
    # split into sentences
    if isinstance(text, str):
        self.sentences = cut_sentence(text)
    elif isinstance(text, list):
        self.sentences = text
    else:
        raise RuntimeError("text type must be list or str")
    # tokenize with macropodus_cut
    self.macropodus_word = [macropodus_cut(sentence) for sentence in self.sentences]
    # remove stop words
    self.sentences_word = [[w for w in mw if w not in self.stop_words.values()]
                           for mw in self.macropodus_word]
    # graph vertices: one per distinct word
    word2index = {}
    index2word = {}
    word_index = 0
    for sent_words in self.sentences_word:
        for word in sent_words:
            if not word in word2index:
                word2index[word] = word_index
                index2word[word_index] = word
                word_index += 1
    graph_words = np.zeros((word_index, word_index))
    # graph edges: weighted by the cosine similarity of the two words in a window
    for sent_words in self.sentences_word:
        for cw_1, cw_2 in self.cut_window(sent_words, win_size=win_size):
            if cw_1 in word2index and cw_2 in word2index:
                idx_1, idx_2 = word2index[cw_1], word2index[cw_2]
                score_w2v_cosine = self.similarity(cw_1, cw_2, type_sim=type_sim,
                                                   type_encode=type_encode)
                graph_words[idx_1][idx_2] = score_w2v_cosine
                graph_words[idx_2][idx_1] = score_w2v_cosine
    # build the similarity graph (networkx >= 3.0 renames this to nx.from_numpy_array)
    w2v_cosine_sim = nx.from_numpy_matrix(graph_words)
    # pagerank over the word graph
    sens_scores = nx.pagerank(w2v_cosine_sim, **config)
    # sort by score
    sen_rank = sorted(sens_scores.items(), key=lambda x: x[1], reverse=True)
    # keep at most topk to avoid going out of range
    topk = min(len(sen_rank), num)
    # return (score, word) pairs above the score threshold
    return [(sr[1], index2word[sr[0]]) for sr in sen_rank
            if len(index2word[sr[0]]) > 1 and score_min <= sr[1]][0:topk]
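# A minimal sketch of ranking graph nodes with networkx pagerank, mirroring the word-graph step
# above; the adjacency matrix and its weights are made up for illustration.
import numpy as np
import networkx as nx

adj_demo = np.array([[0.0, 0.8, 0.3],
                     [0.8, 0.0, 0.5],
                     [0.3, 0.5, 0.0]])           # symmetric word-to-word similarity
graph_demo = nx.from_numpy_array(adj_demo)       # nx.from_numpy_matrix on networkx < 3.0
scores_demo = nx.pagerank(graph_demo, alpha=0.86, max_iter=100)
print(sorted(scores_demo.items(), key=lambda kv: kv[1], reverse=True))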
def summarize(self, text, num=320, title=None):
    """
    Rank the sentences of a text.
    :param text: str or list
    :param num: int
    :param title: str
    :return: list
    """
    # split into sentences
    if isinstance(text, str):
        self.sentences = cut_sentence(text)
    elif isinstance(text, list):
        self.sentences = text
    else:
        raise RuntimeError("text type must be list or str")
    self.title = title
    if self.title:
        self.title = macropodus_cut(title)
    # tokenize with POS tags
    self.sentences_tag_cut = [jieba_tag_cut(extract_chinese(sentence)) for sentence in self.sentences]
    # words only, without POS tags
    sentences_cut = [[jc for jc in jtc.keys()] for jtc in self.sentences_tag_cut]
    # remove stop words
    self.sentences_cut = [list(filter(lambda x: x not in self.stop_words, sc)) for sc in sentences_cut]
    # word frequency
    self.words = []
    for sen in self.sentences_cut:
        self.words = self.words + sen
    self.word_count = dict(Counter(self.words))
    # score each word by its frequency, stored in self.word_freqs
    self.word_freqs = {}
    self.len_words = len(self.words)
    for k, v in self.word_count.items():
        self.word_freqs[k] = v * 0.5 / self.len_words
    # uni-, bi- and tri-gram features
    [gram_uni, gram_bi, gram_tri] = get_ngrams("".join(self.sentences), ns=[1, 2, 3])
    ngrams = gram_uni + gram_bi + gram_tri
    self.ngrams_count = dict(Counter(ngrams))
    # sentence position scores
    scores_posi = self.score_position()
    # sentence length scores
    scores_length = self.score_length()
    # POS scores: noun (1.2), pronoun (0.8), verb (1.0)
    scores_tag = self.score_tag()
    res_rank = {}
    self.res_score = []
    for i in range(len(sentences_cut)):
        sen_cut = self.sentences_cut[i]  # words of the sentence
        # ngram score
        [gram_uni_, gram_bi_, gram_tri_] = get_ngrams(self.sentences[i], ns=[1, 2, 3])
        n_gram_s = gram_uni_ + gram_bi_ + gram_tri_
        score_ngram = sum([self.ngrams_count[ngs] if ngs in self.ngrams_count else 0
                           for ngs in n_gram_s]) / (len(n_gram_s) + 1)
        # average word length of the sentence
        score_word_length_avg = sum([len(sc) for sc in sen_cut]) / (len(sen_cut) + 1)
        score_posi = scores_posi[i]
        score_length = scores_length[i]
        score_tag = scores_tag[i]
        if self.title:  # combine scores when a title is given
            score_title = self.score_title(sen_cut)
            score_total = (score_title * 0.5 + score_ngram * 2.0 + score_word_length_avg * 0.5
                           + score_length * 0.5 + score_posi * 1.0 + score_tag * 0.6) / 6.0
            # keep per-component scores for inspection
            self.res_score.append(["score_title", "score_ngram", "score_word_length_avg",
                                   "score_length", "score_posi", "score_tag", "sentences"])
            self.res_score.append([score_title, score_ngram, score_word_length_avg,
                                   score_length, score_posi, score_tag, self.sentences[i]])
        else:  # combine scores when no title is given
            score_total = (score_ngram * 2.0 + score_word_length_avg * 0.5 + score_length * 0.5
                           + score_posi * 1.0 + score_tag * 0.6) / 5.0
            # keep per-component scores for inspection
            self.res_score.append(["score_ngram", "score_word_length_avg", "score_length",
                                   "score_posi", "score_tag", "sentences"])
            self.res_score.append([score_ngram, score_word_length_avg, score_length,
                                   score_posi, score_tag, self.sentences[i]])
        res_rank[self.sentences[i].strip()] = score_total
    # cap the number of returned sentences
    num_min = min(num, int(len(self.word_count) * 0.6))
    res_rank_sort = sorted(res_rank.items(), key=lambda rr: rr[1], reverse=True)
    res_rank_sort_reverse = [(rrs[1], rrs[0]) for rrs in res_rank_sort][0:num_min]
    return res_rank_sort_reverse
def summarize(self, text, num=320, topic_min=5, judge_topic='all'):
    """
    LSI (truncated SVD) based summarization.
    :param text: str
    :param num: int
    :param topic_min: int
    :param judge_topic: str
    :return: list
    """
    # split into sentences
    if isinstance(text, str):
        self.sentences = cut_sentence(text)
    elif isinstance(text, list):
        self.sentences = text
    else:
        raise RuntimeError("text type must be list or str")
    len_sentences_cut = len(self.sentences)
    # tokenize
    sentences_cut = [[word for word in macropodus_cut(extract_chinese(sentence))
                      if word.strip()] for sentence in self.sentences]
    # remove stop words
    self.sentences_cut = [list(filter(lambda x: x not in self.stop_words, sc)) for sc in sentences_cut]
    self.sentences_cut = [" ".join(sc) for sc in self.sentences_cut]
    # tf-idf of each sentence
    sen_tfidf = tfidf_fit(self.sentences_cut)
    # topic number, heuristic: at most topic_min, and at most half the sentence count
    topic_num = min(topic_min, int(len(sentences_cut) / 2))
    svd_tfidf = TruncatedSVD(n_components=topic_num, n_iter=32)
    res_svd_u = svd_tfidf.fit_transform(sen_tfidf.T)
    res_svd_v = svd_tfidf.components_
    if judge_topic:
        ### option 1: take the top-k sentences of the dominant topic
        topic_t_score = np.sum(res_svd_v, axis=-1)
        # rank the topics of each column (one sentence, topic_num topics); row 0 holds the largest
        res_nmf_h_soft = res_svd_v.argsort(axis=0)[-topic_num:][::-1]
        # count, per topic, the sentences for which it is dominant
        exist = (res_nmf_h_soft <= 0) * 1.0
        factor = np.ones(res_nmf_h_soft.shape[1])
        topic_t_count = np.dot(exist, factor)
        # normalize
        topic_t_count /= np.sum(topic_t_count, axis=-1)
        topic_t_score /= np.sum(topic_t_score, axis=-1)
        # pick the topic maximizing dominant-sentence share plus total-score share
        topic_t_tc = topic_t_count + topic_t_score
        topic_t_tc_argmax = np.argmax(topic_t_tc)
        # final scores come from that dominant topic
        res_nmf_h_soft_argmax = res_svd_v[topic_t_tc_argmax].tolist()
        res_combine = {}
        for l in range(len_sentences_cut):
            res_combine[self.sentences[l]] = res_nmf_h_soft_argmax[l]
        score_sen = [(rc[1], rc[0]) for rc in
                     sorted(res_combine.items(), key=lambda d: d[1], reverse=True)]
    else:
        ### option 2: take each sentence's maximum topic weight, regardless of topic
        res_combine = {}
        for i in range(len_sentences_cut):
            res_row_i = res_svd_v[:, i]
            res_row_i_argmax = np.argmax(res_row_i)
            res_combine[self.sentences[i]] = res_row_i[res_row_i_argmax]
        score_sen = [(rc[1], rc[0]) for rc in
                     sorted(res_combine.items(), key=lambda d: d[1], reverse=True)]
    num_min = min(num, int(len_sentences_cut * 0.6))
    return score_sen[0:num_min]