def synonym_bigram(texts, center_word, filtrate=get_flag):
    """Count bigrams that contain *center_word* (combined-word discovery).

    Registers the center word with the tokenizer, then for every text that
    mentions it, cuts each n-gram sentence into filtered tokens and counts
    the bigrams where the center word is the left member (``left``) or the
    right member (``right``).  Writes both tables, with POS flags and a
    frequency bar, to ``synonym_bigram_<center_word>.xlsx`` as two sheets.

    :param texts: iterable of raw text strings
    :param center_word: the word whose bigram neighbors are collected
    :param filtrate: predicate deciding which tokens are kept
    """
    tk.add_word(center_word, 2000, 'CENTER')
    left, right = Counter(), Counter()
    for text in texts:
        # cheap substring pre-filter before the expensive tokenization
        if center_word in text:
            for sentence in clean.ngram(text):
                words = [w for w in tk.cut(sentence) if filtrate(w)]
                for i in range(len(words) - 1):
                    if words[i] == center_word:
                        word = ' '.join(words[i:i + 2])
                        flag = ' '.join(tk.get_flag(w) for w in words[i:i + 2])
                        left[(word, flag)] += 1
                    if words[i + 1] == center_word:
                        word = ' '.join(words[i:i + 2])
                        flag = ' '.join(tk.get_flag(w) for w in words[i:i + 2])
                        right[(word, flag)] += 1
    # Fix: the original used most_common()[0][1] on both counters, which
    # raises IndexError when the center word never matched on one side.
    # max(values, default=1) is the same top count, but safe when empty.
    u = max(max(left.values(), default=1), max(right.values(), default=1))
    left = corpus.ls2df([(i, j, k, tk.bar(k, u)) for (i, j), k in left.most_common()],
                        ['word', 'flag', 'freq', 'bar'])
    right = corpus.ls2df([(i, j, k, tk.bar(k, u)) for (i, j), k in right.most_common()],
                         ['word', 'flag', 'freq', 'bar'])
    corpus.df2sheets([left, right], ['left', 'right'], 'synonym_bigram_%s.xlsx' % center_word)
def trigram(texts, n=2, stop_words=STOP_WORDS):
    """Statistical language model: count n-gram frequencies.

    Cleans each text, cuts every n-gram sentence into tokens (dropping
    stop words), counts every run of *n* consecutive tokens joined by a
    space, and writes the top N to ``<n>gram.xlsx``.
    """
    counts = Counter()
    for text in texts:
        for sentence in clean.ngram(clear(text)):
            tokens = [w for w in tk.cut(sentence) if w not in stop_words]
            for start in range(len(tokens) + 1 - n):
                counts[' '.join(tokens[start:start + n])] += 1
    table = DataFrame(counts.most_common(N), columns=['word', 'freq'])
    table[['freq', 'word']].to_excel('%dgram.xlsx' % n, index=False)
def trigram_flag(texts, n=2, stop_words=STOP_WORDS):
    """Statistical language model with POS flags.

    Like :func:`trigram`, but each counted key is the (words, flags) pair
    so identical word sequences with different POS taggings stay separate.
    Writes the top N rows, with a frequency bar, to ``<n>gram_flag.xlsx``.
    """
    c = Counter()
    for text in texts:
        for sentence in clean.ngram(clear(text)):
            words = [w for w in tk.cut(sentence) if w not in stop_words]
            for i in range(len(words) + 1 - n):
                word = ' '.join(words[i:i + n])
                flag = ' '.join(tk.get_flag(w) for w in words[i:i + n])
                c[(word, flag)] += 1
    # Fix: the original c.most_common()[0][1] raises IndexError when no
    # n-gram was collected; max(values, default=1) is the same top count.
    u = max(c.values(), default=1)
    rows = [(i, j, k, tk.bar(k, u)) for (i, j), k in c.most_common(N)]
    DataFrame(rows, columns=['word', 'flag', 'freq', 'bar']).to_excel('%dgram_flag.xlsx' % n, index=False)
def trigram_flag_sort(texts, n=2, stop_words=STOP_WORDS):
    """Statistical language model with POS flags, order-insensitive.

    Each n-gram's tokens are sorted (by flag, then word) before counting,
    so permutations of the same word set collapse into one key.  Writes
    the top N rows to ``<n>gram_flag_sort.xlsx``.
    """
    counts = Counter()
    for text in texts:
        for sentence in clean.ngram(clear(text)):
            tokens = [w for w in tk.cut(sentence) if w not in stop_words]
            for start in range(len(tokens) + 1 - n):
                # sort (flag, word) pairs so token order inside the gram is canonical
                pairs = sorted((tk.get_flag(w), w) for w in tokens[start:start + n])
                gram = ' '.join(w for _, w in pairs)
                flags = ' '.join(f for f, _ in pairs)
                counts[(gram, flags)] += 1
    rows = [(freq, flags, gram) for (gram, flags), freq in counts.most_common(N)]
    DataFrame(rows, columns=['freq', 'flag', 'word']).to_excel('%dgram_flag_sort.xlsx' % n, index=False)
def synonym_neighbor(texts, center_word, filtrate=get_flag, half=5):
    """Count words near *center_word*, weighted by inverse distance.

    For each phrase containing the center word, every token within
    ``half`` positions contributes ``1 / distance`` to its count (the
    center word itself contributes 1).  Writes the ranked neighbors,
    with POS flag and frequency bar, to
    ``synonym_neighbor_<center_word>.xlsx``.

    :param texts: iterable of raw text strings
    :param center_word: the word whose neighborhood is collected
    :param filtrate: predicate deciding which tokens are kept
    :param half: window radius on each side of the center word
    """
    tk.add_word(center_word, 2000, 'CENTER')
    c = Counter()
    for text in texts:
        # cheap substring pre-filter before the expensive tokenization
        if center_word in text:
            for sentence in clean.text2phrase(text):
                words = [w for w in tk.cut(sentence) if filtrate(w)]
                length = len(words)
                for i in range(length):
                    if words[i] == center_word:
                        for j in range(max(i - half, 0), min(i + 1 + half, length)):
                            word = words[j]
                            flag = tk.get_flag(word)
                            # closer neighbors weigh more; the center itself gets 1
                            c[(word, flag)] += 1 / max(abs(j - i), 1)
    # The original indexed most_common()[1][1] to skip the top entry
    # (the center word itself, which dominates the counts).
    # Fix: that raised IndexError with fewer than two entries — guard it.
    ranked = c.most_common()
    u = ranked[1][1] if len(ranked) > 1 else (ranked[0][1] if ranked else 1)
    df = corpus.ls2df([(i, j, k, tk.bar(k, u)) for (i, j), k in ranked],
                      ['word', 'flag', 'freq', 'bar'])
    corpus.df2sheet(df, 'synonym_neighbor_%s.xlsx' % center_word)
def texts2sentences(texts):
    """Split each text into sentences and tokenize them.

    Returns a flat list of token lists — one per sentence across all
    texts — keeping only tokens accepted by ``get_flag``.
    """
    sentences = []
    for text in texts:
        for sentence in clean.text2sentence(clear(text)):
            sentences.append([w for w in tk.cut(sentence) if get_flag(w)])
    return sentences
def cut(text):
    """Yield tokens from *text*, skipping non-words and discarded POS flags."""
    for sentence in clean.ngram(text.strip()):
        for token in tk.cut(sentence):
            # keep the original check order: word-ness first, then POS flag
            if not clean.is_word(token):
                continue
            if tk.get_flag(token) in discarded_flags:
                continue
            yield token
def cut(text):
    """Yield stripped, non-empty tokens that are not stop words.

    NOTE(review): this iterates directly over ``clear(text)``; elsewhere
    in this file ``clear`` appears to return a string, which would make
    this loop walk it character by character — confirm that ``clear``
    yields sentences here. TODO confirm
    """
    for sentence in clear(text):
        for token in tk.cut(sentence):
            token = token.strip()
            # empty string is falsy, so this matches the original
            # (word not in STOP_WORDS and word != '') check
            if token and token not in corpus.STOP_WORDS:
                yield token