from sklearn.utils import Bunch
from sklearn.feature_extraction.text import TfidfVectorizer
import joblib


def calc_tfidf(trainsetfile, stopwordfile, dstdir):
    # Build the tf-idf term-document matrix for the training set and persist it as a word bag.
    data_set = joblib.load(trainsetfile)
    wordbag = Bunch(target_name=[], label=[], filenames=[], tdm=[], vocabulary={})
    wordbag.target_name = data_set.target_name
    wordbag.label = data_set.label

    corpus = data_set.contents
    stopwordlist = read_stopword(stopwordfile)
    vectorize = TfidfVectorizer(sublinear_tf=True, max_df=0.8, min_df=3,
                                max_features=50000, stop_words=stopwordlist)
    feature_train = vectorize.fit_transform(corpus)
    wordbag.tdm = feature_train
    wordbag.vocabulary = vectorize.vocabulary_
    joblib.dump(wordbag, dstdir + "/" + "word_bag.data", compress=3)
def testset_tfidf(testsetfile, stopwordfile, myvocabulary):
    # Vectorize the test set against the training vocabulary so the feature space matches.
    data_set = joblib.load(testsetfile)
    wordbag = Bunch(target_name=[], label=[], filenames=[], tdm=[], vocabulary={})
    wordbag.target_name = data_set.target_name
    wordbag.label = data_set.label

    corpus = data_set.contents
    stopwordlist = read_stopword(stopwordfile)
    vectorize = TfidfVectorizer(sublinear_tf=True, stop_words=stopwordlist,
                                vocabulary=myvocabulary)
    feature_train = vectorize.fit_transform(corpus)
    wordbag.tdm = feature_train
    joblib.dump(wordbag, "test_wordbag/test_word_bag.data", compress=3)
    return wordbag
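Both functions rely on a read_stopword helper that is not part of this excerpt. A minimal sketch, assuming the stop-word file holds one word per line:

def read_stopword(stopwordfile):
    # Hypothetical helper (not in the original excerpt): return one stop word
    # per line as a list, which TfidfVectorizer accepts via stop_words=.
    with open(stopwordfile, "r", encoding="utf-8") as f:
        return [line.strip() for line in f if line.strip()]

With that in place, a typical call sequence trains first and then reuses the saved vocabulary for the test set; the paths below are placeholders, not from the original code:

calc_tfidf("wordbag/train_set.data", "extra_dict/stop_words.txt", "wordbag")
train_bag = joblib.load("wordbag/word_bag.data")
test_bag = testset_tfidf("wordbag/test_set.data", "extra_dict/stop_words.txt",
                         train_bag.vocabulary)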
    
Example #3
def tfidf_bag(corpus_Bunch, filename):
    # Fit a tf-idf model on the corpus contents.
    vectorizer = TfidfVectorizer()
    tfidf = vectorizer.fit_transform(corpus_Bunch.content)
    vocabulary = vectorizer.vocabulary_

    # Pack the tf-idf matrix and metadata into a new Bunch.
    tfidf_Bunch = Bunch(targetName=[], label=[], tfidf=[], vocabulary={})
    tfidf_Bunch.targetName = corpus_Bunch.targetName
    tfidf_Bunch.label = corpus_Bunch.label
    tfidf_Bunch.tfidf = tfidf
    tfidf_Bunch.vocabulary = vocabulary

    # Persist the Bunch with pickle.
    with open(filename, "wb") as file_obj:
        pickle.dump(tfidf_Bunch, file_obj)
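A quick sanity check is to build a tiny corpus bunch, run tfidf_bag, and unpickle the result. The bunch fields, file name, and toy corpus below are illustrative only, and the usual pickle/Bunch/TfidfVectorizer imports are assumed:

corpus_Bunch = Bunch(targetName=["pos", "neg"], label=[0, 1],
                     content=["good film", "bad film"])  # toy data, not from the original
tfidf_bag(corpus_Bunch, "tfidf_bag.data")
with open("tfidf_bag.data", "rb") as f:
    bag = pickle.load(f)
print(bag.tfidf.shape, len(bag.vocabulary))  # (2, 3) and 3 for this toy corpus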
Example #4
def testset_tfidf(testsetfile, stopwordfile, myvocabulary):
    data_set = joblib.load(testsetfile)
    wordbag = Bunch(target_name=[],
                    label=[],
                    filenames=[],
                    tdm=[],
                    vocabulary={})
    wordbag.target_name = data_set.target_name
    wordbag.label = data_set.label

    corpus = data_set.contents
    stopwordlist = read_stopword(stopwordfile)
    vectorize = TfidfVectorizer(sublinear_tf=True,
                                stop_words=stopwordlist,
                                vocabulary=myvocabulary)
    feature_train = vectorize.fit_transform(corpus)
    wordbag.tdm = feature_train
    joblib.dump(wordbag, "test_wordbag/test_word_bag.data", compress=3)
    return wordbag
Example #5
def calc_tfidf(trainsetfile, stopwordfile, dstdir):
    data_set = joblib.load(trainsetfile)
    wordbag = Bunch(target_name=[],
                    label=[],
                    filenames=[],
                    tdm=[],
                    vocabulary={})
    wordbag.target_name = data_set.target_name
    wordbag.label = data_set.label

    corpus = data_set.contents
    stopwordlist = read_stopword(stopwordfile)
    vectorize = TfidfVectorizer(sublinear_tf=True,
                                max_df=0.8,
                                min_df=3,
                                max_features=50000,
                                stop_words=stopwordlist)
    feature_train = vectorize.fit_transform(corpus)
    wordbag.tdm = feature_train
    wordbag.vocabulary = vectorize.vocabulary_
    joblib.dump(wordbag, dstdir + "/" + "word_bag.data", compress=3)
Example #6
def tfidfspace(bunch_file, tfidf_file, train_bunch_file=None):
    tfidfbunch = Bunch(label=[], contents=[], tdm=[], vocabulary={})
    # Read the bunch from bunch_file and copy its labels into tfidfbunch
    with open(bunch_file, "rb") as f:
        bunch = pickle.load(f)
    tfidfbunch.label = bunch.label
    tfidfbunch.contents = bunch.contents
    if train_bunch_file is None:  # training data: build the tf-idf space from scratch
        vectorizer = TfidfVectorizer(max_df=0.4, sublinear_tf=True)
        tfidfbunch.tdm = vectorizer.fit_transform(bunch.contents)
        tfidfbunch.vocabulary = vectorizer.vocabulary_
    else:  # test data: build the tf-idf space with the training set's vocabulary so the feature space matches
        with open(train_bunch_file, "rb") as f:
            train_bunch = pickle.load(f)
        tfidfbunch.vocabulary = train_bunch.vocabulary
        vectorizer = TfidfVectorizer(max_df=0.4,
                                     sublinear_tf=True,
                                     vocabulary=train_bunch.vocabulary)
        tfidfbunch.tdm = vectorizer.fit_transform(bunch.contents)
    # Write tfidfbunch to tfidf_file
    with open(tfidf_file, "wb") as f:
        pickle.dump(tfidfbunch, f)
    # Save the fitted tf-idf vectorizer
    joblib.dump(vectorizer, TFIDF_FILE)
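tfidfspace is meant to be called once for the training bunch and once for the test bunch, with the training tf-idf file passed the second time so both share one vocabulary. A minimal sketch with placeholder file names, assuming the module-level TFIDF_FILE constant is defined:

tfidfspace("train_bunch.data", "train_tfidf.data")  # fits a new vocabulary
tfidfspace("test_bunch.data", "test_tfidf.data",
           train_bunch_file="train_tfidf.data")  # reuses the training vocabulary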
Example #7
reload(sys)
sys.setdefaultencoding('utf-8')

# Load the training set
train_path = "wordbag" + "/" + "train_set1124.data"
data_set = joblib.load(train_path)
# print data_set.target_name
# print data_set.contents[0]
####exit
# sys.exit(0)
# Define the word-bag data structure
# word bag after tf-idf computation
wordbag = Bunch(target_name=[], label=[], filenames=[], tdm=[], vocabulary={})
wordbag.target_name = data_set.target_name
wordbag.label = data_set.label
# Corpus
corpus = data_set.contents

# Load the stop words
stopwordpath = "extra_dict/stop_words.txt"
stopword_dic = open(stopwordpath, 'r')
stopword_content = stopword_dic.read()
# Convert the stop words to a list
stopwordlist = stopword_content.splitlines()
stopword_dic.close()

# Word-bag creation time
start = datetime.datetime.now()
print start
# Create the word bag with TfidfVectorizer
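The excerpt stops here. A plausible continuation, following the pattern of calc_tfidf above; the vectorizer parameters and output path are assumptions, not the original code:

vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.8,
                             stop_words=stopwordlist)  # assumed parameters
wordbag.tdm = vectorizer.fit_transform(corpus)
wordbag.vocabulary = vectorizer.vocabulary_
end = datetime.datetime.now()
print end - start  # elapsed build time (Python 2 print, matching the snippet)
joblib.dump(wordbag, "wordbag/word_bag.data", compress=3)  # assumed output path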
Example #8
reload(sys)

# Load the training corpus
data_set={}
# Path to the training corpus
train_path='text_corpus1_wordbag/train_set.data'
file_obj=open(train_path,'rb')

# Read the persisted object
data_set=pickle.load(file_obj)
file_obj.close()

# Define the word-bag data structure
wordbag=Bunch(target_name=[],label=[],filenames=[],tdm=[],vocabulary={})
wordbag.target_name=data_set.target_name
wordbag.label=data_set.label
wordbag.filenames=data_set.filenames

# Build the corpus
corpus=data_set.contents

# Load the stop-word list from a file
stpwrdpath='extra_dict/hlt_stop_words.txt'
stpwrd_dic=open(stpwrdpath,'rb')
stpwrd_content=stpwrd_dic.read()

# Convert the stop words to a list
stpwrdlst=stpwrd_content.splitlines()
stpwrd_dic.close()

# Time word-bag creation: record the start time
Example #9
import pickle  # persistence module
from sklearn.datasets import fetch_20newsgroups
from sklearn.utils import Bunch
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
##################################################################
## Load the data
categories = ["alt.atheism", "soc.religion.christian", "comp.graphics", "sci.med"]  # news categories to fetch
data_set = fetch_20newsgroups(subset="train", categories=categories, shuffle=True, random_state=42)  # download and load the training data (the full set is fetched first, then the selected categories are extracted)
print(data_set.target_names)  # ['alt.atheism', 'comp.graphics', 'sci.med', 'soc.religion.christian']
##################################################################
## Define the word-bag data structure
# tdm: the word bag after tf-idf computation
stpwrdlst = []  # empty stop-word list
wordbag = Bunch(target_name=[], label=[], filenames=[], tdm=[], vocabulary={}, stpwrdlst=[])
wordbag.target_name = data_set.target_names
wordbag.label = data_set.target
wordbag.filenames = data_set.filenames
wordbag.stpwrdlst = stpwrdlst

vectorizer = CountVectorizer(stop_words=stpwrdlst)  # initialize the vector space model with CountVectorizer -- create the word bag
transformer = TfidfTransformer()  # computes the tf-idf weight of each term
counts = vectorizer.fit_transform(data_set.data)  # convert the texts into a term-frequency matrix
fea_train = transformer.fit_transform(counts)  # re-weight the raw counts into tf-idf values
print(fea_train.shape)  # (2257, 35788); 2257 documents, 35788 terms

wordbag.tdm = fea_train  # assign tdm
wordbag.vocabulary = vectorizer.vocabulary_
##################################################################
## Persist the word bag
with open("tmp.data", "wb") as file_obj:
    pickle.dump(wordbag, file_obj)
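Once persisted, the word bag can be reloaded and its tf-idf matrix fed to a classifier. A minimal sketch; MultinomialNB is just one reasonable choice for sparse text features, not something the example prescribes:

from sklearn.naive_bayes import MultinomialNB

with open("tmp.data", "rb") as f:
    bag = pickle.load(f)

# Train on the persisted tf-idf matrix and report training accuracy only.
clf = MultinomialNB().fit(bag.tdm, bag.label)
print(clf.score(bag.tdm, bag.label))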