Example #1
import collections

import jieba
from jieba import analyse
from matplotlib import pyplot
from wordcloud import WordCloud

from config import mongoutil


def countCiYun():

    comments = mongoutil.getCollection1()
    print('Total document count:', comments.estimated_document_count())
    # comments.find() returns a pymongo.cursor.Cursor
    cursor = comments.find()
    # Walk the cursor and join every document's comment text into one string
    text = ''.join(map(lambda doc: doc.get('comment') or '', cursor))

    # Load the user-defined segmentation dictionary
    jieba.load_userdict(r'../analysis/user_dict.txt')
    # Stop-word list to filter out of the extracted keywords
    analyse.set_stop_words(r'../analysis/stopwords.txt')

    # Character-frequency counter over the raw text (not used further below)
    m = collections.Counter(text)
    # Extract the 40 highest-weight keywords as the word-cloud vocabulary
    tags = analyse.extract_tags(text, topK=40, withWeight=False)
    # Space-separated keyword list fed to WordCloud
    new_text = ' '.join(tags)
    # countFinalWordsList(text, new_text)

    # Generate the word cloud from the extracted keywords.
    # A font that supports Chinese must be specified, otherwise the
    # Chinese words cannot be rendered.
    wc = WordCloud(
        max_words=200,  # maximum number of words in the cloud
        width=1099,  # width and height of the output image
        height=724,
        # font for the cloud text (styling, and avoids garbled Chinese glyphs)
        font_path=r'../example/fonts/FZXingKai-S04S.TTF').generate(new_text)

    # Render the cloud (standard rectangular layout), hide the axes,
    # and save the image to disk
    pyplot.figure()
    pyplot.imshow(wc, interpolation='bilinear')
    pyplot.axis('off')
    wc.to_file(r'../static/images/wc_8_changanCS35PLUS.png')
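
Both scripts rely on config.mongoutil.getCollection1(), which is not part of this listing. Below is a minimal sketch of what that helper might look like, assuming a pymongo connection to a local MongoDB instance; the database and collection names are hypothetical placeholders, not taken from the original project.

# config/mongoutil.py -- minimal sketch; 'autohome' and 'comments_cs35plus'
# are hypothetical names chosen for illustration only.
from pymongo import MongoClient

_client = MongoClient('mongodb://localhost:27017/')

def getCollection1():
    # Each document in this collection is expected to carry a 'comment'
    # string field holding one user review.
    return _client['autohome']['comments_cs35plus']

With a helper like this in place, the word cloud can be produced by simply calling countCiYun().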
# coding:utf-8
from pyecharts import Pie

from config import mongoutil

# HU (head unit, i.e. the in-dash large screen) user-experience statistics

comments = mongoutil.getCollection1()
print('Number of records:', comments.estimated_document_count())
cursor = comments.find()
text = ''.join(map(lambda doc: doc.get('comment') or '', cursor))

# Keywords to tally, in order: hard to use, slow to respond, crashes easily,
# updates quickly, classy design, ugly, poor interaction, good interaction,
# quick to pick up, slow to pick up, easy to use
attr = [
    "难用", "反应慢", "易死机", "更新快", "设计高大上", "难看", "交互差", "交互好", "上手快", "上手慢", "易用"
]
# Count the occurrences of each keyword in the concatenated comment text
v1 = []
for attri in attr:
    print(attri)
    v1.append(text.count(attri))

print(v1)
# Pie chart of the HU interaction UX keyword counts
pie = Pie("")
pie.add(
    "",
    attr,