def frp_multi(fr):
    """Run aspect-based sentiment analysis over every review summary in *fr*
    and write aggregated triple counts to ./ndetails.csv.

    Parameters
    ----------
    fr : dict
        Expects ``fr['reviews']`` to be an iterable of dicts, each with a
        'summary' text field.  # assumes Chinese text — segmented with jieba

    Side effects
    ------------
    Writes ./ndetails.csv with columns 对象 / 评价极性 / 描述词 / 评论数.
    Returns None.
    """
    from collections import Counter  # local import: file-level import block lives elsewhere

    summaries = [review['summary'] for review in fr['reviews']]

    analyzer = SentimentAnalysis()

    # Count identical (target, description, sentiment) triples across all
    # reviews.  Counter preserves first-seen insertion order — matching the
    # original de-duplication order — and is O(n) instead of the original
    # O(n^2) list.count()/list.index() pattern.
    triple_counts = Counter()
    for summary in summaries:
        # SentimentAnalysis expects space-separated tokens.
        segmented = " ".join(jieba.cut(summary, cut_all=False))
        for item in analyzer.analyze(segmented):
            # item layout per original code: item[0]=target,
            # item[1]=description, item[4]=sentiment polarity.
            triple_counts[(item[0], item[1], item[4])] += 1

    rows = [
        {"对象": target, "评价极性": sentiment, "描述词": description, "评论数": count}
        for (target, description, sentiment), count in triple_counts.items()
    ]
    df = pd.DataFrame(rows, columns=["对象", "评价极性", "描述词", "评论数"])
    df.to_csv("./ndetails.csv", index=False)
def frp_single(sentence):
    """Analyze a single (pre-segmented) sentence and return a list of JSON
    strings, one per extracted (target, description, sentiment) triple.

    Parameters
    ----------
    sentence : str
        Space-separated tokens, as expected by SentimentAnalysis.analyze.

    Returns
    -------
    list[str]
        One JSON object string per analysis result, in result order.
    """
    analyzer = SentimentAnalysis()
    triples = analyzer.analyze(sentence)

    # item[0]=target, item[1]=description, item[4]=sentiment (per analyzer).
    records = [
        {'target': item[0], 'description': item[1], 'sentiment': item[4]}
        for item in triples
    ]
    return [
        json.dumps(record, default=set_default, ensure_ascii=False)
        for record in records
    ]
import argparse  # fix: argparse was used below but never imported (NameError)

from preprocess import WordSet, WordEmbedding, KnowledgeBase
from sentiment_analysis import SentimentAnalysis

# Example pre-segmented inputs kept for reference:
# sentence = '外观 漂亮'
# sentence = '外观 不 太 漂亮'
# sentence = '高 规格 的 用料 和 精致 的 做工'
# sentence = '炫酷 的 造型 、 充沛 的 动力 再 加上 本田 家族 运动 基因 的 传承'

# Read the sentence to analyze from the command line: -s "<segmented text>"
parser = argparse.ArgumentParser()
parser.add_argument('-s', required=True)
args = parser.parse_args()
sentence = args.s

abc = SentimentAnalysis()
result = abc.analyze(sentence)

print('--------------------')
print('%s\n' % (sentence))
# item[0]=aspect, item[1]=opinion, item[4]=sentiment relation.
for item in result:
    aspect = item[0]
    opinion = item[1]
    relation = item[4]
    print('%s\t%s\t%s' % (aspect, opinion, relation))
print('--------------------')

# NOTE(review): this loop looks truncated in the visible chunk —
# `knowledge` (item[5]) is assigned but never used here; confirm against
# the full file before removing.
for item in result:
    aspect = item[0]
    opinion = item[1]
    relation = item[4]
    knowledge = item[5]