def task(param):
    """Score question/answer pairs from one worker shard and split them by match score.

    Parameters
    ----------
    param : dict
        Expected keys (inferred from usage — confirm against the caller):
        - 'id'    : shard/process identifier, used in the per-shard KB filename
                    and in progress messages.
        - 'lines' : iterable of strings, each a tab-separated "question\\tanswer" pair.

    Returns
    -------
    dict with keys:
        'qaResult'         : list of "question\\tanswer\\n" lines that scored > 0.7
        'noqaResult'       : list of the remaining well-formed lines
        'sentence_number'  : count of well-formed lines processed
        'knowledge_number' : count of lines that passed the score threshold
    """
    print('加载模型中...')  # "loading models..."
    NlpModel = nlpModel()
    W2vModel = w2vModel()
    TripleModel = tripleModel(NlpModel)
    # Forward-slash path: portable and consistent with the module-level
    # 'qa_data/kb.json' usage elsewhere in this file (the original used a
    # Windows-only backslash, which fails on POSIX systems).
    QAModel = qaModel('qa_data/rkb' + str(param['id']) + '.json', TripleModel, W2vModel)

    qaResult = []
    noqaResult = []
    lines = param['lines']
    sentence_number = 0
    knowledge_number = 0

    for line in lines:
        try:
            qaList = line.strip().split('\t')
            # Malformed lines (not exactly question<TAB>answer) are skipped
            # and deliberately NOT counted in sentence_number.
            if len(qaList) != 2:
                continue
            question = qaList[0]
            answer = qaList[1]
            # NOTE(review): getMatchScore's semantics are not visible here;
            # presumably score in [0, 1] measures Q/A relatedness — confirm.
            score, _reason = QAModel.getMatchScore(question, answer)
            if score > 0.7:  # empirical threshold — TODO confirm provenance
                knowledge_number += 1
                qaResult.append(question + '\t' + answer + '\n')
            else:
                noqaResult.append(question + '\t' + answer + '\n')
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; a bad line is logged and processing continues.
            print('process:' + str(param['id']) + ' error ' + line)
            traceback.print_exc()
        sentence_number += 1
        if sentence_number % 100 == 0:
            print('process:%d done%d qa%d noqa%d' % (
                param['id'], sentence_number, len(qaResult), len(noqaResult)))

    print('process:%d finish%d qa%d noqa%d' % (
        param['id'], sentence_number, len(qaResult), len(noqaResult)))
    # Persist the per-shard knowledge base built up during scoring.
    QAModel.saveKB()

    result = {
        'qaResult': qaResult,
        'noqaResult': noqaResult,
        'sentence_number': sentence_number,
        'knowledge_number': knowledge_number
    }
    return result
# -*- coding: utf-8 -*- import sys import traceback from nlpModel import nlpModel from qaModel import qaModel from w2vModel import w2vModel from tripleModel import tripleModel modelname = 'beam1.gm' print('加载模型中...') NlpModel = nlpModel() W2vModel = w2vModel() TripleModel = tripleModel(NlpModel) QAModel = qaModel('qa_data/kb.json', TripleModel, W2vModel) print('开始读取文件') scoreNums = [0] * 11 scoreSum = 0 with open('Twitter.100w.test.key', 'r') as keyFile, open('Twitter.100w.test.beam1.gm.output', 'r') as valueFile,\ open('Twitter.100w.rule.'+modelname+'.score','w') as out_file: id = 1 for key, value in zip(keyFile, valueFile): key = key.strip() value = value.strip() try: