def add_node(e1, e2, rel, c1, c2):
    """Add the relation (e1)-[rel]->(e2), creating whichever Concept
    endpoints do not exist yet.

    e1/e2: entity names; rel: relation label; c1/c2: their categories.
    Returns a status message string (Chinese, consumed by the UI).
    """
    message = "添加成功"
    a = [e1, e2, rel, c1, c2]
    # Count existing nodes carrying each name to decide what to create.
    num1 = graph.run("MATCH (m:Concept {name:'%s' })return count(m)" % e1).data()[0]['count(m)']
    num2 = graph.run("MATCH (m:Concept {name:'%s' })return count(m)" % e2).data()[0]['count(m)']
    if num1 == 0 and num2 == 0:
        # Neither endpoint exists: create both nodes and the edge at once.
        graph.run(
            "CREATE(n:Concept {name:'%s', cate:'%s'})-[r:%s{relation: '%s'}]->(m:Concept {name:'%s', cate:'%s'})"
            % (e1, c1, rel, rel, e2, c2))
    if num1 != 0 and num2 == 0:
        # e1 exists: attach a freshly created e2 to it.
        graph.run(
            "MATCH(m: Concept {name: '%s', cate:'%s'}) CREATE(m) - [r:%s{relation: '%s'}]->(n:Concept {name:'%s', cate:'%s'})"
            % (e1, c1, rel, rel, e2, c2))
    if num1 == 0 and num2 != 0:
        # e2 exists: create e1 pointing at it.
        graph.run(
            "MATCH(m: Concept {name: '%s', cate:'%s'}) CREATE(n:Concept {name:'%s', cate:'%s'})-[r:%s{relation: '%s'}]->(m)"
            % (e2, c2, e1, c1, rel, rel))
    if num1 != 0 and num2 != 0:
        # Both exist: only the edge is needed.
        # FIX: the original substituted (e2, e1, ...) here, creating e2->e1 —
        # the opposite direction of every other branch. m must be e1, n e2.
        graph.run(
            "MATCH(m: Concept {name: '%s'}),(n:Concept {name:'%s'}) CREATE (m)-[r:%s{relation: '%s'}]->(n)"
            % (e1, e2, rel, rel))
    print(num1, num2, message)
    print(a)
    return message
def query_all():
    """Return (nodes, edges) for the whole graph, shaped by the
    buildNodes/buildEdges helpers."""
    node_records = graph.run('MATCH (p) RETURN p.Name, p.Clan').data()
    edge_records = graph.run(
        'MATCH (p)-[r]->(n) RETURN p.Name, r.relation,n.Name').data()
    return map(buildNodes, node_records), map(buildEdges, edge_records)
def query_paper_info(id):
    """Build an echarts-style {data, links} payload centred on paper `id`:
    papers it cites, its authors, and papers that cite it."""
    json_data = {'data': [], "links": []}
    name_dict = {}
    count = 0

    # The queried paper itself is node 0 (category kept as in the original).
    root = {'name': id, 'category': "AUTHOR"}
    json_data['data'].append(root)
    name_dict[root['name']] = count
    count += 1

    # Outgoing citations: id -> n.
    cited = list(graph.run(
        "match(p:PAPER{paperID:'%s'} )-[r]->(n:PAPER) return p, r, n" % (id)))
    for rec in cited:
        node = {'name': rec['n']['paperID'], 'category': "PAPER"}
        name_dict[node['name']] = count
        count += 1
        json_data['data'].append(node)
        json_data['links'].append({
            'source': name_dict[rec['p']['paperID']],
            'target': name_dict[rec['n']['paperID']],
            'value': 'CITATION',
        })

    # Authors of this paper.
    authors = list(graph.run(
        "match(p:AUTHOR )-[r:AUTHOR2PAPER]->(n:PAPER{paperID:'%s'}) return p, r, n" % (id)))
    for rec in authors:
        node = {'name': rec['p']['authorName'], 'category': "AUTHOR"}
        name_dict[node['name']] = count
        count += 1
        json_data['data'].append(node)
        json_data['links'].append({
            'source': name_dict[rec['p']['authorName']],
            'target': name_dict[rec['n']['paperID']],
            'value': 'AUTHOR2PAPER',
        })

    # Incoming citations: p -> id.
    citing = list(graph.run(
        "match(p:PAPER)-[r]->(n:PAPER{paperID:'%s'} ) return p, r, n " % (id)))
    for rec in citing:
        node = {'name': rec['p']['paperID'], 'category': "PAPER"}
        name_dict[node['name']] = count
        count += 1
        json_data['data'].append(node)
        json_data['links'].append({
            'source': name_dict[rec['p']['paperID']],
            'target': name_dict[rec['n']['paperID']],
            'value': 'CITATION',
        })
    return json_data
def get_data_num():
    """Return (relation_count, entity_count) for the whole graph."""
    entity = graph.run("match (n) return count(*)").data()[0]['count(*)']
    relation = graph.run("MATCH (n)-[r]->() RETURN COUNT(r)").data()[0]['COUNT(r)']
    print(relation, entity)
    return relation, entity
def query_branch(name, deep):
    """Export an n-hop subgraph rooted at `name` through APOC, then read the
    exported JSON file back into an echarts {data, links} payload.

    `deep` selects the hop count ('二度查询' / '三度查询' / '四度查询');
    anything else falls back to three hops.
    """
    # APOC writes into Neo4j's import directory (hard-coded install path).
    export_file = (
        "D:\\Program Files (x86)\\.Neo4jDesktop\\neo4jDatabases\\"
        "database-3c0c8037-2a1b-4d51-baf0-9de130846239\\"
        "installation-3.5.12\\import\\knows-with-node-properties.json")

    templates = {
        '二度查询': (
            'CALL apoc.export.json.query("match data=(n1{name:\'%s\'})-[r1]->(n2) return n1 {.*}, r1 {.*}, n2 {.*}", '
            '"knows-with-node-properties.json", {writeNodeProperties:true})'),
        '三度查询': (
            'CALL apoc.export.json.query("match data=(n1{name:\'%s\'})-[r1]->(n2)-[r2]->(n3) return n1 {.*}, r1 {.*}, n2 {.*}, r2 {.*}, n3 {.*}", '
            '"knows-with-node-properties.json", {writeNodeProperties:true})'),
        '四度查询': (
            'CALL apoc.export.json.query("match data=(n1{name:\'%s\'})-[r1]->(n2)-[r2]->(n3)-[r3]->(n4) return n1 {.*}, r1 {.*}, n2 {.*}, r2 {.*}, n3 {.*}, r3 {.*}, n4 {.*}", '
            '"knows-with-node-properties.json", {writeNodeProperties:true})'),
    }
    # Default (unknown deep) behaves like the three-hop query.
    graph.run(templates.get(deep, templates['三度查询']) % (name))

    json_data = {'data': [], "links": []}  # echarts relation-graph payload

    # Pass 1: collect every distinct "name_cate" node label from the export.
    seen = []
    with open(export_file, encoding='utf-8') as f:
        for line in f:
            record = json.loads(line)
            for node_key in ['n1', 'n2', 'n3', 'n4', 'n5']:
                if node_key in record:
                    seen.append(record[node_key]['name'] + "_" + record[node_key]['cate'])
    seen = list(set(seen))

    name_dict = {}
    count = 0
    for entry in seen:
        parts = entry.split("_")
        name_dict[parts[0]] = count
        count += 1
        json_data['data'].append({'name': parts[0], 'category': CA_LIST[parts[1]]})

    # Pass 2: one link per relation key; r_i joins n_i to n_{i+1}.
    with open(export_file, encoding='utf-8') as f:
        for line in f:
            record = json.loads(line)
            for idx, rel_key in enumerate(['r1', 'r2', 'r3', 'r4']):
                if rel_key in record:
                    json_data['links'].append({
                        'source': name_dict[record["n" + str(idx + 1)]['name']],
                        'target': name_dict[record["n" + str(idx + 2)]['name']],
                        'value': record[rel_key]['relation'],
                    })
    return json_data
def get_KGQA_answer(array):
    """Walk the relation chain encoded in `array` through the Person graph
    and return [json payload, profile text, base64-encoded image]."""
    data_array = []
    for step in range(len(array) - 2):
        # First hop starts from the question subject; later hops start from
        # the previous hop's answer.
        start = array[0] if step == 0 else data_array[-1]['p.Name']
        rel = similar_words[array[step + 1]]
        records = list(graph.run(
            "match(p)-[r:%s{relation: '%s'}]->(n:Person{Name:'%s'}) return p.Name,n.Name,r.relation,p.cate,n.cate"
            % (rel, rel, start)))
        print(records)
        data_array.extend(records)
    print("===" * 36)
    answer = str(data_array[-1]['p.Name'])
    with open("./spider/images/" + "%s.jpg" % (answer), "rb") as image:
        encoded = str(base64.b64encode(image.read()))
    # encoded looks like "b'...'"; strip the bytes-repr wrapper.
    return [get_json_data(data_array), get_profile(answer), encoded.split("'")[1]]
def get_KGQA_answer2(array):
    """Breadth-first expansion: for up to 5 rounds, query the outgoing edges
    of successive names in `array`, appending discovered targets back onto
    `array`. Returns the accumulated edges as a json payload."""
    data_array = []
    print(array)
    limit = len(array)
    for round_no in range(5):
        if round_no == 0:
            name = array[0]
        elif round_no < limit:
            name = array[round_no]
        else:
            break  # ran out of frontier names
        records = list(graph.run(
            "match(p{name:'%s'})-[r]->(n) return p.name,n.name,r.property1,p.label,n.label"
            % (name)))
        data_array.extend(records)
        if not records:
            break  # dead end: stop expanding
        for rec in records:
            array.append(rec['n.name'])
        limit = len(array)
    print("===" * 36)
    return get_json_data(data_array)
def query(name):
    """All relations touching Person `name`, incoming and outgoing, as a
    json payload (nodes, relations and categories)."""
    cypher = (
        "match(p)-[r]->(n:Person{Name:'%s'}) return p.Name,r.relation,n.Name,p.cate,n.cate "
        "Union all "
        "match(p:Person {Name:'%s'}) -[r]->(n) return p.Name, r.relation, n.Name, p.cate, n.cate"
        % (name, name))
    return get_json_data(list(graph.run(cypher)))
def query(name, method):
    """Search relations by node name.

    method == 'true'  -> fuzzy (substring) match on either endpoint.
    method == 'false' -> exact regex match on either endpoint.
    Any other value returns None (unchanged original behavior).
    """
    if method == 'true':
        records = list(graph.run(
            "match(p)-[r]->(n) where n.Name =~ '.*%s.*' return p.Name,r.relation,n.Name,p.cate,n.cate "
            "Union all "
            "match(p) -[r]->(n) where p.Name =~ '.*%s.*' return p.Name, r.relation, n.Name, p.cate, n.cate"
            % (name, name)))
        return get_json_data(records)
    elif method == 'false':
        records = list(graph.run(
            "match(p)-[r]->(n) where n.Name =~ '%s' return p.Name,r.relation,n.Name,p.cate,n.cate "
            "Union all "
            "match(p) -[r]->(n) where p.Name =~ '%s' return p.Name, r.relation, n.Name, p.cate, n.cate"
            % (name, name)))
        return get_json_data(records)
def query(name):
    """Everything directly connected to Person `name`, both directions."""
    cypher = (
        "match(p )-[r]->(n:Person{Name:'%s'}) return p.Name,r.relation,n.Name,p.cate,n.cate "
        "Union all "
        "match(p:Person {Name:'%s'}) -[r]->(n) return p.Name, r.relation, n.Name, p.cate, n.cate"
        % (name, name))
    return get_json_data(list(graph.run(cypher)))
def query_coauthor(name, level):
    """Co-author neighbourhood of AUTHOR `name`.

    level (str): '1' = direct co-authors both directions,
    '2'/'3' = outgoing co-author chains up to 2/3 hops.

    Raises:
        ValueError: for any other level. (Previously an unknown level left
        `data` unbound and crashed with UnboundLocalError at the list() call.)
    """
    if level == '1':
        data = graph.run(
            "match(p )-[r:COAUTHOR]->(n:AUTHOR{authorName:'%s'}) return p.authorName, r, n.authorName "
            "Union all "
            "match(p:AUTHOR {authorName:'%s'}) -[r:COAUTHOR]->(n) return p.authorName, r, n.authorName"
            % (name, name))
    elif level == '2':
        data = graph.run(
            "match(p:AUTHOR{authorName:'%s'} )-[r:COAUTHOR*1..2]->(n) return p.authorName, r, n.authorName"
            % (name))
    elif level == '3':
        data = graph.run(
            "match(p:AUTHOR{authorName:'%s'} )-[r:COAUTHOR*1..3]->(n) return p.authorName, r, n.authorName"
            % (name))
    else:
        raise ValueError("unsupported co-author search level: %r" % (level,))
    return get_json_data(list(data))
def query(name):
    """All relations touching the node named `name` (incoming + outgoing)."""
    # FIX: the second union arm filtered on {Name:'...'} while this graph
    # stores the property in lowercase — see the first arm ({name:...}) and
    # the returned p.name/n.name — so outgoing edges were never matched.
    data = graph.run(
        "match(p)-[r]->(n{name:'%s'}) return p.name,r.property1,n.name,p.label,n.label "
        "Union all "
        "match(p{name:'%s'}) -[r]->(n) return p.name, r.property1, n.name, p.label, n.label"
        % (name, name))
    data = list(data)
    return get_json_data(data)
def query_name(name):
    """Return (nodes, edges) for everything directly connected to Person
    `name`, shaped by buildNodes/buildEdges."""
    node_cypher = (
        "match(p)-[r]->(n:Person{Name:'%s'}) return p.Name,n.Name,p.Clan,n.Clan "
        "Union all "
        "match(p:Person {Name:'%s'}) -[r]->(n) return p.Name,n.Name,p.Clan,n.Clan"
        % (name, name))
    edge_cypher = (
        "match(p)-[r]->(n:Person{Name:'%s'}) return p.Name, r.relation,n.Name "
        "Union all "
        "match(p:Person {Name:'%s'}) -[r]->(n) return p.Name, r.relation,n.Name"
        % (name, name))
    nodes = map(buildNodes, graph.run(node_cypher).data())
    edges = map(buildEdges, graph.run(edge_cypher).data())
    return nodes, edges
def query(name):
    """Relations around Keyword `name` (case-normalised with capitalize())."""
    key = name.capitalize()
    records = list(graph.run(
        "match(p )-[r]->(n:Keyword{Name:'%s'}) return p.Name,r.relation,n.Name,p.level,n.level "
        "Union all "
        "match(p:Keyword{Name:'%s'}) -[r]->(n) return p.Name, r.relation, n.Name, p.level, n.level"
        % (key, key)))
    return get_json_data(records, name)
def get_KGQA_answer(sqls):
    """Run every cypher in `sqls` (list of {'question_type', 'sql': [...]})
    and merge all results into a single {data, links} payload."""
    merged = {'data': [], "links": []}
    for entry in sqls:
        question_type = entry['question_type']  # kept: validates the entry shape
        for cypher in entry['sql']:
            part = get_json_data(list(graph.run(cypher)))
            merged['data'].extend(part['data'])
            merged['links'].extend(part['links'])
    return merged
def get_KGQA_answer(array):
    """Answer a multi-hop question: follow the relation chain in `array`,
    fetch/cache a picture for the final entity, and return
    [json payload, profile text, base64 image string]."""
    tags_list = [
        '小说', '编程', 'web', '算法', '神经网络', '科技', '名著', '推理', '悬疑',
        '青春', '言情', '校园', '经济', '漫画', '散文', '其他'
    ]
    data_array = []
    result = '默认'
    for hop in range(len(array) - 2):
        # Hop 0 starts from the question subject; later hops start from the
        # previous hop's answer (data_array[-1]).
        start = array[0] if hop == 0 else data_array[-1]['p.Name']
        rel = similar_words[array[hop + 1]]
        # Fuzzy-match the relation and either endpoint name.
        records = list(graph.run(
            "match(p)-[r]->(n) where r.relation =~ '.*%s.*' and (n.Name =~ '.*%s.*' or p.Name =~ '.*%s.*') return p.Name,n.Name,r.relation,p.cate,n.cate"
            % (rel, start, start)))
        data_array.extend(records)
    # Only entities in a known tag category get a dedicated picture;
    # everything else falls back to the default one.
    if str(data_array[-1]['p.cate']) in tags_list:
        result = str(data_array[-1]['p.Name'])
    else:
        result = '默认'
    # Look up the picture URL in the crawled json dump.
    with open('./spider/json/data.json', encoding='utf-8') as f:
        data = json.load(f)
    img_url = 'https://ss1.bdstatic.com/70cFvXSh_Q1YnxGkpoWK1HF6hhy/it/u=1802553443,2497346274&fm=26&gp=0.jpg'
    for key in data[result]:
        if str(key) == "图片链接":
            img_url = str(data[result][key])
            break
    # Cache the image locally, then inline it as base64.
    request.urlretrieve(img_url, './spider/images/' + '%s.jpg' % (result))
    with open("./spider/images/" + "%s.jpg" % (result), "rb") as image:
        encoded = str(base64.b64encode(image.read()))
    # encoded looks like "b'...'"; strip the bytes-repr wrapper.
    return [get_json_data(data_array), get_profile(result), encoded.split("'")[1]]
def get_all_profile(name):
    """Render every property of the node named `name` as HTML
    <dt>/<input> snippets for the edit form."""
    data = graph.run(
        "MATCH (n) WHERE n.name = '%s' RETURN properties(n) as props" % name).data()
    props = data[0]['props']
    pieces = []
    for key in props:
        pieces.append(
            '<dt class = "basicInfo-item name" >' + str(key)
            + ' <input type="text" value="' + str(props[key])
            + '" class="form-control" class="prop">')
    return ''.join(pieces)
def fuzzy_search(array):
    """For each token in `array`, fuzzily match Concept names (the token's
    characters in order with anything in between) and render the hits as
    HTML buttons. Question marks and the particle '的' are skipped."""
    ss = ''
    for name in array:
        if name in ("?", "的"):
            continue
        # '数组' -> '数.*组': allow arbitrary characters between the query's
        # characters. (Dropped the original's no-op name[::1] copy.)
        pattern = '.*'.join(name)
        data = graph.run(
            "match (n:Concept) where n.name =~ '.*%s.*' return n.name"
            % pattern).data()
        # FIX idiom: test emptiness directly instead of str(data) != "[]".
        for row in data:
            ss += ('<button class="btn btn-default" data=' + row["n.name"]
                   + '>' + row["n.name"] + '</button> ')
    return ss
def get_KGQA_answer(array):
    """Follow the relation chain in `array`, starting from Concept array[0],
    and return the traversed edges wrapped as [json payload]."""
    data_array = []
    for step in range(len(array) - 1):
        # Step 0 starts at the question subject; subsequent steps continue
        # from the tail entity of the previous match.
        start = array[0] if step == 0 else data_array[-1]['n.name']
        rel = array[step + 1]
        records = list(graph.run(
            "match(p:Concept {name:'%s'})-[r:%s{relation: '%s'}]->(n) return p.name,n.name,r.relation,p.cate,n.cate"
            % (start, rel, rel)))
        data_array.extend(records)
    return [get_json_data(data_array)]
def all():
    """Dump the whole graph: persist a name->index dictionary to disk and
    return (json payload, name_dict). NOTE: shadows the builtin all()."""
    data = list(graph.run(
        "match (p)-[r]->(n) return p.name, r.relation, n.name, p.cate, n.cate"))
    labels = []
    for rec in data:
        labels.append(rec['p.name'] + "_" + rec['p.cate'])
        labels.append(rec['n.name'] + "_" + rec['n.cate'])
    name_dict = {}
    for idx, entry in enumerate(list(set(labels))):
        name_dict[entry.split("_")[0]] = idx
    with open(getpath + '/neo_db/name_dict.txt', 'w+') as name_dict_f:
        name_dict_f.write(str(name_dict))
    return get_json_data(data), name_dict
def all_graph():
    """The entire graph as an echarts {data, links} payload."""
    records = list(graph.run(
        "match(p)-[r]->(n) return p.Name,r.relation,n.Name,p.cate,n.cate"))
    return get_json_data(records)
def delete_node(name):
    """Remove the node named `name` together with all of its relationships."""
    graph.run("match (n {name:'%s'} ) detach delete n" % name)
def concept():
    """Export up to 10000 CONCEPT node names, one per line, to
    ../raw_data/concept.txt."""
    data = list(graph.run("MATCH (n:CONCEPT) RETURN n LIMIT 10000"))
    # FIX: use a context manager — the original never closed the file.
    with open('../raw_data/concept.txt', 'w', encoding='utf-8') as f:
        for d in data:
            f.write(d['n']['conceptName'] + '\n')
def author():
    """Export up to 10000 AUTHOR node names, one per line, to
    ../raw_data/author.txt."""
    data = list(graph.run("MATCH (n:AUTHOR) RETURN n LIMIT 10000"))
    # FIX: use a context manager — the original never closed the file.
    with open('../raw_data/author.txt', 'w', encoding='utf-8') as f:
        for d in data:
            f.write(d['n']['authorName'] + '\n')
def query_author_info(name):
    """All direct relations of AUTHOR `name` as an echarts {data, links}
    payload.

    Relation types handled (same as the original four copy-pasted branches,
    now table-driven): interest -> CONCEPT, belong2 -> AFFILIATION,
    own -> PAPER, Collaborate -> AUTHOR.
    """
    # r.type -> (property holding the target's display name,
    #            target category, link label)
    rel_map = {
        'interest': ('conceptName', 'CONCEPT', 'AUTHOR2CONCEPT'),
        'belong2': ('affiliationName', 'AFFILIATION', 'AUTHOR2AFFILIATION'),
        'own': ('paperID', 'PAPER', 'AUTHOR2PAPER'),
        'Collaborate': ('authorName', 'AUTHOR', 'COAUTHOR'),
    }
    data = list(graph.run(
        "match(p :AUTHOR{authorName:'%s'})-[r]->(n) return p, r, n " % (name)))
    json_data = {'data': [], "links": []}
    name_dict = {}
    count = 0
    # The queried author is node 0.
    root = {'name': name, 'category': "AUTHOR"}
    json_data['data'].append(root)
    name_dict[root['name']] = count
    count += 1
    for rec in data:
        mapping = rel_map.get(rec['r']['type'])
        if mapping is None:
            continue  # unknown relation types were silently skipped before too
        prop, category, link_value = mapping
        node = {'name': rec['n'][prop], 'category': category}
        name_dict[node['name']] = count
        count += 1
        json_data['data'].append(node)
        json_data['links'].append({
            'source': name_dict[rec['p']['authorName']],
            'target': name_dict[rec['n'][prop]],
            'value': link_value,
        })
    return json_data
def get_hot_author():
    # NOTE(review): this looks unfinished. The cypher contains an unfilled
    # '%s' placeholder and no string formatting is applied, so the query is
    # sent verbatim and cannot match a real author name; the result is also
    # discarded (the function implicitly returns None). Confirm the intended
    # behavior before wiring this up.
    data = graph.run(
        "match(p:AUTHOR{authorName:'%s'} )-[r:COAUTHOR*1..3]->(n) return p.authorName, n.authorName"
    )
    data = list(data)
from py2neo import Graph, Node, Relationship
import json
from neo_db.config import graph

# Rebuild the knowledge graph from scratch.
graph.run("match (n) detach delete n")

# relation.csv rows: head, tail, relation, head_cate, tail_cate
with open("../raw_data/relation.csv", encoding='gbk') as f:
    for line in f.readlines():
        rela_array = line.strip("\n").split(",")
        print(rela_array)
        graph.run("merge (p:Concept {cate:'%s', name: '%s'})"
                  % (rela_array[3], rela_array[0]))
        graph.run("merge (p: Concept{cate:'%s',name: '%s'})"
                  % (rela_array[4], rela_array[1]))
        graph.run(
            "MATCH(e: Concept), (cc: Concept) WHERE e.name='%s' AND cc.name='%s' CREATE(e)-"
            "[r:%s{relation: '%s'}]->(cc) RETURN r"
            % (rela_array[0], rela_array[1], rela_array[2], rela_array[2]))

# Attach crawled attributes to the Concept nodes.
name_lst = []
with open('../KGQA/my_dict.txt', 'r', encoding='utf-8') as f:
    for line in f:
        name_lst.append(line.strip('\n'))
with open('../spider/json/data.json', encoding='utf-8') as f:
    data = json.load(f)
for name in name_lst:
    for attr in data[name]:
        print(name, attr, data[name][attr])
        # Cypher property keys must not contain whitespace.
        prop_key = "".join(attr.split())
        graph.run('match (n:Concept{name:"%s"}) set n.%s="%s"'
                  % (name, prop_key, data[name][attr]))
def query_by_sentence(sent_list):
    """Dispatch a parsed question to the matching cypher query.

    sent_list[0] selects the question type:
      1 -> all shortest paths between people sent_list[1] and sent_list[2]
      2 -> targets of relation sent_list[3] from person sent_list[1]
      3 -> sources of relation sent_list[3] into person sent_list[2]

    Returns (nodes, edges). FIX: for any other type the original hit
    `else: pass` and then returned the unbound names `nodes`/`edges`
    (NameError); it now returns two empty lists.
    """
    if sent_list[0] == 1:
        # Same query feeds both the node and the edge builders.
        shortest_q = (
            "match t = allshortestPaths((p:Person) -[*]->(n:Person)) WHERE p.Name = '%s' and n.Name = '%s' return t as shortpath, NODES(t) AS nodes Union all match t = allshortestPaths((p:Person) -[*]->(n:Person)) WHERE p.Name = '%s' and n.Name = '%s' return t as shortpath, NODES(t) AS nodes"
            % (sent_list[1], sent_list[2], sent_list[2], sent_list[1]))
        nodes = []
        for group in map(buildNodes2, graph.run(shortest_q).data()):
            for node in group:
                nodes.append({'data': node})
        edges = []
        for group in map(buildEdges2, graph.run(shortest_q).data()):
            for edge in group:
                edges.append({'data': edge})
    elif sent_list[0] == 2:
        nodes = map(
            buildNodes,
            graph.run(
                "match(n:Person{Name:'%s'})-[r:%s{relation: '%s'}]->(p) return p.Name,n.Name,p.Clan,n.Clan Union all match(p:Person{Name:'%s'})-[r:%s{relation: '%s'}]->(n) return p.Name,n.Name,p.Clan,n.Clan"
                % (sent_list[1], sent_list[3], sent_list[3],
                   sent_list[1], sent_list[3], sent_list[3])).data())
        edges = map(
            buildEdges,
            graph.run(
                "match(p:Person{Name:'%s'})-[r:%s{relation: '%s'}]->(n) return p.Name, r.relation,n.Name"
                % (sent_list[1], sent_list[3], sent_list[3])).data())
    elif sent_list[0] == 3:
        nodes = map(
            buildNodes,
            graph.run(
                "match(n)-[r:%s{relation: '%s'}]->(p:Person{Name:'%s'}) return p.Name,n.Name,p.Clan,n.Clan Union all match(p)-[r:%s{relation: '%s'}]->(n:Person{Name:'%s'}) return p.Name,n.Name,p.Clan,n.Clan"
                % (sent_list[3], sent_list[3], sent_list[2],
                   sent_list[3], sent_list[3], sent_list[2])).data())
        edges = map(
            buildEdges,
            graph.run(
                "match(p)-[r:%s{relation: '%s'}]->(n:Person{Name:'%s'}) return p.Name, r.relation,n.Name"
                % (sent_list[3], sent_list[3], sent_list[2])).data())
    else:
        nodes, edges = [], []  # unknown question type: nothing to show
    return nodes, edges
def query_path(a, b):
    """Shortest path between the nodes named `a` and `b`, decoded from
    py2neo's textual path representation into an echarts {data, links}
    payload."""
    data = graph.run(
        "MATCH p =shortestPath((n { name: '%s' })-[*..15]-(m {name:'%s'})) RETURN p"
        % (a, b))
    data = data.data()
    data = data[0]['p']
    # Parse the stringified path, e.g.
    # (串)-[:属于 {}]->(线性结构)<-[:分类 {}]-(数据结构)
    # NOTE: the regexes only match CJK (\u4e00-\u9fa5) names.
    data = str(data)
    pattern11 = re.compile("\[:([\u4e00-\u9fa5]*) {}\]->\(([\u4e00-\u9fa5]*)\)"
                           )  # forward hop, e.g. [:属于 {}]->(线性结构)
    pattern22 = re.compile("\(([\u4e00-\u9fa5]*)\)<-\[:([\u4e00-\u9fa5]*) {}\]"
                           )  # backward hop, e.g. (线性结构)<-[:分类 {}]
    pattern3 = re.compile(":([\u4e00-\u9fa5]*)")  # relation name, e.g. :属于 (unused)
    pattern4 = re.compile("\(([\u4e00-\u9fa5]*)\)")  # node name, e.g. (串)
    right = pattern11.findall(
        data)  # e.g. [('属于', '线性结构'), ('研究对象', '存储结构'), ('包括', '索引存储')]
    left = pattern22.findall(data)  # e.g. [('线性结构', '分类')]
    concept = pattern4.findall(data)  # all node names along the path, in order
    arr_list = []
    # Forward hop: the source is the node preceding the target in `concept`.
    for item in right:
        a = ""
        for j in range(len(concept)):
            if concept[j] == item[1]:
                a += concept[j - 1] + "," + item[1] + "," + item[0]
        arr_list.append(a)
    # Backward hop: the source is the node following it in `concept`.
    for item in left:
        a = ""
        for j in range(len(concept)):
            if concept[j] == item[0]:
                a += concept[j + 1] + "," + item[0] + "," + item[1]
        arr_list.append(a)
    print(
        f"list:{arr_list}"
    )  # e.g. ['串,线性结构,属于', '数据结构,存储结构,研究对象', ...]
    # Category lookup — presumably cate -> collection of concept names;
    # verify against col_to_dic's definition.
    data_dict = col_to_dic()
    json_data = {'data': [], "links": []}
    d = []
    for line in arr_list:
        rela_array = line.split(",")
        # Append the category of the source (index 3) and target (index 4).
        for item in data_dict:
            if rela_array[0] in data_dict[item]:
                rela_array.append(item)
            if rela_array[1] in data_dict[item]:
                rela_array.append(item)
        d.append(rela_array[0] + "_" + rela_array[3])
        d.append(rela_array[1] + "_" + rela_array[4])
    d = list(set(d))
    # Assign each distinct "name_cate" node an index and emit it.
    name_dict = {}
    count = 0
    for j in d:
        j_array = j.split("_")
        data_item = {}
        name_dict[j_array[0]] = count
        count += 1
        data_item['name'] = j_array[0]
        data_item['category'] = CA_LIST[j_array[1]]
        json_data['data'].append(data_item)
    # One link per parsed hop.
    for line in arr_list:
        rela_array = line.split(",")
        link_item = {}
        link_item['source'] = name_dict[rela_array[0]]
        link_item['target'] = name_dict[rela_array[1]]
        link_item['value'] = rela_array[2]
        json_data['links'].append(link_item)
    return json_data
def update_node(node, cate):
    """Set the `cate` property of the node named `node`."""
    print(node, cate)
    graph.run("MATCH (n) WHERE n.name = '%s' SET n.cate = '%s'" % (node, cate))