import pandas as pd
from py2neo import Graph, Node, Relationship, NodeMatcher

# Load the (S, P, O) triples exported to Excel; each row becomes one edge.
df = pd.read_excel('./excels/santi.xlsx')
# 取前610行作为图谱数据 (optionally restrict to the first N rows)
# df = df.iloc[:706, :]

# BUG FIX: py2neo's `host` argument takes a bare hostname; the original
# value "localhost://7474" is neither a hostname nor a URI and cannot
# resolve. Connect to the local server and let py2neo pick default ports.
graph = Graph(host="localhost", auth=("neo4j", "jc147369"))

# Create one Node per distinct subject/object name.
names = set(df['S'].tolist() + df['O'].tolist())
for name in names:
    graph.create(Node("Node", name=name))
print('create nodes successfully!')

# Create relationships: look up both endpoints, then link them typed by P.
matcher = NodeMatcher(graph)
for i in range(df.shape[0]):
    row = df.iloc[i, :]                      # hoisted: one row lookup per iteration
    s_node = matcher.match("Node", name=row['S']).first()
    o_node = matcher.match("Node", name=row['O']).first()
    relationship = Relationship(s_node, row['P'], o_node)
    # BUG FIX: the relationship object was constructed but never written
    # to the database; persist it.
    graph.create(relationship)
filepaths = getfiles() for i in filepaths: # f2 = open(i.replace(".xml", ".json"), 'w') # 将xml转换为json并保存到本地 # f2.write(xmlToJson(i)) # f2.close() dir = xmltodict.parse(open(i, "r").read(), encoding='utf-8') # 将xml转换为字典 lis = [] # 用于暂时存储单行数据的list lis.append(dir['WMS_Capabilities']['Service']['Title']) # 数据第一列:服务的标题 fNode = Node("WMS", name=dir['WMS_Capabilities']['Service']['Title']) # 创建服务实体节点 myGraph.create(fNode) # 实体节点导入图谱 a = dir['WMS_Capabilities']['Capability']['Layer'] while type(a) != list and "Layer" in a.keys(): a = a['Layer'] if type(a) == list: for List in a: if "Title" in List.keys(): lis.append(List["Title"]) node_1 = Node('Title', content=List["Title"]) # 创建属性节点 Rel_1 = Relationship(fNode, "keyword", node_1) # 创建实体节点与属性节点的关系 myGraph.create(node_1) # 属性节点导入图谱 myGraph.create(Rel_1) # 实体属性关系导入图谱 else:
def add_task(self, name, category):
    """Create a TodoTask node linked to its TodoCategory via a CONCERN edge."""
    task_node = Node('TodoTask', name=name)
    category_node = Node('TodoCategory', category=category)
    concern = Relationship(task_node, 'CONCERN', category_node)
    # Creating the relationship also creates both endpoint nodes.
    graph.create(concern)
    print(concern)
def get_query(text):
    """Load a dict of tweets into Neo4j in one transaction.

    `text` maps tweet id -> dict of tweet fields. Tweets are processed in
    ascending datetime order. For each tweet a "tid" node, author nodes and
    their relationships are merged; hashtags/keywords/mentions become extra
    nodes. Cross-tweet links (quote/retweet/reply) are only recorded into
    module-level dicts here and resolved later.
    NOTE(review): relies on module globals `graph`, `start_time`,
    `glob_dictionary`, `quoted_dict`, `retweet_source_dict`,
    `replyto_source_dict` — confirm they are initialised by the caller.
    """
    dates = []
    tid = []
    my_dict = {}
    tx = graph.begin()
    count = 0
    # Collect (datetime, id) pairs so tweets can be sorted chronologically.
    for i in text:
        dates.append(text[i]['datetime'])
        tid.append(i)
        my_dict[i] = text[i]['datetime']
    dates, tid = zip(*sorted(zip(dates, tid)))
    for i in range(len(tid)):
        count += 1
        ttid = tid[i]
        # Progress heartbeat every 100 tweets.
        if (count % 100 == 0):
            print(count, time.time() - start_time)
        # One node per tweet, carrying all scalar tweet attributes.
        tid_label = Node("tid", tid=ttid, lang=text[ttid]["lang"],
                         location=text[ttid]["location"],
                         date=text[ttid]["date"],
                         datetime=text[ttid]["datetime"],
                         quote_count=text[ttid]["quote_count"],
                         reply_count=text[ttid]["reply_count"],
                         like_count=text[ttid]["like_count"],
                         verified=text[ttid]["verified"],
                         sentiment=text[ttid]["sentiment"],
                         type=text[ttid]["type"],
                         tweet_text=text[ttid]["tweet_text"])
        author_label = Node(
            "author", author_id=text[ttid]
            ["author_id"])  #,author_sname=text[ttid]["author_screen_name"]);
        author_name_label = Node("author_name",
                                 author_name=text[ttid]["author"])
        author_sname_label = Node(
            "author_sname", author_sname=text[ttid]["author_screen_name"])
        # # image_label=Node("image",image_url=text[ttid]["author_profile_image"])
        # # date_label=Node("date",date=text[ttid]["date"],datetime=text[ttid]["datetime"])
        # # location_label=Node("location",location=text[ttid]["location"])
        # # lang_label=Node("lang",lang=text[ttid]["lang"])
        # # lang_relation=Relationship(tid,"was tweeted in language",lang_label)
        # # location_tid_relationship=Relationship(tid,'was tweeted from',location_label)
        # # date_tid_relationship=Relationship(tid,"was tweeted on",date_label)
        # # author_image_relationship=Relationship(author_label,"has image",image_label)
        author_relationship = Relationship(tid_label, "has_author",
                                           author_label)
        author_s_relationship = Relationship(tid_label, "has_s_author",
                                             author_sname_label)
        author_name_relationshipo = Relationship(author_label, "has_name",
                                                 author_name_label)
        # Remember the node so later passes can link retweets/replies to it.
        glob_dictionary[ttid] = tid_label
        tx.merge(tid_label)
        tx.merge(author_label)
        tx.merge(author_relationship)
        tx.merge(author_name_relationshipo)
        tx.merge(author_s_relationship)
        # # tx.merge(lang_relation)
        # # tx.merge(location_tid_relationship)
        # # tx.merge(date_tid_relationship)
        # # tx.merge(author_image_relationship)
        # # print("here")
        # # try:
        # # tx.merge(author_relationship)
        # # except:
        # # print(author_label)
        # # exit();
        # # print("here")
        # Record cross-tweet references for a later linking pass.
        if (text[ttid]["quoted_source_id"] != None):
            quoted_dict[ttid] = text[ttid]["quoted_source_id"]
        if (text[ttid]["retweet_source_id"] != None):
            retweet_source_dict[ttid] = text[ttid]["retweet_source_id"]
        if (text[ttid]["replyto_source_id"] != None):
            replyto_source_dict[ttid] = text[ttid]["replyto_source_id"]
        # Fan-out attribute nodes: hashtags, keywords and mentions.
        if (text[ttid]["hashtags"] != None):
            for hasht_val in text[ttid]["hashtags"]:
                hashtag_label = Node("hashtag", hashtag=hasht_val)
                hashtag_relationship = Relationship(tid_label, "has_hashtag",
                                                    hashtag_label)
                tx.merge(hashtag_label)
                tx.merge(hashtag_relationship)
        # if(text[ttid]["url_list"]!=None):
        # for url_val in text[ttid]["url_list"]:
        # url_label=Node("url",url=url_val)
        # url_relationship=Relationship(tid_label,"has url_list",url_label)
        # tx.merge(url_label)
        # tx.merge(url_relationship)
        if (text[ttid]["keywords_processed_list"] != None):
            for key_val in text[ttid]["keywords_processed_list"]:
                keywords_label = Node("keywords", keyword=key_val)
                keywords_label_relationship = Relationship(
                    tid_label, "has_keyword", keywords_label)
                tx.merge(keywords_label)
                tx.merge(keywords_label_relationship)
        if (text[ttid]["mentions"] != None):
            for mention_val in text[ttid]["mentions"]:
                mentions_label = Node("mentions", mentions=mention_val)
                mentions_label_relationship = Relationship(
                    tid_label, "has_mention", mentions_label)
                tx.merge(mentions_label)
                tx.merge(mentions_label_relationship)
    # Single commit for the whole batch.
    tx.commit()
def month(self, year, month):
    """Return the Month node for *year*/*month*, creating the
    root-[:YEAR]->Year-[:MONTH]->Month chain if it is absent."""
    year_node = Node("Year", year=year)
    month_node = Node("Month", year=year, month=month)
    calendar_path = Path(self.root, "YEAR", year_node, "MONTH", month_node)
    self.graph.create_unique(calendar_path)
    return calendar_path.end_node
sql = "MATCH (n:`疾病`{名称:'%s'}) set n.治疗周期='%s'" % (b['名称'], a['治疗周期']) graph.run(sql) # 医保/传染性暂不更新 else: # 如果不存在该疾病节点,则新建节点 # 创建疾病节点 node_jb = Node('疾病', '寻医问药', 病因=b['病因'], 别称=b['别称'], 英文名=b['英文名'], 温馨提示=b['温馨提示'], 患病比例=b['患病比例'], 定义=b['定义'], 名称=b['名称'], 治疗费用=b['治疗费用'], 多发人群=b['多发人群'], 患病比例值=b['患病比例值'], 医保=b['医保'], 传染性=b['传染性'], 治疗周期=b['治疗周期'], 治愈率=b['治愈率'], 治愈率值=b['治愈率值']) graph.merge(node_jb) #更新科室 for ks in b['科室']: if ks != '暂无数据': sql = "MATCH (n:`科室`{名称:'%s'}) RETURN n.名称" % (ks) m = graph.run(sql).data()
# override config file if ever
neo_db = {
    'host': options.neohost or config_neo_db['host'],
    'port': options.neoport or str(config_neo_db['port']),
    'username': options.neousername or config_neo_db['username'],
    'password': options.neopassword or config_neo_db['password']
}
n = CMgDB(neo_db)

# Load the IP inventory; the context manager guarantees the handle is
# closed even on a parse error (the original leaked it on json errors).
with open(options.jsonfile, 'r') as file:
    jsondata = json.load(file)

for ip in jsondata:
    neo_ip = Node('Ip', name=ip)
    n.graph.merge(neo_ip)
    # Parameterised Cypher instead of string concatenation: robust against
    # quotes/special characters in the vlan id and avoids injection.
    neo_vlan = n.graph.run(
        'match (v:Vlan) where v.id=$vlan return v limit 1',
        vlan=jsondata[ip]['vlan']).evaluate()
    if neo_vlan is None:  # idiomatic None test (was `== None`)
        neo_vlan = Node('Vlan',
                        name=jsondata[ip]['vlan'],
                        id=jsondata[ip]['vlan'])
        n.graph.merge(neo_vlan)
    neo_nic = Node('Nic', name=jsondata[ip]['mac'])
    # --create forces a fresh node; otherwise reuse an existing one.
    if options.create:
        n.graph.create(neo_nic)
    else:
        n.graph.merge(neo_nic)
    n.link_nodes(neo_ip, neo_vlan, neo_nic)
def test_can_create_node(graph):
    """A node created inside a transaction must be bound to the graph
    and assigned a server-side identity."""
    alice = Node("Person", name="Alice")
    with graph.begin() as tx:
        tx.create(alice)
    assert alice.graph == graph
    assert alice.identity is not None
from ip_renew import *
import re

import requests
from bs4 import BeautifulSoup
# FIX: this import line was duplicated verbatim in the original.
from py2neo import authenticate, Graph, Path, Node, Relationship
#from dbconnect import *

# Register credentials for the local Neo4j HTTP endpoint, then connect.
authenticate("localhost:7474", "neo4j", "password")
graph = Graph("http://localhost:7474/db/data")
labels = graph.node_labels

# Root node for everything scraped from exploit-db; merge is idempotent.
website_name = "exploit-db"
website_node = Node("Website", name=website_name)
graph.merge(website_node)

# Spoof a desktop-browser User-Agent so the site serves normal pages.
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
headers = {'User-Agent': user_agent}

#authenticate("localhost:9066", "neo4j","password")
#graph = Graph("http://localhost:7474/db/data")
#website = "Oday-test"
#website_node = Node("Website", name=website)
#graph.create(website_node)
totalconcept = list(set(totalconcept)) conceptnode = concept.find({"_id": { "$in": totalconcept }}, { "_id": 1, "one_of_name": 1, "definition": 1, "semtype": 1 }) conceptnode = list(conceptnode) for i in range(len(conceptnode)): row = conceptnode[i] if "definition" not in row: tempnode = Node('&'.join(row["semtype"]), name=row["_id"], oneofname=row["one_of_name"]) else: tempnode = Node('&'.join(row["semtype"]), name=row["_id"], definition=('|'.join(row["definition"])), oneofname=row["one_of_name"]) g.create(tempnode) if i % 1000 == 0: print(i) del (conceptnode) def create_relationship(totalconcept): count = 0 conceptnode = concept.find({"_id": {
password = str(os.environ.get('NEO4J_PASSWORD'))
user = '******'
# authenticate("localhost:7474", user, password)
g = Graph("http://localhost:7474/db/data/", password=password)

posts_file = '/home/srallaba/projects/dissertation/emnlp2020.tdd'
papers_file = 'data/papers.csv'
nodestuff_file = 'nodes.emnlp2020'

tx = g.begin()
g.delete_all()

# Create a node for the conference
conference = Node("Conferences", name='EMNLP 2020')
tx.create(conference)
tx.commit()

tx = g.begin()

# Populate nodes. Each line of nodestuff_file is "key,label,name".
nodes_dict = {}
with open(nodestuff_file) as f:  # context manager: file was never closed before
    for line in f:
        line = line.split('\n')[0].split(',')
        # BUG FIX: the unpacking below needs THREE fields; the original
        # guard (len(line) < 2) let two-field lines through and raised
        # IndexError on line[2].
        if len(line) < 3:
            continue
        key, val1, val2 = line[0], line[1], line[2]
        node = Node(val1, name=val2)
        nodes_dict[key] = node
        tx.create(node)
def add_fb_friends(self, friends):
    """Merge a FRIEND relationship from this user to each facebook friend.

    Each entry of *friends* is a dict with 'name' and 'id' keys.
    """
    me = self.find()
    for friend in friends:
        friend_node = Node('User', name=friend['name'], id=friend['id'])
        graph.merge(Relationship(me, 'FRIEND', friend_node))
def add_fb_likes(self, likes):
    """Merge a LIKE relationship from this user to each liked page.

    Each entry of *likes* is a dict with 'name', 'id' and 'created_time';
    the timestamp is stored as a property on the relationship itself.
    """
    me = self.find()
    for like in likes:
        liked_node = Node('Likes', name=like['name'], id=like['id'])
        edge = Relationship(me, 'LIKE', liked_node,
                            created_time=like['created_time'])
        graph.merge(edge)
if __name__ == '__main__': data = ProcessXML('Ara2009.xml') graph = Graph('bolt://palsson.di.uminho.pt:6092', auth=('neo4j', '123')) compartments = data.getCompartments() species = data.getSpecies() reactions = data.getReactions() dicReact = data.getReactionDict() ''' nodeC = Node("Compartment", id='C', name='Cytosol', model='Arabidopsis') graph.create(nodeC) nodeM = Node("Compartment", id='M', name='Mitochondrion', model='Arabidopsis') graph.create(nodeM) nodeE = Node("Compartment", id='E', name='Extracellular', model='Arabidopsis') graph.create(nodeE)''' matcher = NodeMatcher(graph) nodeModel = Node('Model', id='Arabidopsis', name='Arabidopsis') for spec in species.keys(): try: if '_mit' in species[spec][0]: id = species[spec][0].replace('_mit', '') nodeSpec = matcher.match('Species').where( '_.id="' + id + '"').where('_.compartment="' + 'M' + '"') relation = Relationship(nodeModel, 'CONTAINS', nodeSpec.first()) graph.create(relation) elif 'x_' in species[spec][0]: id = species[spec][0].replace('x_', '') nodeSpec = matcher.match('Species').where( '_.id="' + id + '"').where('_.compartment="' + 'E' + '"') relation = Relationship(nodeModel, 'CONTAINS',
logging = logging.getLogger(__name__) graph = Graph("http://192.168.1.100:7474", username="******", password='******') graph.delete_all() # graph.merge() ''' 1 —— 创建node,函数第一个参数是节点类型,第二个参数是value值 ''' # a = Node('PersonTest', name='张三',key='s') # b = Node('PersonTest', name='张三',key='s') # # r = Relationship(a, 'KNOWNS', b) # s = a | b #| r # graph.create(s) tx = graph.begin() a = Node('PersonTest', name='张三', key='s') b = Node('PersonTest', name='张三1', key='s1', age=33) tx.merge(a, 'PersonTest', 'name') tx.merge(b, 'PersonTest', 'name') tx.commit() # a['地点']=['sf','sff'] # graph.push(a) # graph. json = { '中文名称': ['北京故宫博物院'], '历经朝代': ['明朝,清朝'], '地点': ['中国北京'], '外文名称': ['The Palace Museum'], '建议游玩时长': ['3-4小时'],
elif layer.name != "IP" and layer.name != "Padding": yield layer.name counter += 1 # Reading PCAP file and accessing graph packets = rdpcap("smallFlows.pcap") g = Graph(password="******") # Creating nodes, their relationships # and adding them to the graph for packet in packets: layers = [] try: a = Node("Host", name=packet.getlayer(IP).src) b = Node("Host", name=packet.getlayer(IP).dst) except AttributeError: a = Node("Host", name=packet.getlayer(ARP).psrc) b = Node("Host", name=packet.getlayer(ARP).pdst) for layer in get_layers(packet): layers.append(layer) protocols = ':'.join(layers) relation = Relationship.type(':'.join( OrderedDict.fromkeys(protocols.split(':')))) g.merge(relation(a, b), "Host", "name")
def filter(tailUrl): url = 'https://en.wikipedia.org/wiki/' + tailUrl if not re.search('\.(jpg|JPG)$', url): if not ':' in t.get('href') and not '%' in t.get('href'): return 1 headUrl = 'https://en.wikipedia.org/wiki/' num = 1 k = 1 tailUrl = "Moon" while True: for n in range(1, 99): textList = getHtml(tailUrl) node1 = Node(label="FatherWord", name=tailUrl) neo_graph.create(node1) index = 0 try: urlDict = {} for t in textList: if filter(tailUrl): node2 = Node(label="SonWord", name=t.get_text()) neo_graph.create(node1) node1TOnode2 = Relationship(node1, 'Include', node2) neo_graph.create(node1TOnode2) urlDict[index] = t.get_text() index += 1 print('\r\n') k += 1 tailUrl = urlDict[random.randint(2, 8)]
def push_in(graph, label, dname, ename=None, cname=None):
    """Merge a single *label* node carrying english (ename), chinese
    (cname) and display (xname) name properties."""
    graph.merge(Node(label, ename=ename, cname=cname, xname=dname))
def insert_neo(lable, name, pname):
    """Merge an ISA edge from child *name* to parent *pname*; both nodes
    share the same label (parameter name 'lable' kept for callers)."""
    child = Node(lable, name=name)
    parent = Node(lable, name=pname)
    g.merge(Relationship(child, "ISA", parent))
# -*- coding: utf-8 -*-
"""... """
from py2neo import Database, Graph
from py2neo import Node, Relationship

if __name__ == '__main__':
    # Wipe any cached Database instances, then connect with explicit auth.
    default_db = Database(uri='bolt://localhost:7687')
    default_db.forget_all()
    # conf = default_db.config()
    # grap = default_db.default_graph()
    auth = ('neo4j', 'alxfed')
    data = Graph(auth=auth, secure=False)  # host='bolt://localhost:7687', encrypted=False,
    data.delete_all()

    alice = Node("Person", name="Alice", age=33)
    alice.remove_label('Person')  # Alice is deliberately left label-less
    bob = Node("Person", name="Bob", age=44)
    KNOWS = Relationship.type("KNOWS")
    # Merge keyed on the Person label + name property.
    data.merge(KNOWS(alice, bob), "Person", "name")
    print('\ndone')
text = json.load(fp) fp.close() current_file_number += 1 print("progress is = ", current_file_number, "/", 113, "time=", time.time() - start_time) print(x) get_query(text) tx = graph.begin() count = 0 for i in retweet_source_dict: count += 1 if (count % 100 == 0): print(count, time.time() - start_time) if (retweet_source_dict[i] not in glob_dictionary): tid_label = Node("tid", tid=retweet_source_dict[i]) else: tid_label = glob_dictionary[retweet_source_dict[i]] tx.merge(Relationship(glob_dictionary[i], "retweeted_from", tid_label)) tx.commit() tx = graph.begin() count = 0 for i in replyto_source_dict: count += 1 if (count % 100 == 0): print(count, time.time() - start_time) if (replyto_source_dict[i] not in glob_dictionary): tid_label = Node("tid", tid=replyto_source_id[i]) else: tid_label = glob_dictionary[replyto_source_id[i]]
if k.get('@class') == 'LOVES' or k.get('@class') == 'BEGETS' or k.get('@class') == 'HASSIBLING': reco_rel[k.get("@rid")] = k ## NOEUDS # TABLE CREATURE a = 0 crea = {} for k in reco_crea.items() : d = k[1] if d.get("@class") == "Creature": a += 1 crea[k[0]] = Node("Creature", rid = d.get("@rid"), searchname = d.get("searchname"), uniquename = d.get("uniquename"), gender = d.get("gender"), race = d.get("race"), gatewaylink = d.get("gatewaylink"), born = d.get("born"), altname = d.get("altname"), died = d.get("died"), significance = d.get("significance"), name = d.get("name"), location = d.get("location"), illustrator = d.get("illustrator")) graph.create(crea[k[0]]) print("On a bien ",a," noeuds creature (",len(reco_crea)," creatures comptees plus haut)") # TABLE LOCATION a = 0 loc = {} for k in reco_loc.items() : d = k[1] if d.get("@class") == "Location": a += 1 loc[k[0]] = Node("Location", rid = d.get("@rid"), significance = d.get("significance"), area = d.get("area"), searchname = d.get("searchname"), uniquename = d.get("uniquename"), gatewaylink = d.get("gatewaylink"), name = d.get("name"), altname = d.get("altname"), type = d.get("type"), age = d.get("age"), canon = d.get("canon"), illustrator = d.get("illustrator")) graph.create(loc[k[0]]) print("On a bien ",a," noeuds location (",len(reco_loc)," locations comptees plus haut)")
def year(self, year):
    """Return the Year node for *year*, creating the root-[:YEAR]->Year
    path if it does not exist yet."""
    year_path = Path(self.root, "YEAR", Node("Year", year=year))
    self.graph.create_unique(year_path)
    return year_path.end_node
from py2neo import Graph, Node, Relationship, NodeSelector

# Two Person nodes and a 'konws' edge, combined into one subgraph.
person_a = Node('Person', name='yzk')
person_b = Node('Person', name='whf')
knows_edge = Relationship(person_a, 'konws', person_b, since=1999)
s = person_a | person_b | knows_edge

graph = Graph('http://52.83.213.55:7474', user='******', password='******')
# graph.create(s)

# Look the first 'yzk' Person back up via a NodeSelector.
select = NodeSelector(graph)
per = select.select('Person').where(name='yzk').first()
# print(per)
def day(self, year, month, day):
    """Return the Day node for the given date, creating the full
    root-[:YEAR]->Year-[:MONTH]->Month-[:DAY]->Day chain if absent."""
    date_path = Path(self.root,
                     "YEAR", Node("Year", year=year),
                     "MONTH", Node("Month", year=year, month=month),
                     "DAY", Node("Day", year=year, month=month, day=day))
    self.graph.create_unique(date_path)
    return date_path.end_node
def get_or_create_user(conn, cursor, google_id, googleAPIResponse):
    """Look up (or create) a user from a Google profile payload.

    Reads profile fields out of *googleAPIResponse*, ensures a row exists
    in the SQL `users` table and a matching "User" node exists in Neo4j,
    populates the Flask-style `session` mapping, and returns the SQL id.
    NOTE(review): relies on module-level `session`, `timestamp`, `User`,
    `social_graph` and `update_profile_image_url` — confirm at call site.
    """
    name = googleAPIResponse.get('name', None)
    # Extract the profile image URL, stripping Google's "?sz=N" size suffix.
    if 'image' in googleAPIResponse:
        image = googleAPIResponse['image']
        url = image.get('url', None)
        if url is None:
            imgurl = None
        else:
            indexVal = url.find("?sz")
            if indexVal != -1:
                imgurl = url[0:indexVal]
            else:
                imgurl = url
    else:
        imgurl = None
    # Display name defaults to the given name when a name block is present.
    if name is None:
        givenName = None
        displayName = None
        familyName = None
    else:
        givenName = name.get('givenName', None)
        displayName = givenName
        familyName = name.get('familyName', None)
    # First listed email wins.
    emails = googleAPIResponse.get('emails', None)
    if emails is None:
        email = None
    else:
        email = emails[0]['value']
    gender = googleAPIResponse.get('gender', None)
    # First listed organization wins.
    organizations = googleAPIResponse.get('organizations', None)
    if organizations is None:
        organization = None
    else:
        organization = organizations[0]['name']
    cursor.execute('select id from users where google_id=%s', [google_id])
    row = cursor.fetchone()
    if row is None:
        # create user: new SQL row plus a mirroring Neo4j "User" node.
        cursor.execute('insert into users (google_id, email) values (%s,%s)',
                       [google_id, email])
        result = cursor.lastrowid
        conn.commit()
        user = Node("User", sql_id=result, google_id=google_id, email=email,
                    givenName=givenName, familyName=familyName,
                    displayName=displayName, user_type="subscriber",
                    organization=organization, creation_time=timestamp(),
                    modified_time=timestamp(), dob="", gender=gender,
                    status=0, image_url=imgurl)
        social_graph().create(user)
    else:
        result = row[0]
        user = User(result).find()
        # There might be cases where the Neo4J does not have the corressponding User node
        if user is None:
            # Backfill the missing graph node from the Google payload.
            missing_user_neo4j = Node("User", sql_id=result,
                                      google_id=google_id, email=email,
                                      givenName=givenName,
                                      familyName=familyName,
                                      displayName=displayName,
                                      user_type="subscriber",
                                      organization=organization,
                                      creation_time=timestamp(),
                                      modified_time=timestamp(), dob="",
                                      gender=gender, image_url=imgurl,
                                      status=0)
            social_graph().create(missing_user_neo4j)
        else:
            # Prefer the name already stored in the graph for existing users.
            displayName = user['displayName']
    session['uid'] = result
    session['email'] = email
    session['img'] = imgurl
    # During Login, The Image Url Is Updated In Neo4J DB To Avoid The Image Discrepancies From Google Account
    if imgurl is not None:
        update_profile_image_url(result, imgurl)
    if displayName is None:
        displayName = ""
    session['displayName'] = displayName
    return result
# NOTE(review): this chunk is Python 2 (`print 'please wait'` statement);
# it also relies on module globals (__location__, dir, graph, countries,
# industry, Aindex, A) defined elsewhere — confirm before reuse.
# Load the industry code table; the index becomes the code after the '_'.
industries = pd.read_csv(__location__ + dir + 'industries.txt', sep='\t')
industries.index = [i.split('_')[1] for i in industries['CodeTxt']]
# Industry codes already present as nodes in the graph.
industry_nodes = set([
    i[0] for i in graph.run(
        "MATCH (a:Industry) RETURN a.exo_code ").to_ndarray()
])
# Create a node for every code in the table that the graph lacks.
for missing in set(industries.index) - industry_nodes:
    print('creating', missing, 'industry')
    match = industries.loc[missing]
    graph.create(
        Node("Industry",
             exo_code=missing,
             description=match['Name'],
             exo_CodeNR=match['CodeNr']))
#################################
# Map the row/column labels of matrix A to their graph nodes.
nodes = dict(([i, countries.where("_.alpha3 = '%s'" % i).first()]
              for i in set(Aindex[:, 0])))
industrynodes = dict(
    ([i, industry.where("_.description = '%s'" % i).first()]
     for i in set(Aindex[:, 1])))
# Coordinates of the non-zero entries of A, one (row, col) pair per row.
nonzero = np.array(A.nonzero()).T
print 'please wait'
# do this in parallel
"Ion Battery", "Microprocessor", "Wiring Loom", # "Dioxite", "Gold", # "Silver", "Paraffinium", "Pure Ferrite", "Ionised Cobalt" # "Chlorine", # "Sodium Nitrate", # "Ammonia", # "Phosphorus" ] system = Node("System", name="IvtJen", race="Vy'Keen") graph.merge(system, "System", "name") SELLS = Relationship.type("SELLS") trading = Node("Economy Type", name="Trading") mercantile = Node("Economy", name="Mercantile") shipping = Node("Economy", name="Shipping") rel = Relationship(mercantile, "IS_A_TYPE_OF", trading) graph.merge(rel, "Economy", "name") rel = Relationship(shipping, "IS_A_TYPE_OF", trading) graph.merge(rel, "Economy", "name") power_generation_type = Node("Economy Type", name="Power Generation") power_generation = Node("Economy", name="Power Generation")
relation_syn.append([i,row['name']]) for i in rels: if row[i] != '0': for j in row[i].strip().split('|'): relation[i].append([row['name'],withouttypefind(j)]) if typefind(j) not in conceptclass: conceptclass[typefind(j)] = [] conceptclass[typefind(j)].append(withouttypefind(j)) #create node for key in conceptclass: conceptclass[key] = list(set(conceptclass[key])) count = 0 for key in conceptclass: for n in conceptclass[key]: node = Node(key, name=n) g.create(node) count += 1 def create_relationship(edges, rel_type): count = 0 # 去重处理 set_edges = [] for edge in edges: set_edges.append('###'.join(edge)) for edge in list(set(set_edges)): edge = edge.split('###') p = edge[0] q = edge[1] query = 'match(p),(q) where p.name="%s"and q.name="%s" create (p)-[r:%s{name:"%s"}]->(q)' % (p, q, rel_type, rel_type) #try:
def create_node(cls, workbook):
    """Build library nodes from the four sheets of an xlrd workbook.

    Sheet 0 -> room nodes, sheet 1 -> building nodes, sheet 3 -> floor
    nodes, sheet 2 -> resource nodes; one node per data row (row 0 is the
    header). Columns are accessed positionally — the sheets must keep
    their fixed layout. NOTE(review): assumes `cls.room` etc. are label
    strings and `cls.graph` is a connected py2neo Graph — confirm.
    """
    room_sheet = workbook.sheet_by_index(0)
    for i in range(1, room_sheet.nrows):
        row = room_sheet.row_values(i)
        # Office names look like "campus_building_room"; keep the last part.
        name = row[0]
        if row[0].find("_") != -1:
            name_arr = row[0].split("_")
            if len(name_arr) >= 3:
                name = name_arr[2]
        # Columns 2..19: location, services and per-weekday opening /
        # borrowing hours; 20/21 (area, floor) deliberately skipped.
        room_node = Node(
            cls.room, type=cls.room, name=name, office_name=row[0],
            variant_name=row[1], position=row[2], service=row[3],
            open_date=row[4], phone=row[5], monday_open=row[6],
            monday_borrow=row[7], tuseday_open=row[8],
            tuseday_borrow=row[9], wednesday_open=row[10],
            wednesday_borrow=row[11], thursday_open=row[12],
            thursday_borrow=row[13], friday_open=row[14],
            friday_borrow=row[15], saturday_open=row[16],
            saturday_borrow=row[17], sunday_open=row[18],
            sunday_borrow=row[19],
            #area=row[20],floor=row[21],
            certification=row[22])
        cls.graph.create(room_node)
    """建立馆区节点"""
    # Sheet 1: building nodes (name + variant name only).
    building_sheet = workbook.sheet_by_index(1)
    for i in range(1, building_sheet.nrows):
        row = building_sheet.row_values(i)
        name = row[0]
        #print(name)
        building_node = Node(cls.building,
                             type=cls.building,
                             name=name,
                             office_name=row[0],
                             variant_name=row[1])
        cls.graph.create(building_node)
    """建立楼层节点"""
    # Sheet 3: floor nodes with upstairs/downstairs links (area skipped).
    floor_sheet = workbook.sheet_by_index(3)
    for i in range(1, floor_sheet.nrows):
        row = floor_sheet.row_values(i)
        name = row[0]
        #print(name)
        floor_node = Node(
            cls.floor, type=cls.floor, name=name, office_name=row[0],
            variant_name=row[1],
            #area=row[2],
            upstair=row[3], downstair=row[4])
        cls.graph.create(floor_node)
    """建立资源节点"""
    # Sheet 2: resource nodes; display name is the part after the first '_'.
    resource_sheet = workbook.sheet_by_index(2)
    for i in range(1, resource_sheet.nrows):
        row = resource_sheet.row_values(i)
        name = row[0]
        if row[0].find("_") != -1:
            name = row[0].split("_")[1]
        # print(name)
        resource_node = Node(
            cls.resource, type=cls.resource, name=name, office_name=row[0],
            variant_name=row[1], describe=row[2], count=row[3],
            #room=row[4],
            belong=row[5])
        cls.graph.create(resource_node)