def processPlayerTeam(playerteam_dict,graph_db,root): teams = set({}) # Add teams to database, connected to root node # (Currently, each team requires a separate HTTP request) team_node_dict = {} for pair in playerteam_dict.values(): team = pair[0] teams.add(team) for team in teams: team_node,relation = graph_db.create({"name":team},rel(root,"TEAM",0)) team_node.add_labels("team") team_node_dict[team] = team_node #Add players to database as a batch batch = neo4j.WriteBatch(graph_db) for player_id, player_tuple in playerteam_dict.iteritems(): team = player_tuple[0] full_name = player_tuple[1] player_node=batch.create({"name":full_name,"player_id":int(player_id)}) batch.create(rel(root,"PLAYER",player_node)) batch.add_labels(player_node,"player") team_node = team_node_dict[team] batch.create(rel(player_node,"PLAYS FOR",team_node)) results=batch.submit()
def whipUp(self):
    """Fetch encumbrance records from the REST API and load them into the graph.

    For every record: run self.statement via a Cypher transaction, then link
    the encumbrance node to its object, document base and the involved persons.
    NOTE(review): `object` shadows the builtin; `!= None` should be `is not None`.
    """
    objects = json.load(urlopen(self.api_url))["objects"]
    for object in objects:
        # Parameters for the pre-built Cypher statement.
        args = {}
        args["ID"]=object["id"]
        args["NS"]=object["NStatement"]
        args["DS"]=object["DateStatement"]
        args["D"]=object["Date"]
        args["AI"]=object["AddedInfo"]
        db = self.graph_db.cypher.begin()
        db.append(self.statement, args)
        db.commit()
        # Resolve the nodes just created / referenced by id.
        enc = getEncumbrance(id=int(args["ID"]))
        obj = getObject(id=int(object["Obj"]["id"]))
        docBase = getDocumentBase(id=int(object["DocBase"]["id"]))
        if obj != None:
            self.graph_db.create(rel(enc.one, "HAVE_OBJECT", obj.one))
        if docBase != None:
            self.graph_db.create(rel(enc.one, "HAVE_DOCUMENT", docBase.one))
        # Link every "S" (debtor) person.
        for sp in object["SPerson"]:
            s_p = getPerson(id=int(sp["id"]))
            self.graph_db.create(rel(enc.one, "HAVE_DEPTOR", s_p.one))
        # Link every "W" person.
        for wp in object["WPerson"]:
            w_p = getPerson(id=int(wp["id"]))
            self.graph_db.create(rel(enc.one, "HAVE_WEIGHT", w_p.one))
def create_operons(self):
    """Read Operons.txt (tab separated, '#' comments) and create one
    Operon node + name Term per valid row, linked to the organism node.
    """
    f = open(self.directory + 'Operons.txt', 'r')
    data = f.readlines()
    f.close()
    i = 0
    for line in data:
        if line[0] == '#':
            continue
        chunks = line.split('\t')
        ### testing
        # NOTE(review): chunks[2] is a string, so `chunks[2] == 0` is always
        # False in Python 2 — probably meant `chunks[2] == ''`; confirm.
        if chunks[0] == '' or chunks[1] == '' or chunks[2] == 0:
            continue
        if chunks[3] == '':
            chunks[3] = 'unknown'
        # Create operon node, its name term, and both relationships in one call;
        # rel() indices 0/1 refer to the two node() arguments.
        operon, term, term_rel, org_rel = self.connection.\
            create(node({'name': chunks[0],
                         'start': int(chunks[1]),
                         'end': int(chunks[2]),
                         'strand': chunks[3],
                         'evidence': chunks[6],
                         'source': 'RegulonDB'}),
                   node({'text': chunks[0]}),
                   rel(0, 'HAS_NAME', 1),
                   rel(0, 'PART_OF', self.ecoli_node))
        operon.add_labels('Operon', 'BioEntity', 'DNA')
        i += 1
    logging.info('%d operons were created!' % i)
def create_project_graph(): """Creates a project Graph and stashes it in Neo4j. Returns a tuple of (users, projects, relationships), where each item is a list of the created data. """ # Create some Users user_nodes = [node(name=t[0], username=t[1]) for t in random_users()] users = db.create(*user_nodes) for u in users: # ...and label them as such. u.add_labels("user") # Create some Projects. project_nodes = [node(name=s) for s in random_projects()] projects = db.create(*project_nodes) rels = [] for p in projects: # ...and label them as such. p.add_labels("project") # Set up some relationships. # 1. Give the project a single Owner rels.append(rel((p, "OWNED_BY", random.choice(users)))) # 2. Give the project a random number of contributors. for u in random.sample(users, random.randrange(3, 50)): rels.append(rel((u, "CONTRIBUTES_TO", p))) # Save the relationships rels = db.create(*rels) return (users, projects, rels)
def create(cls, name, *emails):
    """Create a person node under the class root, attach one email node per
    address (linked both from the root and from the person), and return the
    wrapping Person instance.
    """
    created = graph_db.create(node(name=name), rel(cls._root, "PERSON", 0))
    person = created[0]
    for address in emails:
        graph_db.create(
            node(email=address),
            rel(cls._root, "EMAIL", 0),
            rel(person, "EMAIL", 0),
        )
    return Person(person)
def whipUp(self):
    """Fetch filia records from the REST API and load them into the graph.

    Each record is inserted via self.statement, then linked to its address,
    person and social-formation nodes when those can be resolved.
    NOTE(review): `object` shadows the builtin; `!= None` should be `is not None`.
    """
    objects = json.load(urlopen(self.api_url))["objects"]
    for object in objects:
        args = {}
        args["ID"]=object["id"]
        args["N"]=object["Name"]
        args["DR"]=object["DateReg"]
        #print args
        db = self.graph_db.cypher.begin()
        db.append(self.statement, args)
        db.commit()
        # Resolve related nodes for linking.
        f = getFilia(args["ID"])
        adr = getAddress(id = int(object["Address"]["id"]), Street = object["Address"]["Street"])
        #per = getPerson(id = int(object["Person"]["id"]), Name = (object["Person"]["Surname"] + " " + object["Person"]["Name"]))
        per = getPerson(Name = (object["Person"]["Surname"] + " " + object["Person"]["Name"]))
        soc = getSocialFormation(id = int(object["SocialFormation"]["id"]))
        if adr != None:
            self.graph_db.create(rel(f.one, "HAVE_ADDRESS", adr.one))
        if per != None:
            self.graph_db.create(rel(f.one, "FILIA_HAVE_PERSON", per.one))
        if soc != None:
            self.graph_db.create(rel(f.one, "HAVE_SocialFormation", soc.one))
def populate_graph(node):
    """Create a chain of Bayt (verse) nodes from the global `poems` dict.

    The given poem node CONTAINS the first bayt; each subsequent bayt is
    linked to its predecessor with FOLLOWED_BY, forming a linked list.
    The i==0 / i==1 / i>1 branches rely on `first` / `following` leaking
    from earlier iterations of the same loop.
    """
    previous = None
    for i in range(len(poems["abyat"])):
        if(i == 0):
            # First verse: attach to the poem node itself.
            first_bayt = poems["abyat"][0]["bayt"]
            first_sadr = poems["abyat"][0]["sadr"]
            first_ajz = poems["abyat"][0]["ajez"]
            first = Node("Bayt", name= first_bayt, sudr = first_sadr, ajez = first_ajz)
            graph_db.create(first)
            graph_db.create(rel(node,"CONTAINS",first))
            print("see me once")
        if(i == 1):
            # Second verse: follows the first.
            bayt = poems["abyat"][1]["bayt"]
            sadr = poems["abyat"][1]["sadr"]
            ajz = poems["abyat"][1]["ajez"]
            following = Node("Bayt", name = bayt, sudr = sadr, ajez = ajz)
            graph_db.create(following)
            graph_db.create(rel(first,"FOLLOWED_BY", following))
            print("in 1")
        if(i > 1):
            # Remaining verses: each follows the previous one.
            bayt = poems["abyat"][i]["bayt"]
            sadr = poems["abyat"][i]["sadr"]
            ajz = poems["abyat"][i]["ajez"]
            follow = Node("Bayt", name = bayt, sudr = sadr, ajez = ajz)
            graph_db.create(follow)
            if(i == 2):
                graph_db.create(rel(following,"FOLLOWED_BY", follow))
                print("2")
            elif(previous is not None):
                graph_db.create(rel(previous,"FOLLOWED_BY",follow))
            print(i)
            previous = follow
def create_update_promoters(self):
    """Read 'All Promoters.txt' and create or update Promoter nodes keyed
    by their transcription start site (tss) on this chromosome/organism.

    NOTE(review): tss is converted with int() before the `'' in [...]`
    check, so that check can never catch an empty tss (int('') would have
    raised already) — confirm intent.
    """
    f = open(self.directory + 'All Promoters.txt', 'r')
    data = f.readlines()
    f.close()
    created, updated = [0]*2
    for line in data:
        if line[0] == '#':
            continue
        regid, name, strand, tss, sigma, seq, evidence = line.split('\t')
        tss = int(tss)
        # skipping incomplete data
        if '' in [regid, name, strand, tss]:
            continue
        # Look for an existing promoter with this tss.
        query = 'MATCH (ch:Chromosome {name: "%s"})<-[:PART_OF]-' \
                '(p:Promoter {tss: %d})-[:PART_OF]->' \
                '(o:Organism {name: "%s"}) ' \
                'RETURN p' % (self.chro_name, tss, self.ecoli_name)
        res = neo4j.CypherQuery(self.connection, query)
        res_nodes = res.execute()
        # creating promoter
        if not res_nodes:
            promoter, term, rel_org, rel_chr, rel_term = self.connection.create(
                node({'name': name, 'start': tss, 'end': tss,
                      'strand': strand, 'tss': tss, 'seq': seq,
                      'evidence': evidence, 'Reg_id': regid,
                      'source': 'RegulonDB'}),
                node({'text': name}),
                rel(0, 'PART_OF', self.ecoli_node),
                rel(0, 'PART_OF', self.chro_node),
                rel(0, 'HAS_NAME', 1))
            promoter.add_labels('Promoter', 'Feature', 'BioEntity', 'DNA')
            term.add_labels('Term')
            created += 1
        else:
            # one promoter with the tss
            for record in res_nodes.data:
                promoter = record.values[0]
                promoter.update_properties({'seq': seq,
                                            'evidence': evidence,
                                            'Reg_id': regid})
                update_source_property(promoter)
                self.check_create_terms(promoter, name)
                updated += 1
            # duplicates!
            if len(res_nodes.data) > 1:
                logging.warning("There are %d nodes for a promoter with "
                                "tss in the %d position! It was skipped!"
                                % (len(res_nodes.data), tss))
    logging.info("%d promoters were updated!" % updated)
    logging.info("%d promoters were created!" % created)
def updateOrCreateTagged(memeDict): """ Description: Checks to see if the img, tag and relationship are in the db if not, it adds them at each level and increments the weighted property to indicate a stronger correlation Params: memeDict, dictionary, keys are urls and values are list of tags Returns: None """ # check to see if the image exists urls = memeDict.keys() for url in urls: pImg = graph_db.get_indexed_node("Img", "imgSrc", url) print "This should be an indexed img node: ", pImg # this is working right now tags = memeDict[url] if str(type(pImg)) != "<type 'NoneType'>": # if so check to see if the tag exists print "This should be a list", tags for tag in memeDict[url]: tagName = tagsExist([tag]) if len(tagName) > 0: pTagNode = graph_db.get_indexed_node("Tags", "tagName", tagName[0]) print "This should be an existing tag: ", pTagNode # if so check to see if there's a relationship pRel = graph_db.match_one(start_node=pImg, rel_type="TAGGED", end_node=pTagNode) print "This should find the relationship or return none: ", pRel # if so increment rel aweight if str(type(pRel)) != "<type 'NoneType'>": getOldWeight = pRel.get_properties() oldWeight = getOldWeight.get("aWeight") newWeight = oldWeight + 1 relProp = pRel.update_properties({"aWeight": newWeight}) print "This should be the old weight: ", oldWeight # else else: # create a relationship newRel = graph_db.create(rel(pImg, ("TAGGED", {"aWeight": 1}), pTagNode)) print "Hopefully this is a relationship: ", newRel # and add the default weight # else else: # create tag node makeNewTagNode = getTagNode(tag) newTagNode = graph_db.get_indexed_node("Tags", "tagName", tag) # and then create a relationship newRel = graph_db.create(rel(pImg, ("TAGGED", {"aWeight": 1}), newTagNode)) # else else: # create image node newImgNode = createImgNode({url: tags}) print "this should be a new img node:", newImgNode
def co_appearance(self, wid_a, wid_b):
    """two words appearing in the same document.
    create edge if not any, increment one otherwise"""
    n_a = self.node_for_word(wid_a)
    n_b = self.node_for_word(wid_b)
    # Existing COAPPEARS edges between the pair, in either direction.
    # NOTE(review): assumes match() returns a sequence supporting len() —
    # confirm against the py2neo version in use (later versions return a generator).
    r = self.graph_db.match(start_node=n_a, end_node=n_b, bidirectional=True)
    if len(r) == 0:
        # No edge yet: create one in each direction with count initialised to 0.
        r0, r1 = self.graph_db.create(
            rel(n_a, "COAPPEARS", n_b, count=0),
            rel(n_b, "COAPPEARS", n_a, count=0))
        r = [r0, r1]
    # Bump the counter on both directions of the pair.
    r[0]["count"] += 1
    r[1]["count"] += 1
def processClusters(cluster_dict, graph_db, root):
    """Write one cluster node per entry of cluster_dict (cluster_number ->
    iterable of pitcher ids), attach it to the root, and link every matching
    player node into its cluster.  One WriteBatch per cluster.
    """
    for number in cluster_dict:
        batch = neo4j.WriteBatch(graph_db)
        # The cluster node itself, hung off the root.
        cluster_node = batch.create({"name": "Cluster " + str(number),
                                     "cluster_number": number})
        batch.create(rel(root, "CLUSTER", cluster_node))
        batch.add_labels(cluster_node, "cluster")
        # Attach every player whose player_id matches a pitcher in this cluster.
        for pitcher in cluster_dict[number]:
            found = graph_db.find("player",
                                  property_key="player_id",
                                  property_value=int(pitcher))
            for player in found:
                batch.create(rel(player, "BELONGS TO", cluster_node))
        batch.submit()
def create_RBSs(self):
    """Read RBSs.txt and create one RBS (ribosome binding site) node per
    valid row, linked to the chromosome and to the closest matching gene.
    """
    f = open(self.directory + 'RBSs.txt', 'r')
    data = f.readlines()
    f.close()
    created = 0
    for line in data:
        if line[0] == '#':
            continue
        regid, gene, start, end, strand, center, seq, \
            evidence = line.split('\t')
        ### testing
        # Skip rows with missing key fields (start/end still strings here).
        if '' in [regid, strand, start, end] or 0 in [start, end]:
            continue
        start, end, center = [int(start), int(end), float(center)]
        # Find the gene(s) carrying this name on the right strand.
        query = 'MATCH (o:Organism {name: "%s"})<-[:PART_OF]-' \
                '(g:Gene {strand: "%s"})-[:HAS_NAME]-(t:Term {text: "%s"}) ' \
                'RETURN g' % (self.ecoli_name, strand, gene)
        res = neo4j.CypherQuery(self.connection, query)
        res_nodes = res.execute()
        if not res_nodes:
            continue
        elif len(res_nodes.data) == 1:
            g = res_nodes.data[0].values[0]
        else:
            # if there are many genes with the same name, we will
            # choose the closest by location gene
            genes = [min(g.values[0]['start'] + center,
                         g.values[0]['end'] + center)
                     for g in res_nodes.data]
            i = genes.index(min(genes))
            g = res_nodes.data[i].values[0]
        # Create the RBS node plus its two relationships in one call.
        rbs, rel_chr, rel_gene = self.connection.create(
            node({'evidence': evidence, 'Reg_id': regid,
                  'source': 'RegulonDB', 'start': start, 'end': end,
                  'strand': strand, 'seq': seq,
                  'center_from_tss': center}),
            rel(0, 'PART_OF', self.chro_node),
            rel(g, 'CONTAINS', 0))
        rbs.add_labels('RBS', 'Feature')
        created += 1
    logging.info('%d RBSs were created!' % created)
def set_configuration(graph_db, node_lst):
    """Install the static flow configuration: each (src, dst, flow_id) entry
    becomes one CONF_TO relationship with flow_size 1.  All relationships are
    created in a single call, in the listed order.
    """
    hops = [
        (0, 1, 1), (1, 2, 1),
        (0, 3, 2), (3, 4, 2), (4, 5, 2),
        (0, 3, 3), (3, 5, 3),
        (2, 4, 4), (4, 5, 4),
    ]
    edges = [
        rel(node_lst[src], "CONF_TO", node_lst[dst],
            {'flow_id': flow, 'flow_size': 1})
        for src, dst, flow in hops
    ]
    graph_db.create(*edges)
def titan_insert():
    """Create the Die Hard sample graph (5 nodes, 5 relationships) in one
    batched call and return how long the insert took as a timedelta.
    """
    begun = datetime.now()
    graph_db.create(
        node(name="Bruce Willis"),
        node(name="John McClane"),
        node(name="Alan Rickman"),
        node(name="Hans Gruber"),
        node(name="Nakatomi Plaza"),
        rel(0, "PLAYS", 1),
        rel(2, "PLAYS", 3),
        rel(1, "VISITS", 4),
        rel(3, "STEALS_FROM", 4),
        rel(1, "KILLS", 3),
    )
    finished = datetime.now()
    return finished - begun
def create_db(usercol, refcol, start, end):
    '''
    try to generate relationships using a different example from the fundamentals page

    Builds user/ref nodes and RECOMMENDED relationships for rows 11-99 of the
    input columns, then submits everything in one create() call.
    '''
    graph_db = neo4j.GraphDatabaseService("http://localhost:7474/db/data/")
    rowlist = []
    #nodes first
    for i in range(11, 100):
        rowlist.append (node(user=usercol[i]))
        rowlist.append (node(ref = refcol[i]))
    #relationships second
    for i in range(11, 100):
        rowlist.append(rel(start[i], "RECOMMENDED", end[i]))
    incubate = graph_db.create(*rowlist) #asterisk expands the list & might work better?
    #gives an error Incomplete Read if you try to do the whole thing at once, but
    #looks like you can do this in pieces in order to get the whole thing (?)
    #not sure if this is really necessary, should try +/- the format=pretty part
    neo4j._add_header('X-Stream', 'true;format=pretty')
def createNode(node_attributes, object_id, objects, elements, graph_db):
    """
    Create in Neo4j the object node and its standalone element nodes
    Returns the object node reference in Neo4j
    """
    # The attribute used to uniquely index this object type.
    index_field = objects[object_id][objects_config['index_field']]
    if index_field in node_attributes:
        # Indexed path: reuse an existing node with this index value or create it.
        object_node = graph_db.get_or_create_indexed_node(
            'ID',
            'index_field',
            node_attributes[index_field],
            node_attributes
            #{index_field: node_attributes[index_field]}
        )
        #object_node.set_properties(node_attributes)
    else:
        # No index value present: create an anonymous node.
        object_node, = graph_db.create(node(node_attributes))
    object_node.add_labels(objects[object_id][objects_config['label_field']])
    # if this object has standalone elements
    for field_name, value in node_attributes.items():
        if (object_id, field_name) in elements:
            element_attributes = elements[(object_id, field_name)]
            element_attributes[field_name] = value
            element_node, = graph_db.create(node(element_attributes))
            # label the nodes as elements
            element_node.add_labels("Element")
            graph_db.create(rel(object_node, "has element", element_node))
            # link the element node to a ses concept
            linkToSES(element_node, element_attributes, graph_db)
    return object_node
def test_can_cast_3_args():
    """rel() with three positional args yields an unbound Relationship."""
    r = rel("Alice", "KNOWS", "Bob")
    assert isinstance(r, neo4j.Relationship)
    assert not r.bound
    assert r.start_node == neo4j.Node("Alice")
    assert r.type == "KNOWS"
    assert r.end_node == neo4j.Node("Bob")
def add_ref(self, source, relationship, dest):
    """Create a single relationship between two existing nodes, submitted
    through a one-operation write batch.
    """
    writer = neo4j.WriteBatch(self.graph_db)
    writer.create(rel(source, relationship, dest))
    writer.submit()
def test_can_cast_3_tuple():
    """Casting a 3-tuple produces an unbound Relationship."""
    r = rel(("Alice", "KNOWS", "Bob"))
    assert isinstance(r, neo4j.Relationship)
    assert not r.bound
    assert r.start_node == neo4j.Node("Alice")
    assert r.type == "KNOWS"
    assert r.end_node == neo4j.Node("Bob")
def load_graph(csvfile, verbose=True):
    """Load a two-column CSV of (parent, child) names into the graph database,
    creating each name once and a "--" relationship per row; the name->node
    map is pickled to nodes.p afterwards.  Python 2 code.
    """
    graph_db = database()
    if verbose:
        print 'started new graph database'
    # get the graph database server going.
    #if you want to delete the database!
    # cd /usr/local/Cellar/neo4j/1.9.4/libexec/data
    # os.command(rm -R graph_db)
    # this will store in usr/local/Cellar/neo4j/community-1.9.2-unix/libexec/data
    #make sure graph DB initialized
    print 'Graph Version: ' + str(graph_db.neo4j_version)
    csvfile = open(csvfile)
    reader = csv.reader(csvfile, delimiter=',')
    nodes = {}  # keep track of nodes already in graph_db.
    def get_or_create_node(graph_db, name):
        # Cache-backed node lookup: only hit the server for unseen names.
        if name not in nodes:
            nodes[name], = graph_db.create(node(name=name))  #make the node if it doesn't exist
        return nodes[name]  #return the node
    print 'Loading graph into database...'
    for row in reader:
        parent = get_or_create_node(graph_db, row[0])
        child = get_or_create_node(graph_db, row[1])
        parent_child, = graph_db.create(rel(parent, "--", child))
    print 'Loaded graph into database'
    pickle.dump(nodes, open("nodes.p", "wb"))
def test_can_cast_3_tuple():
    """Casting a 3-tuple yields an abstract Relationship with raw endpoints."""
    r = rel(("Alice", "KNOWS", "Bob"))
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract
    assert r.start_node == "Alice"
    assert r.type == "KNOWS"
    assert r.end_node == "Bob"
def createAnalysis(self, target_uuid=None, target_genid=None, data_link=None, results_uuid=None, description_uuid=None):
    """Create an ANALYSIS node linked (via HAS_ANALYSIS) to exactly one
    target — either a bioentity located by genid, or a raw-data node located
    by uuid.  Optional properties are attached when given.  Returns the new
    node's uuid.
    """
    if (target_uuid is None) == (target_genid is None):
        raise Exception(
            "Exactly one of the following parameters must be specified: target_uuid, target_genid "
        )
    # Resolve the single target node.
    if target_genid is not None:
        target_node = self._getNodeByGenid(label=BIOENTITY, genid=target_genid)
    else:
        target_node = self._getNodeByUUID(label=RAW_DATA, uuid=target_uuid)
    # Build the property map, skipping optional values that were not supplied.
    props = {'uuid': self._getNewUUID()}
    for key, value in (('data_link', data_link),
                       ('results_uuid', results_uuid),
                       ('description_uuid', description_uuid)):
        if value is not None:
            props[key] = value
    batch = neo4j.WriteBatch(self.gdb)
    analysis_node = batch.create(node(props))
    batch.add_labels(analysis_node, ANALYSIS)
    batch.create(rel(target_node, HAS_ANALYSIS, analysis_node))
    batch.submit()
    return props['uuid']
def reblogs_into_db(reblogs):
    """Parse tumblr-style reblog note strings and store posters, rebloggers
    and sources as indexed People nodes, plus Reblogged_from relationships.

    NOTE(review): group(3) of rb_regex captures the whole ' and added:(...)'
    suffix, not just the comment text (that is group(4)) — confirm which is
    intended for the "comment" property.
    """
    graph_db = neo4j.GraphDatabaseService("http://localhost:7474/db/data/")
    people = graph_db.get_or_create_index(neo4j.Node, "People")
    p_regex = u"([a-z0-9-]*) posted this"
    rb_regex = u"([a-z0-9-]*) reblogged this from ([a-z0-9-]*)( and added:(.*)$)?"
    for reblog in reblogs:
        p_match = re.search(p_regex, reblog)
        rb_match = re.search(rb_regex, reblog)
        if p_match:
            # "X posted this" — original poster.
            poster = graph_db.get_or_create_indexed_node(
                "People", "name", p_match.group(1),
                properties={"name": p_match.group(1)})
            poster.add_labels("poster")
        elif rb_match:
            # "X reblogged this from Y [and added: ...]"
            reblogger = graph_db.get_or_create_indexed_node(
                "People", "name", rb_match.group(1),
                properties={"name": rb_match.group(1)})
            reblogger.add_labels("reblogger")
            source = graph_db.get_or_create_indexed_node(
                "People", "name", rb_match.group(2),
                properties={"name": rb_match.group(2)})
            source.add_labels("source")
            if rb_match.group(3):
                properties = {"comment": rb_match.group(3)}
            else:
                properties = {}
            graph_db.create(
                rel(reblogger, ("Reblogged_from", properties), source))
def createRelationships():
    """For each queued relationship spec in the global `relationships` list,
    look up both endpoint nodes by their stored _id and create the named
    relationship between them.
    """
    global relationships
    graph = Graph('http://localhost:7474/db/data')
    for spec in relationships:
        start_node = graph.find_one(spec["start"]["collection"],
                                    property_key="_id",
                                    property_value=str(spec["start"]["_id"]))
        end_node = graph.find_one(spec["end"]["collection"],
                                  property_key="_id",
                                  property_value=str(spec["end"]["_id"]))
        graph.create(rel(start_node, spec["name"], end_node))
def process_single_file_data(jsonArray):
    """Parse a JSON array of GitHub events, keep only issue events, and store
    each as REPOSITORY/ISSUE nodes joined by an ISSUE_<action> relationship.
    Python 2 code (print statements).
    """
    data = json.loads(jsonArray)
    issueevents = filter(is_issueevent, data)
    for ie in issueevents:
        # print ie
        eventType = ie["type"]
        repoId = ie["repo"]["id"]
        repoName = ie["repo"]["name"]
        issueId = ie["payload"]["issue"]["id"]
        issueTitle = ie["payload"]["issue"]["title"]
        issueAction = ie["payload"]["action"]
        issueCreatedAt = ie["payload"]["issue"]["created_at"]
        # print eventType
        # print "repository:", repoId, repoName
        # print "issue:", issueId, issueAction, issueCreatedAt
        #remove null value and non-primitive values as properties cannot contain nested objects
        issueProperties = top_level_properties(ie['payload']['issue'])
        # print issueProperties
        # print issueProperties["id"], issueProperties['action']
        repoNode = db.get_or_create_indexed_node(index_name="issueevents", key=repoId, value=repoName, properties=top_level_properties(ie['repo']))
        repoNode.add_labels("REPOSITORY")
        issueNode = db.get_or_create_indexed_node(index_name="issueevents", key=issueId, value=issueTitle, properties=issueProperties)
        issueNode.add_labels("ISSUE")
        # Relationship type encodes the action (e.g. ISSUE_opened).
        db.create(rel(repoNode, ("ISSUE_" + issueAction, {"issue_created_at": issueCreatedAt, "issue_action":issueAction}), issueNode))
        print "created relationship: " + "ISSUE_" + str(issueAction) + "\tbetween reposirotyNode: " + str(repoId) + "\t and IssueNode: " + str(issueId)
def load(csvfile,verbose = True):
    """Load a two-column CSV of (parent, child) names into the graph database,
    creating each name once and a "--" relationship per row; the name->node
    map is pickled to nodes.p afterwards.  Python 2 code.
    """
    graph_db = database()
    if verbose:
        print 'started new graph database'
    # get the graph database server going.
    #if you want to delete the database!
    # cd /usr/local/Cellar/neo4j/1.9.4/libexec/data
    # os.command(rm -R graph_db)
    # this will store in usr/local/Cellar/neo4j/community-1.9.2-unix/libexec/data
    #make sure graph DB initialized
    print 'Graph Version: ' + str(graph_db.neo4j_version)
    csvfile = open(csvfile)
    reader = csv.reader(csvfile,delimiter=',')
    nodes = {}  # keep track of nodes already in graph_db.
    def get_or_create_node(graph_db, name):
        # Cache-backed node lookup: only hit the server for unseen names.
        if name not in nodes:
            nodes[name], = graph_db.create(node(name=name))  #make the node if it doesn't exist
        return nodes[name]  #return the node
    print 'Loading graph into database...'
    for row in reader:
        parent = get_or_create_node(graph_db, row[0])
        child = get_or_create_node(graph_db, row[1])
        parent_child, = graph_db.create(rel(parent, "--", child))
    print 'Loaded graph into database'
    pickle.dump(nodes, open("nodes.p", "wb" ) )
def test_can_cast_3_args(self):
    """rel() with three positional args yields an abstract Relationship."""
    r = rel("Alice", "KNOWS", "Bob")
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract()
    assert r.start_node == "Alice"
    assert r.type == "KNOWS"
    assert r.end_node == "Bob"
def priklad(graph_db):
    """Demo routine: wipe the database, create (Alice)-[:KNOWS]->(Bob), then
    run a Cypher query printing each matched pair through a row handler.
    """
    # attach to a local graph database service
    graph_db.clear()
    # (Alice)-[:KNOWS]->(Bob); rel indices refer to the node() arguments.
    alice, bob, ab = graph_db.create(
        node(name="Alice"),
        node(name="Bob"),
        rel(0, "KNOWS", 1),
    )
    query = ("START a = node({A}) "
             "MATCH (a)-[:KNOWS]->(b) "
             "RETURN a, b")
    params = {"A": alice.id}

    def print_row(row):
        # Called once per result row.
        print("imam ", row)
        a, b = row
        print(a["name"] + " knows " + b["name"])

    x = cypher.execute(graph_db, query, params, row_handler=print_row)
    print(x)
def test_can_cast_3_tuple():
    """Casting a 3-tuple yields an abstract Relationship with Node endpoints."""
    r = rel(("Alice", "KNOWS", "Bob"))
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract
    assert r.start_node == neo4j.Node("Alice")
    assert r.type == "KNOWS"
    assert r.end_node == neo4j.Node("Bob")
def ss(arr, label, parents=None):
    """Create one labelled node per item of arr; when a parallel `parents`
    list is given, link each new node to the parent at the same position.
    """
    for idx, item in enumerate(arr):
        created, = db().create(node(item))
        created.add_labels(label)
        if parents:
            db().create(rel((created, "realate_to", parents[idx])))
def createRel(self, n1, relation, n2):
    """Queue creation of one relationship between n1 and n2 in the batch,
    and index it under its content hash in the 'announce' index.

    Bug fix: the relationship was previously added to the batch twice (the
    first relBatch.create() result was discarded, then an identical create
    was issued for indexing) and the 'rel' counter incremented twice —
    producing duplicate relationships on submit.  Now created exactly once.
    """
    rel_hash = self.relHash(n1, relation, n2)
    r = rel(n1, relation, n2, {'rel_type': relation, 'id_hash': rel_hash})
    # Single batch create; its reference is reused for indexing.
    tmp = self.relBatch.create(r)
    self.relBatch.add_to_index(neo4j.Relationship, 'announce', 'key', rel_hash, tmp)
    self.incBC('rel')
def ss(arr, label, parents=None):
    """Create a labelled node for every element of arr, optionally wiring
    each one to the corresponding entry of `parents` via 'realate_to'.
    """
    for position, element in enumerate(arr):
        fresh, = db().create(node(element))
        fresh.add_labels(label)
        if parents:
            db().create(rel((fresh, "realate_to", parents[position])))
def test_can_cast_4_args():
    """rel() with a trailing properties dict keeps the properties."""
    r = rel("Alice", "KNOWS", "Bob", {"since": 1999})
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract
    assert r.start_node == neo4j.Node("Alice")
    assert r.type == "KNOWS"
    assert r.end_node == neo4j.Node("Bob")
    assert r["since"] == 1999
def test_can_cast_rel(graph):
    """Casting an already-bound relationship keeps its binding and endpoints."""
    a, b, ab = graph.create({}, {}, (0, "KNOWS", 1))
    result = rel(ab)
    assert isinstance(result, neo4j.Relationship)
    assert not result.is_abstract
    assert result.start_node == a
    assert result.type == "KNOWS"
    assert result.end_node == b
def test_can_cast_3_tuple_with_unbound_rel():
    """A 3-tuple whose middle item is (type, props) carries the properties."""
    r = rel(("Alice", ("KNOWS", {"since": 1999}), "Bob"))
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract
    assert r.start_node == "Alice"
    assert r.type == "KNOWS"
    assert r.end_node == "Bob"
    assert r["since"] == 1999
def test_can_cast_3_tuple_with_unbound_rel():
    """A 3-tuple with a (type, props) middle item yields Node endpoints and props."""
    r = rel(("Alice", ("KNOWS", {"since": 1999}), "Bob"))
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract
    assert r.start_node == neo4j.Node("Alice")
    assert r.type == "KNOWS"
    assert r.end_node == neo4j.Node("Bob")
    assert r["since"] == 1999
def test_can_cast_4_args():
    """rel() with a properties dict yields an unbound Relationship with props."""
    r = rel("Alice", "KNOWS", "Bob", {"since": 1999})
    assert isinstance(r, neo4j.Relationship)
    assert not r.bound
    assert r.start_node == neo4j.Node("Alice")
    assert r.type == "KNOWS"
    assert r.end_node == neo4j.Node("Bob")
    assert r["since"] == 1999
def test_can_cast_3_args_with_mid_tuple():
    """Middle argument may be a (type, props) tuple."""
    r = rel("Alice", ("KNOWS", {"since": 1999}), "Bob")
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract
    assert r.start_node == neo4j.Node("Alice")
    assert r.type == "KNOWS"
    assert r.end_node == neo4j.Node("Bob")
    assert r["since"] == 1999
def test_can_cast_4_args():
    """rel() with a properties dict keeps raw endpoints and the properties."""
    r = rel("Alice", "KNOWS", "Bob", {"since": 1999})
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract
    assert r.start_node == "Alice"
    assert r.type == "KNOWS"
    assert r.end_node == "Bob"
    assert r["since"] == 1999
def test_can_cast_3_args_with_mid_tuple():
    """Middle (type, props) tuple: raw endpoints, properties preserved."""
    r = rel("Alice", ("KNOWS", {"since": 1999}), "Bob")
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract
    assert r.start_node == "Alice"
    assert r.type == "KNOWS"
    assert r.end_node == "Bob"
    assert r["since"] == 1999
def test_can_cast_4_args(self):
    """A trailing string argument is treated as a relationship label."""
    r = rel("Alice", "KNOWS", "Bob", "Friendship")
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract()
    assert r.start_node == "Alice"
    assert r.type == "KNOWS"
    assert r.end_node == "Bob"
    assert "Friendship" in r._labels
def test_can_cast_kwargs():
    """Keyword arguments become relationship properties."""
    r = rel("Alice", "KNOWS", "Bob", since=1999)
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract
    assert r.start_node == neo4j.Node("Alice")
    assert r.type == "KNOWS"
    assert r.end_node == neo4j.Node("Bob")
    assert r["since"] == 1999
def register(self, username, email, password):
    """Create (or fetch) the user node keyed by username and make it
    FOLLOW itself, then return the node.
    """
    new_user = user.get_or_create("username", username, {
        "username": username,
        "email": email,
        "password": password,
    })
    # Self-follow so the user's own posts show up in their feed.
    graph_db.create(rel(new_user, "FOLLOWED", new_user))
    return new_user
def test_can_cast_kwargs():
    """Keyword args become properties; endpoints stay raw."""
    r = rel("Alice", "KNOWS", "Bob", since=1999)
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract
    assert r.start_node == "Alice"
    assert r.type == "KNOWS"
    assert r.end_node == "Bob"
    assert r["since"] == 1999
def test_can_cast_4_tuple():
    """Casting a 4-tuple (start, type, end, props) yields an unbound rel."""
    r = rel(("Alice", "KNOWS", "Bob", {"since": 1999}))
    assert isinstance(r, neo4j.Relationship)
    assert not r.bound
    assert r.start_node == neo4j.Node("Alice")
    assert r.type == "KNOWS"
    assert r.end_node == neo4j.Node("Bob")
    assert r["since"] == 1999
def test_can_cast_4_tuple(self):
    """Casting a 4-tuple yields an abstract rel with raw endpoints and props."""
    r = rel(("Alice", "KNOWS", "Bob", {"since": 1999}))
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract()
    assert r.start_node == "Alice"
    assert r.type == "KNOWS"
    assert r.end_node == "Bob"
    assert r["since"] == 1999
def check_create_terms(self, bioentity, name):
    """Attach a Term node holding `name` to the bioentity via HAS_NAME,
    unless the bioentity's own 'name' property already equals it.
    Raises TypeError when the argument is not a neo4j.Node.
    """
    if not isinstance(bioentity, gb.neo4j.Node):
        raise TypeError('The node argument must be an object of neo4j.Node class!')
    # Nothing to do when the entity already carries this name.
    if bioentity['name'] == name:
        return
    term, _ = self.connection.create(
        node({'text': name}),
        rel(0, 'HAS_NAME', bioentity))
    term.add_labels('Term')
def test_can_cast_3_args_with_mid_tuple_and_props():
    """Properties from the mid-tuple and from kwargs are merged."""
    r = rel("Alice", ("KNOWS", {"since": 1999}), "Bob", foo="bar")
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract
    assert r.start_node == neo4j.Node("Alice")
    assert r.type == "KNOWS"
    assert r.end_node == neo4j.Node("Bob")
    assert r["since"] == 1999
    assert r["foo"] == "bar"
def test_can_cast_4_args_and_props():
    """Properties dict and kwargs are merged on the unbound relationship."""
    r = rel("Alice", "KNOWS", "Bob", {"since": 1999}, foo="bar")
    assert isinstance(r, neo4j.Relationship)
    assert not r.bound
    assert r.start_node == neo4j.Node("Alice")
    assert r.type == "KNOWS"
    assert r.end_node == neo4j.Node("Bob")
    assert r["since"] == 1999
    assert r["foo"] == "bar"
def test_can_cast_rel(self):
    """Casting a concrete relationship preserves binding and endpoints."""
    graph_db = neo4j.GraphDatabaseService()
    a, b, ab = graph_db.create({}, {}, (0, "KNOWS", 1))
    result = rel(ab)
    assert isinstance(result, neo4j.Relationship)
    assert not result.is_abstract()
    assert result.start_node == a
    assert result.type == "KNOWS"
    assert result.end_node == b
def test_can_cast_args_and_kwargs(self):
    """A label argument and keyword properties can be combined."""
    r = rel("Alice", "KNOWS", "Bob", "Friendship", since=1999)
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract()
    assert r.start_node == "Alice"
    assert r.type == "KNOWS"
    assert r.end_node == "Bob"
    assert "Friendship" in r._labels
    assert r["since"] == 1999
def like(user_id, id):
    """Toggle a 'like' from user_id on the item `id`.

    Returns (delta, total): delta is +1 when the like was added, -1 when an
    existing like was removed; total is the item's like count afterwards.
    """
    liker = user_node(user_id)
    existing = list(db().match(rel_type='like', end_node=by_id(id), start_node=liker))
    if not existing:
        # No like yet from this user — add one.
        db().create(rel((liker, "like", by_id(id))))
        return 1, len(list(db().match(rel_type='like', end_node=by_id(id))))
    # Already liked — remove it.
    existing[0].delete()
    return -1, len(list(db().match(rel_type='like', end_node=by_id(id))))
def test_can_use_return_values_as_references(graph):
    """Batch create() return values can reference earlier batch entries."""
    batch = WriteBatch(graph)
    alice_ref = batch.create(node(name="Alice"))
    bob_ref = batch.create(node(name="Bob"))
    batch.create(rel(alice_ref, "KNOWS", bob_ref))
    created = batch.submit()
    knows = created[2]
    assert isinstance(knows, Relationship)
    assert knows.start_node["name"] == "Alice"
    assert knows.end_node["name"] == "Bob"
def test_can_cast_3_tuple_with_unbound_rel(self):
    """Mid-tuple of (type, labels, props) carries labels and properties."""
    r = rel(("Alice", ("KNOWS", {"Friendship"}, {"since": 1999}), "Bob"))
    assert isinstance(r, neo4j.Relationship)
    assert r.is_abstract()
    assert r.start_node == "Alice"
    assert r.type == "KNOWS"
    assert r.end_node == "Bob"
    assert r._labels == {"Friendship"}
    assert r["since"] == 1999
def addRawDataTargetGenid_batch(self, raw_data_uuid, target_genid_list):
    """Link every resolvable target bioentity to the raw-data node in one
    write batch.  Returns the genids that could not be resolved.
    """
    target_nodes = self._getNodeByGenid_batch(label=BIOENTITY, genid_list=target_genid_list)
    raw_data_node = self._getNodeByUUID(label=RAW_DATA, uuid=raw_data_uuid)
    batch = neo4j.WriteBatch(self.gdb)
    missing = []
    for genid, target in target_nodes.iteritems():
        if target is None:
            missing.append(genid)
        else:
            batch.create(rel(target, HAS_RAW_DATA, raw_data_node))
    batch.submit()
    return missing
def test_create_function(self):
    """Batch-created nodes/rel come back in submission order, fully linked."""
    self.batch.create(node(name="Alice"))
    self.batch.create(node(name="Bob"))
    self.batch.create(rel(0, "KNOWS", 1))
    alice, bob, knows = self.batch.submit()
    assert isinstance(alice, Node)
    assert alice["name"] == "Alice"
    assert isinstance(bob, Node)
    assert bob["name"] == "Bob"
    assert isinstance(knows, Relationship)
    assert knows.start_node == alice
    assert knows.type == "KNOWS"
    assert knows.end_node == bob
    self.recycling = [knows, alice, bob]
def createRawData(self, target_genid=None, data_link=None):
    """Create a RAW_DATA node (optionally carrying a data_link and optionally
    linked from a target bioentity) in one write batch; return its uuid.
    """
    props = {'uuid': self._getNewUUID()}
    if data_link is not None:
        props['data_link'] = data_link
    batch = neo4j.WriteBatch(self.gdb)
    raw_node = batch.create(node(props))
    batch.add_labels(raw_node, RAW_DATA)
    if target_genid:
        # Hook the raw data onto its originating bioentity.
        target = self._getNodeByGenid(label=BIOENTITY, genid=target_genid)
        batch.create(rel(target, HAS_RAW_DATA, raw_node))
    batch.submit()
    return props['uuid']
def graph_clusters(clusters):
    """Persist the clusters as nodes and add a 'LINKS TO' relationship for
    every ordered pair (i < j) where cluster i connects to cluster j.
    Nodes and relationships are submitted in a single create() call, so the
    rel() indices refer to node positions in that call.
    """
    graph_db = neo4j.GraphDatabaseService('http://localhost:7474/db/data/')
    cluster_nodes = []
    for c in clusters:
        cluster_nodes.append(
            node(title=c.title(), pages=[child.title for child in c.children]))
    links = []
    total = len(clusters)
    for i in range(0, total):
        for j in range(i + 1, total):
            if clusters[i].connects_to(clusters[j]):
                print('%s connects to %s' % (clusters[i].title(), clusters[j].title()))
                links.append(rel(i, 'LINKS TO', j))
    graph_db.create(*(cluster_nodes + links))
def linkToSES(node, attributes, graph_db): """ Find the ses class for the given element node. Create a new ses node if not exist, link the element node to it. """ ses_class = attributes[elements_config['ses_class']] if ses_class is None or ses_class == '': print 'Error: node ' + attributes + ' doesn\'t have ses class' sys.exit(0) ses_node = graph_db.get_or_create_indexed_node( 'SES', # index name 'ses_class', # index key ses_class, # index value {elements_config['ses_class']: ses_class}) ses_node.set_labels('ses_class') graph_db.create(rel(node, "belongs to", ses_node))