def obj_get(self, bundle, **kwargs):
    """Return a lemma node with its value words (optionally with full translations).

    kwargs['pk'] is the neo4j node id; ?full=... in the request switches the
    value words to their full representation including translations.
    """
    gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
    lemma = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" + kwargs['pk'] + '/')
    # wrap the node's data in a tastypie-compatible object
    new_obj = DataObject(kwargs['pk'])
    new_obj.__dict__['_data'] = lemma.properties
    new_obj.__dict__['_data']['id'] = kwargs['pk']
    # collect the value words attached to this lemma
    valuesArray = []
    # BUG FIX (idiom): iterate the relationships directly instead of
    # the C-style `for v in range(0, len(values), 1)` index loop
    for relation in lemma.relationships.outgoing(types=["values"]):
        val = relation.end
        val.properties['resource_uri'] = API_PATH + 'word/' + str(val.id) + '/'
        val.properties['translations'] = []
        # force API into full representation if cache is enabled
        if bundle.request.GET.get('full'):
            # NOTE(review): CTS is interpolated into Cypher unescaped — confirm it is trusted
            translations = gdb.query("""MATCH (d:`Word`)-[:translation]->(w:`Word`) WHERE d.CTS='""" + val.properties['CTS'] + """' RETURN DISTINCT w ORDER BY ID(w)""")
            translationArray = []
            for t in translations:
                trans = t[0]
                transurl = trans['self'].split('/')
                trans['data']['resource_uri'] = API_PATH + 'word/' + transurl[len(transurl)-1] + '/'
                translationArray.append(trans['data'])
            val.properties['translations'] = translationArray
        valuesArray.append(val.properties)
    new_obj.__dict__['_data']['values'] = valuesArray
    return new_obj
def getAllNodesAndRelations(): db1 = GraphDatabase("http://localhost:7474/db/data/") q = '''START n=node(*) MATCH n-[r]->m RETURN n,r,m''' results = db1.query(q,returns=(client.Node, unicode, client.Relationship)) print len(results) graph = defaultdict() startnode = [] endnode = [] rel = [] for i in xrange(len(results)): for word in results[i]: if word.__class__.__name__ == 'unicode': json1_str = str(word) rel.append(getRelType(json1_str)) if word.__class__.__name__ == 'Node': startnode.append(str(word.properties['name'])) if word.__class__.__name__ == 'Relationship': endnode.append(str(word.properties['name'])) for i in xrange(len(startnode)): graph[(startnode[i],endnode[i])] = rel[i] for word in graph: print word,graph[word] return graph
def obj_get(self, bundle, **kwargs):
    """Return a user document with its sentences and a per-language map of related translations."""
    gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
    document = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" + kwargs['pk'] + '/')
    new_obj = DataObject(kwargs['pk'])
    data = new_obj.__dict__['_data'] = document.properties
    data['id'] = kwargs['pk']
    # the owner is the start node of the incoming "owns" relation
    data['user'] = str(document.relationships.incoming(types=["owns"])[0].start.properties['username'])
    sentences = gdb.query("""MATCH (u:`User`)-[:owns]->(d:`UserDocument`)-[:sentences]->(s:`UserSentence`) WHERE d.CTS='""" + document.properties['CTS'] + """' RETURN DISTINCT s ORDER BY ID(s)""")
    sentenceArray = []
    for row in sentences:
        sent = row[0]
        url = sent['self'].split('/')
        # This might seem a little hacky, but API resources are very decoupled,
        # which gives great performance instead of creating relations amongst
        # objects and referencing/dereferencing foreign-keyed fields.
        sent['data']['resource_uri'] = API_PATH + 'user_sentence/' + url[-1] + '/'
        sentenceArray.append(sent['data'])
    data['sentences'] = sentenceArray
    # dictionary of related translations of this document, keyed by language
    relatedDocuments = gdb.query("""MATCH (d:`UserDocument`)-[:sentences]->(s:`UserSentence`)-[:words]->(w:`Word`)-[:translation]->(t:`Word`)<-[:words]-(s1:`Sentence`)<-[:sentences]-(d1:`Document`) WHERE HAS (d.CTS) AND d.CTS='""" + document.properties['CTS'] + """' RETURN DISTINCT d1 ORDER BY ID(d1)""")
    data['translations'] = {}
    for row in relatedDocuments:
        doc = row[0]
        url = doc['self'].split('/')
        lang = doc['data']['lang']
        if lang in CTS_LANG:
            data['translations'][lang] = doc['data']
            data['translations'][lang]['resource_uri'] = API_PATH + 'document/' + url[-1] + '/'
    return new_obj
def obj_get(self, bundle, **kwargs):
    """Return a word node with its sentence URI, optional lemma URI, and translations."""
    gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
    word = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" + kwargs['pk'] + '/')
    new_obj = DataObject(kwargs['pk'])
    data = new_obj.__dict__['_data'] = word.properties
    data['id'] = kwargs['pk']
    # the containing sentence is the start of the incoming "words" relation
    data['sentence_resource_uri'] = API_PATH + 'sentence/' + str(word.relationships.incoming(types=["words"])[0].start.id) + '/'
    # a lemma is optional: only link it when an incoming "values" relation exists
    lemmaRels = word.relationships.incoming(types=["values"])
    if len(lemmaRels) > 0:
        data['lemma_resource_uri'] = API_PATH + 'lemma/' + str(lemmaRels[0].start.id) + '/'
    translations = gdb.query("""MATCH (d:`Word`)-[:translation]->(w:`Word`) WHERE d.CTS='""" + word.properties['CTS'] + """' RETURN DISTINCT w ORDER BY ID(w)""")
    translationArray = []
    for row in translations:
        trans = row[0]
        url = trans['self'].split('/')
        trans['data']['resource_uri'] = API_PATH + 'word/' + url[-1] + '/'
        translationArray.append(trans['data'])
    data['translations'] = translationArray
    return new_obj
def buildings(request):
    """Return every Building node (name/address/certification/leed_id) as JSON.

    Non-GET requests get a JSON error payload instead.
    """
    status = {}
    if request.method == 'GET':
        gdb = GraphDatabase(NEO4J_HOST, NEO4J_USERNAME, NEO4J_PASSWORD)
        q = """MATCH (n:Building) return n.name, n.address, n.certification, n.leed_id"""
        results = gdb.query(q=q)
        buildings = []
        for row in results:
            # columns arrive positionally, in the order of the RETURN clause
            buildings.append({
                'name': row[0],
                'address': row[1],
                'certification': row[2],
                'leed_id': row[3],
            })
        status.update({'buildings': buildings})
        status.update({'status': 'Success'})
        return HttpResponse(json.dumps(status), content_type="application/json")
    else:
        status.update({'buildings': ''})
        status.update({'status': 'Invalid Request'})
        # BUG FIX: this branch previously omitted the JSON content type,
        # inconsistent with the success branch
        return HttpResponse(json.dumps(status), content_type="application/json")
def getSubgraph(): op={"nodes":[],"links":[]} nodes=[] db1 = GraphDatabase("http://localhost:7474/") q1 = ' '.join(['MATCH n-[r]->m','WHERE n.name="batman"','RETURN n,r,m;']) q2 = ' '.join(['MATCH n-[r]->m WHERE n.name="batman"','WITH n,r,m MATCH q-[r2]->p','WHERE n-[r]->q AND n-[r]->p','RETURN q,r2,p limit 200;']) print "starting" results1=db1.query(q1,returns=(client.Node, client.Relationship, client.Node)) print "HERE" for result in results1: n1=result[0].properties['name'] n2=result[2].properties['name'] try: i1=nodes.index(n1) except: nodes.append(n1) i1=nodes.index(n1) op["nodes"].append({"name":n1}) try: i2=nodes.index(n2) except: nodes.append(n2) i2=nodes.index(n2) op["nodes"].append({"name":n2}) r = result[1].type op["links"].append({"source":i1,"target":i2,"type":r}) print op results2 = db1.query(q2,returns=(client.Node, client.Relationship, client.Node)) print "THERE!" for result in results2: n1=result[0].properties['name'] n2=result[2].properties['name'] #try: i1=nodes.index(n1) """ except: nodes.append(n1) i1=nodes.index(n1) op["nodes"].append({"name":n1}) """ #try: i2=nodes.index(n2) """ except: nodes.append(n2) i2=nodes.index(n2) op["nodes"].append({"name":n2}) """ r = result[1].type op["links"].append({"source":i1,"target":i2,"type":r}) print op json.dump(op,open('subgraph.json','w'))
def obj_get(self, bundle, **kwargs):
    """Return a user sentence with its owner, translated-sentence links, and words.

    kwargs['pk'] is the neo4j node id; ?full=... expands each word with its
    translations.
    """
    # query parameters (optional) for the short-sentence approach
    attrlist = ['CTS', 'length', 'case', 'dialect', 'head', 'form', 'posClass', 'cid', 'gender', 'tbwid', 'pos', 'value', 'degree', 'number', 'lemma', 'relation', 'isIndecl', 'ref', 'posAdd', 'mood', 'tense', 'voice', 'person']
    query_params = {}
    for obj in bundle.request.GET.keys():
        if obj in attrlist and bundle.request.GET.get(obj) is not None:
            query_params[obj] = bundle.request.GET.get(obj)
        elif obj.split('__')[0] in attrlist and bundle.request.GET.get(obj) is not None:
            query_params[obj] = bundle.request.GET.get(obj)
    # NOTE(review): query_params is collected but never used below — kept for parity
    gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
    sentence = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" + kwargs['pk'] + '/')
    documentNode = sentence.relationships.incoming(types=["sentences"])[0].start
    # sentence parameters
    new_obj = DataObject(kwargs['pk'])
    new_obj.__dict__['_data'] = sentence.properties
    new_obj.__dict__['_data']['id'] = kwargs['pk']
    # IMPROVEMENT: reuse documentNode instead of re-fetching the same
    # incoming "sentences" relationship a second time
    new_obj.__dict__['_data']['document_resource_uri'] = API_PATH + 'user_document/' + str(documentNode.id) + '/'
    new_obj.__dict__['_data']['user'] = str(documentNode.relationships.incoming(types=["owns"])[0].start.properties['username'])
    # dictionary of related translations of this sentence, keyed by language
    relatedSentences = gdb.query("""MATCH (s:`UserSentence`)-[:words]->(w:`Word`)-[:translation]->(t:`Word`)<-[:words]-(s1:`Sentence`) WHERE HAS (s.CTS) AND s.CTS='""" + sentence.properties['CTS'] + """' RETURN DISTINCT s1 ORDER BY ID(s1)""")
    new_obj.__dict__['_data']['translations'] = {}
    for rs in relatedSentences:
        sent = rs[0]
        url = sent['self'].split('/')
        for lang in CTS_LANG:
            if sent['data']['CTS'].find(lang) != -1:
                new_obj.__dict__['_data']['translations'][lang] = API_PATH + 'sentence/' + url[len(url)-1] + '/'
    # the words of the sentence and related information
    words = gdb.query("""MATCH (d:`UserSentence`)-[:words]->(w:`Word`) WHERE d.CTS='""" + sentence.properties['CTS'] + """' RETURN DISTINCT w ORDER BY ID(w)""")
    wordArray = []
    for w in words:
        word = w[0]
        url = word['self'].split('/')
        word['data']['resource_uri'] = API_PATH + 'word/' + url[len(url)-1] + '/'
        wordNode = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" + url[len(url)-1] + '/')
        # full translation representation on request
        if bundle.request.GET.get('full'):
            translations = gdb.query("""MATCH (d:`Word`)-[:translation]->(w:`Word`) WHERE d.CTS='""" + wordNode.properties['CTS'] + """' RETURN DISTINCT w ORDER BY ID(w)""")
            translationArray = []
            for t in translations:
                trans = t[0]
                transurl = trans['self'].split('/')
                trans['data']['resource_uri'] = API_PATH + 'word/' + transurl[len(transurl)-1] + '/'
                translationArray.append(trans['data'])
            word['data']['translations'] = translationArray
        wordArray.append(word['data'])
    new_obj.__dict__['_data']['words'] = wordArray
    return new_obj
def extract_node_features(nodes, multiclass=False): X = [] Y = [] index_map = {} gdb = GraphDatabase('http://ec2-54-187-76-157.us-west-2.compute.amazonaws.com:7474/db/data/') for i, node in enumerate(nodes): # phi = [handle_length, num_non_alpha in handle, belief, num_links, |indicators for source urls|] phi = [] node_handle = node['node_handle'] # handle_length phi.append(len(node_handle)) # num_non_alpha characters phi.append(len([c for c in node_handle if not c.isalpha()])) q = 'MATCH (n{handle:' + node_handle + '})-[r]-(x) RETURN r, n, x' links = gdb.query(q=q) source_urls = set() belief = 0 neighbor_beliefs = [] for link in links: s_url = link[0]['data']['source_url'] source_urls.add(s_url) try: belief = link[1]['data']['belief'] except KeyError: pass #belief phi.append(belief) # num_links phi.append(len(links)) # indicator variables for urls for source in GRAPH_SOURCES: if source in source_urls: phi.append(1) else: phi.append(0) action_type = node['action_type'] if not multiclass: # binary classification, 'GOOD_NODE' = 1 if action_type == "'GOOD_NODE'": Y.append(1) else: Y.append(2) else: # multiclass classification if action_type == "'GOOD_NODE'": Y.append(1) elif action_type == "'REMOVE_NODE'": Y.append(2) elif action_type == "'SPLIT_NODE'": Y.append(3) elif action_type == "'RENAME_NODE'": Y.append(4) else: print action_type index_map[node['id_node']] = i X.append(phi) return X, Y, index_map
def execute(): url = 'http://ec2-54-211-27-90.compute-1.amazonaws.com:8080/db/data/' gdb = GraphDatabase(url) query = "start n=node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() and n.postedTime + 3600000 > timestamp() - 24*60*60*1000 return \"0\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 2*24*60*60*1000 return \"-1\" as Day, n as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 2*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 3*24*60*60*1000 return \"-2\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 3*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 4*24*60*60*1000 return \"-3\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 4*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 5*24*60*60*1000 return \"-4\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 5*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 6*24*60*60*1000 return \"-5\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 6*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 7*24*60*60*1000 return \"-6\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 7*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 8*24*60*60*1000 return \"-7\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 8*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 9*24*60*60*1000 return \"-8\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = 
\"tweet\" and n.postedTime + 3600000 < timestamp() - 9*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 10*24*60*60*1000 return \"-9\" as Day, count(n) as Tweets" results = gdb.query(query).get_response() print results
def least_recently(self, request, **kwargs):
    """Rank grammar refs by the average timestamp of the user's submissions, oldest first."""
    data = {}
    data['time_ranking'] = []
    gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
    time = {}
    # process time of grammar of submissions of a user
    # IMPROVEMENT: the original created a second, redundant GraphDatabase here
    submissions = gdb.query("""MATCH (n:`User`)-[:submits]->(s:`Submission`) WHERE HAS (n.username) AND n.username = '******' RETURN s""")
    # epoch reference used to convert parsed timestamps into seconds
    unix = datetime(1970, 1, 1)
    # collect the submission times per ref key
    for sub in submissions.elements:
        try:
            if len(sub[0]['data']['ref']) == 0:
                return self.error_response(request, {'error': 'Reference keys are necessary for calculating averaged lesson progress.'}, response_class=HttpBadRequest)
            t = dateutil.parser.parse(sub[0]['data']['timestamp'])
            t = t.replace(tzinfo=None)
            diff = (t - unix).total_seconds()
            # IDIOM: setdefault replaces the try/append/except/create dance
            time.setdefault(sub[0]['data']['ref'], []).append(diff)
        except KeyError:
            continue
    # average per ref, rendered as 'YYYY-MM-DDTHH:MM:SS', then sorted ascending
    average = {}
    for ref in time.keys():
        total = 0.0
        for value in time[ref]:
            total = total + value
        av = total / len(time[ref])
        av = datetime.fromtimestamp(int(av)).strftime('%Y-%m-%d %H:%M:%S')
        average[ref] = av.replace(' ', 'T')
    sorted_dict = sorted(average.iteritems(), key=operator.itemgetter(1))
    for entry in sorted_dict:
        data['time_ranking'].append({'ref': entry[0], 'average': average[entry[0]], 'title': Grammar.objects.filter(ref=entry[0])[0].title, 'query': Grammar.objects.filter(ref=entry[0])[0].query})
    # return the json
    return self.create_response(request, data)
def get(self, params={}): depth = params.get("depth", 3) q = params.get("q", "artist_10165") query = u"""start n=node:node_auto_index(id="{}") match n-[r*1..{}]-m return m,r""".format(q, depth) print query from neo4jrestclient.client import GraphDatabase, Node, Relationship gdb = GraphDatabase("http://localhost:7474/db/data") res = gdb.query(q=query, returns=(Node, Relationship)) return res
def getAllNodes(): db1 = GraphDatabase("http://localhost:7474/db/data/") q = '''START n=node(*) RETURN n LIMIT 5''' results = db1.query(q,returns=client.Node) nodes = [] print len(results) for i in xrange(len(results)): for word in results[i]: nodes.append(str(word.properties['name'])) return nodes
def UpsertItem(itemid):
    """Return the node for *itemid*, creating it if it does not exist yet."""
    db = GraphDatabase("http://localhost:7474/db/data/")
    # the trailing "!" makes the predicate false for nodes lacking the property
    item_query = "START ee=node(*) WHERE ee.itemid! = \"" + itemid + "\" RETURN ee;"
    result = db.query(q=item_query, returns=(client.Node, unicode, client.Relationship))
    if len(result) == 0:
        return db.nodes.create(itemid=itemid)
    item = None
    for row in result:
        item = row.pop()
    return item
def UpsertUser(userid):
    """Return the node for *userid*, creating it if it does not exist yet."""
    db = GraphDatabase("http://localhost:7474/db/data/")
    # the trailing "!" makes the predicate false for nodes lacking the property
    user_query = "START ee=node(*) WHERE ee.userid! = \"" + userid + "\" RETURN ee;"
    result = db.query(q=user_query, returns=(client.Node, unicode, client.Relationship))
    if len(result) == 0:
        return db.nodes.create(userid=userid)
    user = None
    for row in result:
        user = row.pop()
    return user
def getFoes(name): db1 = GraphDatabase("http://localhost:7474/db/data/") q = '''MATCH (n { name: \''''+name+'''\'})-[r]->m WHERE type(r) = 'FOE' RETURN n,r,m''' results = db1.query(q,returns=(client.Node, unicode, client.Relationship)) endnode = [] for i in xrange(len(results)): for word in results[i]: if word.__class__.__name__ == 'Relationship': endnode.append(str(word.properties['name'])) print endnode return endnode
def calculateKnowledgeMap(self, user):
    """Aggregate a user's submissions into vocab counts, ref-key grammar params,
    lemma frequencies, and the total lemma frequency.

    Returns [vocab, ref, lemmas, lemmaFreq].
    """
    gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
    submissions = gdb.query("""MATCH (n:`User`)-[:submits]->(s:`Submission`) WHERE HAS (n.username) AND n.username = '******' RETURN s""")
    vocab = {}
    ref = {}
    lemmas = {}
    lemmaFreq = 0
    # flatten the ref and collect the vocab knowledge
    for sub in submissions.elements:
        try:
            for word in sub[0]['data']['encounteredWords']:
                try:
                    vocab[word] = vocab[word] + 1
                except KeyError:
                    vocab[word] = 1
                # get the lemma's frequency (two vocabs can share a lemma,
                # so the lemma value is used as the key)
                try:
                    lemma = gdb.query("""MATCH (l:`Lemma`)-[:values]->(n:`Word`) WHERE n.CTS = '""" + word + """' RETURN l.value, l.frequency""")
                    if lemma.elements[0][0] is not None and lemma.elements[0][0] != "":
                        lemmas[lemma.elements[0][0]] = lemma.elements[0][1]
                # in case of weird submission test data for encounteredWords
                except IndexError:
                    continue
            if sub[0]['data']['ref'] not in ref:
                # derive the morph params from the grammar query of the
                # submission's ref key ("a=b&c=d" pairs)
                try:
                    params = {}
                    grammar = Grammar.objects.filter(ref=sub[0]['data']['ref'])[0].query.split('&')
                    # BUG FIX: the original iterated over the freshly-created
                    # empty `params` dict, so the pairs were never parsed
                    for pair in grammar:
                        params[pair.split('=')[0]] = pair.split('=')[1]
                    ref[sub[0]['data']['ref']] = params
                except IndexError:
                    continue
        except KeyError:
            continue
    # total lemma/vocab frequency
    for freq in lemmas:
        lemmaFreq = lemmaFreq + int(lemmas[freq])
    return [vocab, ref, lemmas, lemmaFreq]
def dehydrate(self, bundle):
    """Attach the sibling-concept links of this bundle's label."""
    gdb = GraphDatabase("http://localhost:7474/db/data/")
    query = """START a = node:`kmap-Concept`(label = "%s") MATCH a<-[:concepts]-b-[:concepts]->c RETURN b, c; """ % bundle.data["label"]
    rows = gdb.query(q=query, returns=(Node, Node))
    links = []
    for b, c in rows:
        links.append({"type": b["type"], "label": c["label"]})
    bundle.data["links"] = links
    return bundle
def UpsertRating(user, item, rating):
    """Create or update the "Rated" relationship between *user* and *item*."""
    db = GraphDatabase("http://localhost:7474/db/data/")
    rating_query = "START a = node(" + str(user.id) + ") MATCH a-[r]-b WHERE b.itemid=\""
    rating_query += str(item.properties["itemid"]) + "\" RETURN r;"
    result = db.query(rating_query, params={}, returns=(client.Node, unicode, client.Relationship))
    if len(result) == 0:
        user.relationships.create("Rated", item, rating=rating)
    else:
        for rel in result:
            r = rel.pop()
            # only write when the stored rating actually differs
            if r["rating"] != rating:
                r["rating"] = rating
def __init__(self, url):
    """Connect to the graph database at *url* and start with an empty node-link graph."""
    self.gdb = GraphDatabase(url)
    # d3-style node-link structure
    self.graph = {"nodes": [], "links": []}
    # node-kind tags
    self.ENTITY = 1
    self.CATEGORY = 2
class StorageTest(TestCase):
    """Integration tests for the graph-backed person storage."""

    def setUp(self):
        self.graph = GraphDatabase(neo4j_url)

    def tearDown(self):
        storage.clear()

    def test_should_insert_person_into_graph_db_as_node(self):
        patricia = Person("Nekesa", "Patricia", datetime.now(), gender.FEMALE)
        storage.add_person(patricia)
        people = self.query('match (node) return node;')
        self.assertEqual(people[0], patricia.jsonify())

    def test_should_provide_all_people_in_graph(self):
        patricia = Person("Nekesa", "Patricia", datetime.now(), gender.FEMALE)
        jesse = Person("Wejuli", "Jesse", datetime.now(), gender.MALE)
        storage.add_person(patricia)
        storage.add_person(jesse)
        self.assertEqual(storage.all(), [patricia.jsonify(), jesse.jsonify()])

    def query(self, query_string):
        """Run raw Cypher and return the data dict of each row's first column."""
        query_sequence = self.graph.query(query_string)
        return [element[0]['data'] for element in query_sequence.elements]
def getRelationship(name1,name2): db1 = GraphDatabase("http://localhost:7474/db/data/") q = '''MATCH (n { name: \''''+name1+'''\'})-[r]->(m { name: \''''+name2+'''\'}) RETURN n,r,m''' results = db1.query(q,returns=(client.Node, unicode, client.Relationship)) rel = [] for i in xrange(len(results)): for word in results[i]: if word.__class__.__name__ == 'unicode': json1_str = str(word) rel.append(getRelType(json1_str)) break if(len(rel)>=1): print rel[0] return rel[0] return 0
def shortest_path(origin,destiny): db = GraphDatabase("http://localhost:7474/db/data/") q = lambda origin, destiny: """MATCH (from), (to) , path = (from)-[:DOMAIN_LINK*]->(to) WHERE from.prop_node_id='%s' AND to.prop_node_id='%s' RETURN path AS shortestPath, reduce(distance = 0, r in relationships(path) | distance+r.STFIPS1) AS totalDistance, nodes(path) ORDER BY totalDistance ASC LIMIT 1"""%(origin,destiny) path = db.query(q(origin,destiny))[0] if path==[]: print "There is no path" return path names = [x['data']['prop_node_id'] for x in path[2]] print 'Cheaper path: '+' - '.join(names) return path
def prepare(fromUser):
    """Return the items from *fromUser* whose names exist as nodes in the graph.

    Order (and duplicates) of *fromUser* are preserved.
    """
    db = GraphDatabase("http://localhost:7474", username="******", password="******")
    nodesQuery = "MATCH (n) RETURN n.name"
    nodeValues = db.query(nodesQuery, data_contents=True)
    # IMPROVEMENT: membership tests against a set are O(1) instead of the
    # original O(n) list scans; also avoid shadowing the builtin `input`
    known_names = set(row[0] for row in nodeValues)
    return [item for item in fromUser if item in known_names]
def post_list(self, request, **kwargs):
    """
    Create a new document object and return it if the user is authenticated
    and exists. Create a new neo node in case the user doesn't exist on this side.
    """
    gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
    self.method_check(request, allowed=['post'])
    self.is_authenticated(request)
    if not request.user or not request.user.is_authenticated():
        return self.create_response(request, {'success': False, 'error_message': 'You are not authenticated, %s.' % request.user})
    data = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
    # guard clause: a username is mandatory
    if request.user.username is None:
        return self.error_response(request, {'error': 'User is required.'}, response_class=HttpBadRequest)
    # get the user via neo look-up or create a new one
    userTable = gdb.query("""MATCH (u:`User`) WHERE HAS (u.username) AND u.username='******' RETURN u""")
    if len(userTable) > 0:
        userNode = gdb.nodes.get(userTable[0][0]['self'])
    else:
        userNode = gdb.nodes.create(username=request.user.username)
        userNode.labels.add("User")
    document = gdb.nodes.create(
        CTS=data.get("CTS"),
        author=data.get("author"),
        lang=data.get("lang"),
        name=data.get("name")
    )
    document.labels.add("UserDocument")
    data['resource_uri'] = '/api/v1/user_document/' + str(document.id) + '/'
    data['user'] = request.user.username
    if document is None:
        # in case an error wasn't already raised
        raise ValidationError('Document node could not be created.')
    # connect the new document to the owning user node
    userNode.owns(document)
    return self.create_response(request, data)
def getNewUserId(self):
    """Return the graph's node count plus one as the next free user id."""
    conf = DBConf.DBConf()
    elements = conf.getNeo4jConfig()
    # config rows: [url, user, password]
    db = GraphDatabase(elements[0], elements[1], elements[2])
    query = "MATCH (`n: *`) RETURN count(*) as c"
    results = db.query(query, returns=(str))
    for res in results:
        return int(res[0]) + 1
def neo():
    """Render texts containing the mistakes selected in the tag_query form."""
    db = GraphDatabase("http://localhost:7474", username="******", password="******")
    tag_value = False
    res = []
    try:
        tag_value = request.form.getlist('tag_query')
    except:
        pass
    if tag_value:
        res = []
        # the last form entry is skipped, as in the original UI contract
        for tag in tag_value[:-1]:
            query = 'MATCH (t:Texts)-[r:contains]->(m:Mistake) WHERE m.name="%s" RETURN t, type(r), m' % tag
            results = db.query(query, returns=(client.Node, str, client.Node))
            for r in results:
                res.append(r[0]["name"] + ' ' + r[1] + ' ' + r[2]["name"])
    return render_template("neo.html", res=res, tags=tags)
def test(): gdb = GraphDatabase("http://localhost:7474/db/data/") person=gdb.labels.get("Person") # for p in person.get(name='Lana Wachowski'): # #for p in person.filter(gdb.query("born","gte",1970)): # print p["name"] # print p.properties # print p["born"] #qry="start n=node(10) match n-[r]-() return n, n.name, r" #qry="MATCH (n { name: 'Tom Cruise' })-[r:ACTED_IN]->(c) RETURN c.title,r.roles" #qry="MATCH (n:Person {name:'Tom Cruise'} )return n.name, n.type" qry='MATCH (a { name: "Tom Cruise" })-[r:ACTED_IN]->(m) RETURN a.born > 1920, "Im a literal",(a)-->(),"acted in "+m.title' res=gdb.query(qry) #results = gdb.query(res, returns=(client.Node, unicode, client.Relationship)) for i in res: print i[3]
class GraphDB(object):
    """Thin wrapper around a neo4j REST connection managing Paper nodes."""

    def __init__(self, database="http://localhost:7474/db/data"):
        self.gdb = GraphDatabase(database)

    def addPaper(self, uid, title, authors):
        """Create a new Paper node with the given properties."""
        new_node = self.gdb.node()
        new_node.labels.add('Paper')
        new_node['uid'] = uid
        new_node['title'] = title
        new_node['authors'] = authors

    def getNode(self, uid):
        """Return the Paper node with the given uid, or None when absent."""
        get_query = 'MATCH (n:Paper) WHERE n.uid=%d RETURN n' % uid
        qRes = self.gdb.query(q=get_query, returns=Node)
        # BUG FIX: the original `qRes == None` never matched an empty result
        # sequence, so qRes[0] raised IndexError for missing uids
        if not qRes:
            return None
        return qRes[0][0]  # first element of first row is the expected node

    def editPaper(self, uid, key, value):
        """Set *key* to *value* on the paper; False when the paper is missing."""
        node = self.getNode(uid)
        if not node:
            return False
        node.set(key, value)

    def deletePaper(self, uid):
        """Delete the paper and its relationships; False on query failure."""
        delQuery = 'MATCH (n { uid: %d })-[r]-() DELETE n, r' % uid
        try:
            self.gdb.query(q=delQuery)
        # BUG FIX: `except e:` referenced an undefined name at exception time
        except Exception:
            return False
        return True

    def setReference(self, sourceUID, targetUID):
        """Link two papers with a REFERENCE relation; False if either is missing."""
        srcNode = self.getNode(sourceUID)
        targetNode = self.getNode(targetUID)
        if srcNode is None or targetNode is None:
            return False
        srcNode.relationships.create("REFERENCE", targetNode)
        return True
def least_accurate(self, request, **kwargs):
    """Rank grammar refs by the user's average submission accuracy, worst first."""
    data = {}
    data['accuracy_ranking'] = []
    gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
    accuracy = {}
    # process accuracy of grammar of submissions of a user
    # IMPROVEMENT: the original created a second, redundant GraphDatabase here
    submissions = gdb.query("""MATCH (n:`User`)-[:submits]->(s:`Submission`) WHERE HAS (n.username) AND n.username = '******' RETURN s""")
    # collect the accuracy values per ref key
    for sub in submissions.elements:
        try:
            # IDIOM: setdefault replaces the nested try/append/except/create;
            # a missing 'ref' or 'accuracy' key still skips the row
            accuracy.setdefault(sub[0]['data']['ref'], []).append(sub[0]['data']['accuracy'])
        except KeyError:
            continue
    # calculate the averages and sort ascending by them
    average = {}
    for ref in accuracy.keys():
        total = 0.0
        for value in accuracy[ref]:
            total = total + value
        average[ref] = total / len(accuracy[ref])
    sorted_dict = sorted(average.iteritems(), key=operator.itemgetter(1))
    for entry in sorted_dict:
        data['accuracy_ranking'].append({'ref': entry[0], 'average': average[entry[0]], 'title': Grammar.objects.filter(ref=entry[0])[0].title, 'query': Grammar.objects.filter(ref=entry[0])[0].query})
    # return the json
    return self.create_response(request, data)
def getCategoryExp(self,email): try: conf = DBConf.DBConf() elements = conf.getNeo4jConfig() graphDatabase = GraphDatabase(elements[0],elements[1],elements[2]) query = "MATCH (n) Where n.email='"+email+"' return n.cat1,n.cat2,n.cat3,n.cat4,n.cat5,n.cat6,n.cat7,n.cat8" results= graphDatabase.query(query,returns = (str,str,str,str,str,str,str,str)) for r in results: elements ={} elements ={1:r[0],2:r[1],3:r[2], 4:r[3],5:r[4],6:r[5],7:r[6],8:r[7]} return elements except Exception ,e: print e.message return []
#Diana de Leon 18607 #Fatima Albeño 18060 #Luis Perez Aju 18212 #Programa de consulta de doctores y receta de medicinas #Base de datos en NEO4J #Documentacion extraida de https://neo4j-rest-client.readthedocs.io/en/latest/ from neo4jrestclient.client import GraphDatabase gdb = GraphDatabase("http://localhost:7474", username="******", password="******") def addPaciente(nombre, genero, edad, peso ,estatura): paciente= gdb.nodes.create(Nombre=nombre, Genero=genero, Edad=edad, Peso=peso, Estatura=estatura) paciente.labels.add("Paciente") def addDoctor (nombre, especialidad, telefono, correo, ubicacion): doctor= gdb.nodes.create(Nombre=nombre, Especialidad=especialidad, Telefono=telefono, Correo=correo, Ubicacion=ubicacion) doctor.labels.add("Doctor") def addMedicina (nombre, dosis, vecesaldia, cantdias): medicina = gdb.nodes.create(Nombre=nombre, Dosis=dosis, Veces=vecesaldia, Dias=cantdias) medicina.labels.add("Medicina") def addVisita (nomdoctor, nompaciente): pacientes= gdb.labels.get("Paciente") pacientes.all() doctores= gdb.labels.get("Doctor") doctores.all() pacientes.get(Nombre=nompaciente)[0].relationships.create("consulto",doctores.get(Nombre=nomdoctor)[0]) def prescripcion (nompaciente, nomdoctor,nommedicina):
from neo4jrestclient.client import GraphDatabase
import os
from neo4jrestclient.constants import RAW
from urlparse import urlparse
import re
from flask import jsonify, request, Blueprint

from_api = Blueprint('from_api', __name__)

# gdb = GraphDatabase(os.environ.get("GRAPHENEDB_URL"))
graphurl = "http://*****:*****@app52089542qmnwmy.sb05.stations.graphenedb.com:24789"
gdb = GraphDatabase(graphurl)


@from_api.route('/api/gettags', methods=['GET', 'POST'])
def gettagslist():
    """Return the 30 most-connected Tag nodes as JSON."""
    tagsq = "MATCH (tags:Tag)-[r]-() RETURN tags, COUNT(r) ORDER BY COUNT(r) DESC LIMIT 30"
    nodes = getNodes(gdb, tagsq)
    finalnodes = []
    for node in nodes:
        # deduplicate while keeping the query's ordering
        if node not in finalnodes:
            finalnodes.append(node)
    result = {'nodes': finalnodes}
    print("Got tags")
    # BUG FIX: the view previously fell off the end and returned None,
    # which Flask rejects; serialize the collected result instead
    return jsonify(result)
class ExportSummary(object):
    """Pulls recording metadata out of a Neo4j graph and appends a one-line
    text summary per recording to summary.txt.

    NOTE(review): written for Python 2 (print statements, dict.iteritems).
    """

    def __init__(self):
        # Graph DB connection (credentials masked in this dump).
        self.gdb = GraphDatabase("http://10.223.244.129:7474/db/data/", username="******", password="******")

    def get_recnames(self):
        # Fetch every RecordingName node's name; result.rows is a list of
        # single-element lists (see generate_summry docstring example).
        rec_qry = ''' MATCH (RecordingName:RecordingName) RETURN RecordingName.name '''
        result = self.gdb.query(q=rec_qry, data_contents=True)
        print "count of recs :: ", len(result.rows), result.rows
        return result.rows

    def generate_summry(self, rec_names_lst):
        """
        [[u'20141215_1540_{08627FB2-FDC3-414E-92B3-EBE39884DA8F}.rrec'], [u'20150731_0439_{7B17D7CD-A8C7-4F64-B08B-4702C55F3D32}.rrec']]
        :param rec_names_lst: list of single-element lists of recording names
        :return: None (summaries are printed and written to summary.txt)
        """
        for evry_recname in rec_names_lst:
            # One query per recording: walk from RecordingName to its
            # country / road / weather / light / object metadata.
            q = '''MATCH (CommonData)<-[cd:cd]-(RecordingName)-[c:c]->(Country)- [rt:rt]->(RoadType)- [w:w]->(WeatherCondition)- [lc:lc]->(LightCondition)- [ob:ob]->(ObjectType) WHERE RecordingName.name = '{}' RETURN RecordingName.name as RecName, Country.name as Country, RoadType.name as RoadType, WeatherCondition.name as WeatherCondition, LightCondition.name as LightCondition, CommonData.project as Project, CommonData.function as Function, CommonData.department as Department, ObjectType.name as Objects '''.format(
                str(evry_recname[0]))
            result = self.gdb.query(q=q, data_contents=True)
            rows = result.rows
            columns = result.columns
            # print "-->", rows
            if rows is not None:
                for item in rows:
                    # Zip column names with row values into a one-dict list.
                    dt_lst = [dict(zip(columns, item))]
                    sumry_tmplt = self.create_summary(dt_lst)
                    print "sumry_tmplt :: ", sumry_tmplt
                    print "\n"
                    self.write_smry_to_txt(sumry_tmplt)

    def create_summary(self, result_lst):
        """Render one result row dict into the summary sentence template."""
        res_dict = result_lst[0]
        # Stringify all values; list values element-wise.
        res_dict = {
            str(k): ([str(e) for e in v] if type(v) is list else str(v))
            for k, v in res_dict.iteritems()
        }
        # For every list-valued field add a companion "<key>_num" count,
        # consumed by the {..._num} placeholders in the template below.
        updated_dict = {}
        for ech_key, ech_val in res_dict.iteritems():
            if type(ech_val) is list:
                updated_dict["{}_num".format(ech_key)] = len(ech_val)
        res_dict.update(updated_dict)
        # print "res_dict :: ", res_dict
        SUMMARY_TEMPLATE = '''The recording {RecName} has labels for {Project} {Function} for {Department} which has observations of {Country_num} 
Countries {Country} driven in {RoadType_num} road types {RoadType} under {WeatherCondition_num} weather conditions {WeatherCondition} with {LightCondition_num} Light Conditions {LightCondition} having {Objects_num} Objects {Objects}. '''.format(**res_dict)
        return SUMMARY_TEMPLATE

    def write_smry_to_txt(self, smry_txt):
        # Append mode: summaries accumulate across runs.
        print "write_smry_to_txt >>>>>>>>>>>>"
        with open("summary.txt", "a") as fhandle:
            # fhandle.write(smry_txt + "\n\n")
            fhandle.write(smry_txt + "\n")
def post_list(self, request, **kwargs):
    """
    Create a new sentence object, and the word objects containing the
    translations, build the relations; return the json data.

    Expects a JSON body with `document_resource_uri`, `CTS` and `words`
    (each word carrying CTS, lang, tbwid, value and a `translations`
    list of CTS ids).  Raises ValidationError when required data is
    missing; returns HTTP 400 for unknown user/document.
    """
    gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
    self.method_check(request, allowed=['post'])
    self.is_authenticated(request)
    if not request.user or not request.user.is_authenticated():
        return self.create_response(request, {
            'success': False,
            'error_message': 'You are not authenticated, %s.' % request.user
        })
    data = self.deserialize(request, request.body,
                            format=request.META.get('CONTENT_TYPE', 'application/json'))
    if data.get("document_resource_uri") is None:
        raise ValidationError('document_resource_uri required.')
    # ".../document/<id>/" -> second-to-last path segment is the node id.
    documentResourceUriArray = data.get("document_resource_uri").split('/')
    documentId = documentResourceUriArray[len(documentResourceUriArray) - 2]
    # get the user and document via neo look-up or create a new one
    if request.user.username is not None:
        documentTable = gdb.query("""MATCH (u:`User`)-[:owns]->(d:`UserDocument`) WHERE HAS (u.username) AND ID(d)=""" + documentId + """ AND u.username='******' RETURN u,d""")
        # BUG FIX: indexing an empty result raises IndexError, not
        # ValidationError; the previous `except ValidationError` clauses were
        # unreachable, so missing users/documents crashed the view instead of
        # returning the intended 400 responses.
        try:
            documentTable.elements[0][0]
        except IndexError:
            return self.error_response(request, {'error': 'User does not exist.'},
                                       response_class=HttpBadRequest)
        try:
            documentTable.elements[0][1]
        except IndexError:
            return self.error_response(request, {'error': 'Document does not exist.'},
                                       response_class=HttpBadRequest)
        document = gdb.nodes.get(documentTable[0][1]['self'])
        # create sentence object
        sentence = gdb.nodes.create(CTS=data.get("CTS"), length=len(data.get("words")))
        sentence.labels.add("UserSentence")
        # loop to create words
        sentencestring = ''
        for w in data.get("words"):
            sentencestring = sentencestring + "" + w["value"] + " "
            word = gdb.nodes.create(
                CTS=w["CTS"],
                lang=w["lang"],
                length=len(w["value"]),
                tbwid=w["tbwid"],
                value=w["value"])
            word.labels.add("Word")
            # loop to create links to translations (both directions)
            for cts in w["translations"]:
                translation = gdb.query("""MATCH (w:`Word`) WHERE HAS (w.CTS) AND w.CTS='""" + cts + """' RETURN w""")
                transNode = gdb.nodes.get(translation[0][0]['self'])
                transNode.translation(word)
                word.translation(transNode)
            sentence.words(word)
        # sentence as string
        sentence['sentence'] = sentencestring
        # document-sentence relation
        document.sentences(sentence)
        # maybe here some extra resource_uri information to return
        # data['resource_uri'] = '/api/v1/document_user/' + str(document.id) + '/'
        if sentence is None:
            raise ValidationError('Document node could not be created.')
        # save sentence to document
        # userNode.owns(document)
        return self.create_response(request, data)
    else:
        return self.error_response(request, {'error': 'User is required.'},
                                   response_class=HttpBadRequest)
from neo4jrestclient.client import GraphDatabase
from neo4jrestclient import client

# Local Neo4j connection (credentials masked in this dump).
db = GraphDatabase("http://localhost:7474", username="******", password="******")

# Who does Alice follow?  Typed result rows: (node, relationship type, node).
query = 'MATCH (u:Usuario)-[r:follows]->(m:Usuario) WHERE u.name="Alice" RETURN u, type(r), m'
rows = db.query(query, returns=(client.Node, str, client.Node))

# Print each follow edge as "(source)-[relation]->(target)".
for source, rel_type, target in rows:
    print("(%s)-[%s]->(%s)" % (source["name"], rel_type, target["name"]))
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen
import os
import re
from neo4jrestclient.client import GraphDatabase
from neo4jrestclient import client
from html.parser import HTMLParser
import io
import hhh as g

# Neo4j connection (credentials masked in this dump).
db = GraphDatabase("http://localhost:7474", username="******", password="******")
my_url = "https://en.wikipedia.org/wiki/Xiaomi_Redmi"
my_url1 = "https://en.wikipedia.org/wiki/Acer_Liquid_A1"


def fetchdata1(U_rl):
    """Open U_rl and return the response; on any failure, retry with my_url1.

    NOTE(review): bare except swallows all errors, and the recursion has no
    depth limit — a persistently failing URL recurses until RecursionError.
    """
    try:
        client = urlopen(U_rl)  # shadows the module-level `client` import
        return (client)
    except:
        print("failed try again")
        return fetchdata1(my_url1)


def fetchdata(url):
    # NOTE(review): the `url` parameter is ignored — the body always opens the
    # module-level `my_url`; confirm whether that is intended.
    try:
        client = urlopen(my_url)
        return (client)
    # NOTE(review): function body truncated in this chunk after `except:`.
    except:
def obj_get(self, bundle, **kwargs):
    """Fetch one Sentence node (by pk) as a DataObject, with its document
    URI, related translations, and word list; results are cached per
    variant (plain / short / full / full+short).

    GET params: `full` adds per-word translations; `short` filters words
    through self.shorten using the recognised attribute params below.
    Raises BadRequest when `short` filtering matches nothing.
    """
    # Serve cached variants first.
    # BUG FIX: the full+short branch previously (a) checked the wrong key
    # ("sentence_full_short%s", missing underscore, never set anywhere) and
    # (b) was guarded by `not full and not short`, i.e. the wrong variant.
    if cache.get("sentence_%s" % kwargs['pk']) is not None and not bundle.request.GET.get(
            'full') and not bundle.request.GET.get('short'):
        return cache.get("sentence_%s" % kwargs['pk'])
    elif bundle.request.GET.get('short') and not bundle.request.GET.get('full') and cache.get(
            "sentence_short_%s" % kwargs['pk']) is not None:
        return cache.get("sentence_short_%s" % kwargs['pk'])
    elif bundle.request.GET.get('full') and not bundle.request.GET.get('short') and cache.get(
            "sentence_full_%s" % kwargs['pk']) is not None:
        return cache.get("sentence_full_%s" % kwargs['pk'])
    elif bundle.request.GET.get('full') and bundle.request.GET.get('short') and cache.get(
            "sentence_full_short_%s" % kwargs['pk']) is not None:
        return cache.get("sentence_full_short_%s" % kwargs['pk'])

    # query parameters (optional) for short sentence approach
    attrlist = [
        'CTS', 'length', 'case', 'dialect', 'head', 'form', 'posClass',
        'cid', 'gender', 'tbwid', 'pos', 'value', 'degree', 'number',
        'lemma', 'relation', 'isIndecl', 'ref', 'posAdd', 'mood', 'tense',
        'voice', 'person'
    ]
    query_params = {}
    for obj in bundle.request.GET.keys():
        # Accept both plain (case=...) and suffixed (case__contains=...) params.
        if obj in attrlist and bundle.request.GET.get(obj) is not None:
            query_params[obj] = bundle.request.GET.get(obj)
        elif obj.split('__')[0] in attrlist and bundle.request.GET.get(
                obj) is not None:
            query_params[obj] = bundle.request.GET.get(obj)

    gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
    sentence = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" + kwargs['pk'] + '/')

    # get the sentence parameters
    new_obj = DataObject(kwargs['pk'])
    new_obj.__dict__['_data'] = sentence.properties
    new_obj.__dict__['_data']['id'] = kwargs['pk']
    new_obj.__dict__['_data'][
        'document_resource_uri'] = API_PATH + 'document/' + str(
            sentence.relationships.incoming(
                types=["sentences"])[0].start.id) + '/'

    # get a dictionary of related translation of this sentence
    relatedSentences = gdb.query(
        """MATCH 
(s:`Sentence`)-[:words]->(w:`Word`)-[:translation]->(t:`Word`)<-[:words]-(s1:`Sentence`) WHERE HAS (s.CTS) AND s.CTS='"""
        + sentence.properties['CTS'] + """' RETURN DISTINCT s1 ORDER BY ID(s1)""")
    new_obj.__dict__['_data']['translations'] = {}
    for rs in relatedSentences:
        sent = rs[0]
        url = sent['self'].split('/')
        # Match the language tag embedded in the CTS, e.g. "...-grc:".
        for lang in CTS_LANG:
            if sent['data']['CTS'].find("-" + lang + ":") != -1:
                new_obj.__dict__['_data']['translations'][
                    lang] = API_PATH + 'sentence/' + url[len(url) - 1] + '/'

    # get the words and lemma resource uri of the sentence
    words = gdb.query(
        """MATCH (d:`Sentence`)-[:words]->(w:`Word`) WHERE d.CTS='""" +
        sentence.properties['CTS'] + """' RETURN DISTINCT w ORDER BY ID(w)""")
    wordArray = []
    for w in words:
        word = w[0]
        url = word['self'].split('/')
        word['data']['resource_uri'] = API_PATH + 'word/' + url[len(url) - 1] + '/'
        wordNode = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" +
                                 url[len(url) - 1] + '/')
        # get the lemma
        lemmaRels = wordNode.relationships.incoming(types=["values"])
        if len(lemmaRels) > 0:
            word['data']['lemma_resource_uri'] = API_PATH + 'lemma/' + str(
                lemmaRels[0].start.id) + '/'
        # get the translations of a word if parameter is set
        if bundle.request.GET.get('full'):
            translations = gdb.query(
                """MATCH (d:`Word`)-[:translation]->(w:`Word`) WHERE d.CTS='"""
                + wordNode.properties['CTS'] +
                """' RETURN DISTINCT w ORDER BY ID(w)""")
            translationArray = []
            for t in translations:
                trans = t[0]
                transurl = trans['self'].split('/')
                trans['data'][
                    'resource_uri'] = API_PATH + 'word/' + transurl[
                        len(transurl) - 1] + '/'
                translationArray.append(trans['data'])
            word['data']['translations'] = translationArray
        wordArray.append(word['data'])
    wordArray = sort_words(wordArray)

    # if short=True return only words of the short sentence
    if bundle.request.GET.get('short'):
        wordArray = self.shorten(wordArray, query_params)
        if wordArray is None:
            #return None
            raise BadRequest("Sentence doesn't hit your query.")
    new_obj.__dict__['_data']['words'] = wordArray

    # deal with caching here -> all are different objects
    # BUG FIX: the `short` branch was a plain `if` after the full+short case,
    # so a full+short request also overwrote the short-only cache entry with
    # the full variant; it is now an elif chain with one entry per variant.
    if bundle.request.GET.get('full') and bundle.request.GET.get('short'):
        if cache.get("sentence_full_short_%s" % kwargs['pk']) is None:
            cache.set("sentence_full_short_%s" % kwargs['pk'], new_obj, None)
    elif bundle.request.GET.get('short'):
        if cache.get("sentence_short_%s" % kwargs['pk']) is None:
            cache.set("sentence_short_%s" % kwargs['pk'], new_obj, None)
    elif bundle.request.GET.get('full'):
        if cache.get("sentence_full_%s" % kwargs['pk']) is None:
            cache.set("sentence_full_%s" % kwargs['pk'], new_obj, None)
    else:
        if cache.get("sentence_%s" % kwargs['pk']) is None:
            cache.set("sentence_%s" % kwargs['pk'], new_obj, None)
    return new_obj
def make_connection(username, password):
    """Open a connection to the local Neo4j server with the given credentials."""
    local_server = "http://localhost:7474"
    return GraphDatabase(local_server, username=username, password=password)
from neo4jrestclient.client import GraphDatabase ## Connect to the database - replace localhost with ## the location of your server if it isn't running locally gdb = GraphDatabase("http://localhost:7474/db/data/") print "These are the extensions you can work with:" print gdb.extensions.Airports origin = "PQR" destination = "LMN" ## Create two airports ## (The create_new_airport endpoint returns a boolean ## representing the success or failure of the call) if gdb.extensions.Airports.create_new_airport(call_letters=origin): print "Created Airport {}".format(origin) else: print "ISSUE: Airport {} was not successfully created - perhaps those call letters are already in use".format( origin) if gdb.extensions.Airports.create_new_airport(call_letters=destination): print "Created Airport {}".format(destination) else: print "ISSUE: Airport {} was not successfully created - perhaps those call letters are already in use".format( destination) ## Add a flight between the two airports ## (The add_new_flight endpoint returns a boolean ## representing the success or failure of the call) if gdb.extensions.Airports.add_new_flight(
#coding:utf-8 from neo4jrestclient.client import GraphDatabase from neo4jrestclient.query import Q import sys reload(sys) sys.setdefaultencoding('utf-8') #连接neo4j数据库 gdb = GraphDatabase("http://localhost:7474/db/data/", username="******", password="******") q3 = unicode("match (a:ORDER) where a.IsCon='1' return count(*)") conDiffusion = gdb.query(q3) print conDiffusion[0][0]
__all__ = ['E_user']

from akwadb.typeEUser import TypeEUser
from neo4jrestclient.client import GraphDatabase
from neo4jrestclient import client

# Connection settings (credentials masked in this dump).
url = "http://localhost:7474/db/data/"
username = "******"
password = "******"
gdb = GraphDatabase(url=url, username=username, password=password)


class E_user():
    """CRUD-style helpers for nodes labelled "E_user" in the graph."""

    # Create a E_user node and attach the "E_user" label to it.
    def addNoed_User(self, nom, prenom, mail, tel, user_name, categorie):
        myUser = gdb.nodes.create(name=nom, prenom=prenom, mail=mail, tel=tel, user_name=user_name, categorie=categorie)
        e_user = gdb.labels.create("E_user")
        e_user.add(myUser)

    # Fetch the properties of every E_user node.
    def getAllEUser(self):
        tableOfUser = []
        myUser = gdb.labels.get("E_user")
        p = myUser.all()
        # print(len(p))
        for element in p:
            # Parse the numeric node id out of the node's repr
            # (e.g. "<Neo4j Node: http://.../node/42>") and re-fetch it.
            x = str(element).split("/")[-1].split(">")[0]
            n = gdb.nodes.get(x)
            tableOfUser.append(n.properties)
        return tableOfUser

    # get a E_user by criteria
    # NOTE(review): method body truncated in this chunk.
    def getEUserByCriteria(self, query):
# NOTE(review): this chunk starts inside a lookup function whose `def` line
# lies above the visible region; the tail below returns the first matched
# node's properties, augmented with its internal neo4j id.
    ret_node_dict = results.graph[0]['nodes'][0]['properties']
    ret_node_dict['neoId'] = results.graph[0]['nodes'][0]['id']
    # for nodes in results.graph:
    #     nodes_list = nodes['nodes']
    #     if len(nodes_list) > 1:
    #         print('more than one node')
    #         print(len(nodes_list))
    #         print(nodes_list)
    #     node_dict = nodes_list[0]
    #     if node_dict['properties']:
    #         ret_node_list.append(node_dict['properties'])
    return ret_node_dict


# Module-level connection (credentials masked in this dump).
gdb = GraphDatabase("http://10.1.1.28:7474", username="******", password="******")

# NOTE(review): the __main__ body visible here is entirely commented-out
# experiments and is truncated in this chunk.
if __name__ == '__main__':
    # rel_dict_list = search_triple_neo4j(u"上海中心大厦", '', '')
    # print(len(rel_dict_list))
    # for rel_dict in rel_dict_list:
    #     for rel, val in rel_dict.iteritems():
    #         print("%s: %s" % (rel, val))
    # ret_node_list = search_node_neo4j(u"5333265")
    # print(len(ret_node_list))
    # for node_dict in ret_node_list:
    #     for key, val in node_dict.iteritems():
    #         print('%s: %s' % (key, val))
    #     print('---------------')
# Flask-RESTX API skeleton for a users/products/customers service backed by
# Neo4j.  NOTE(review): chunk is truncated inside the `product` model dict.
app = Flask(__name__)
api = Api(
    app,
    version='1.0.0',
    title='Users API',
    description='A simple Users API',
)

# One namespace per resource family.
ns = api.namespace('users', description='Users operations')
ps = api.namespace('product', description='Product operations')
cs = api.namespace('customer', description='Customer operations')

# Graph connection (credentials masked in this dump).
db = GraphDatabase("http://172.18.0.2:7474", username="******", password="******")
#q = 'match (n:User) return n.name, n.password, n.active, n.id'
#results = db.query(q, returns=(client.Node, str, client.Node))

# Swagger model describing a Product payload (truncated below).
product = api.model(
    'Product', {
        'id': fields.Integer(readOnly=True, description='The product unique identifier'),
        'productName': fields.String(required=True, description="Name of Product"),
        'quantity': fields.String(required=True, description="Quantity in stock"),
        'Department': fields.String(required=True, description='Department of Products'),
# NOTE(review): this chunk starts mid-way through a category-file parser; the
# loop header and the `if` matching the `else:` below were cut off above.
# Headings ("## Name") open a new category; other non-empty lines are
# accumulated into the current category's list.
        current_category = string.strip(line[2:])  # py2 string module
        dic[current_category] = []
        tmp = []
        #print current_category
    else:
        line = line.strip()
        if len(line) > 0:
            #print line
            tmp.append(line)
            # NOTE(review): exact indent of this assignment is ambiguous in
            # the source dump — it may belong one level out.
            dic[current_category] = tmp
#### End of Parsing file###

from neo4jrestclient.client import GraphDatabase

# GrapheneDB connection (credentials masked in this dump).
gdb = GraphDatabase(
    "http://wowgic.sb02.stations.graphenedb.com:24789/db/data/",
    username="******", password="******")
api = tweepy.API(auth)


def categoryMaterialize(text):
    # For every (category, area) pair, find helpers that PROVIDE the
    # category in that area together with seekers seeking it there.
    tmpScrName = ""
    for cate in dic.get('cCategory'):
        cate = cate.lower()
        for area in dic.get('Areas'):
            area = area.lower()
            # NOTE(review): Cypher assembled by string concatenation from
            # parsed file content — injection-prone if the file is untrusted.
            q = 'MATCH (h:aHelper{area_place:\'' + area + '\'})-[:PROVIDES]->(b{name:\'' + cate + '\'})<-[:seeker]-(s:Seeker{area_place:\'' + area + '\'}) return h,s,b'
            #print q
            # Send Cypher query.  (Function body truncated in this chunk.)
            n = gdb.query(q, data_contents=True)
# Builds a networkx graph of SHARE nodes linked by APP relationship paths,
# starting from a ShareId given on the command line, for plotting with
# matplotlib.  NOTE(review): Python 2 (reload/setdefaultencoding); the
# script continues past this chunk (q2's results are processed later).
from neo4jrestclient.client import GraphDatabase
from neo4jrestclient.query import Q
import networkx as nx  # graph library
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import numpy as np
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# NOTE(review): duplicate imports retained from the original.
import networkx as nx  # graph library
import matplotlib.pyplot as plt

# Connect to the neo4j database (credentials masked in this dump).
gdb = GraphDatabase("http://localhost:7474/db/data/", username="******", password="******")
plt.subplot(2, 1, 1)
G = nx.Graph()

# ShareId to query, from the command line.
param = sys.argv[1]

# Paths that END at the given ShareId, starting from the 'NULL' root.
q1 = "MATCH P=(a:SHARE)-[n:APP*]->(b:SHARE) WHERE a.ShareId='NULL' and b.ShareId='" + param + "' RETURN nodes(P)"
# Execute the cypher statement.
nodes = gdb.query(q1)
for a in nodes:
    # Add an edge for each consecutive pair of nodes on the path,
    # keyed by the first 8 characters of each ShareId.
    for b in range(len(a[0]) - 1):
        G.add_edges_from([(a[0][b]['data']['ShareId'][0:8],
                           a[0][b + 1]['data']['ShareId'][0:8])])

# Paths that START at the given ShareId (processed past this chunk).
q2 = "MATCH P=(a:SHARE)-[n:APP*]->(b:SHARE) WHERE a.ShareId='" + param + "' RETURN nodes(P)"
# Execute the cypher statement.
# NOTE(review): this chunk starts inside a relationship-building function
# whose `def` line lies above the visible region.
    tmp = result[0]
    # Link the article to the matched topic node, weighted by probability.
    article_x.relationships.create("is about", tmp[0], with_a_probability=topic[1])


def create_scientific_article_nodes(json_list):
    # One node per article JSON item, then link it to its topics.
    for json_item in json_list:
        article_x = create_single_scientific_article_node(json_item)
        create_relatipship_between_nodes(article_x, json_item['article_topics'])


# --- module-level driver: load credentials, topics and articles from disk ---
dir_path = os.path.dirname(os.path.realpath(__file__))
credentials_json_list = retrieve_list_of_json_file('{}{}'.format(
    dir_path, r'/data/credentials/'))
db = GraphDatabase(credentials_json_list[0]['db_connection'],
                   username=credentials_json_list[0]['db_user'],
                   password=credentials_json_list[0]['db_password'])
topic_json_list = retrieve_list_of_json_file('{}{}'.format(
    dir_path, r'/data/topics/'))
counter = 0
for topics in topic_json_list:
    prepared_topic_json_list = prepare_topic_list(topics['topic_content'])
    create_topic_nodes(prepared_topic_json_list, counter)
    counter += 1
scientific_article_json_list = retrieve_list_of_json_file('{}{}'.format(
    dir_path, r'/data/mode/'))
create_scientific_article_nodes(scientific_article_json_list)
#!/usr/bin/env python3 from neo4jrestclient import client from neo4jrestclient.client import GraphDatabase db = GraphDatabase("http://127.0.0.1:7474", username="******", password="******") import pandas as pd import numpy as np import json import keras from keras.utils import plot_model from keras.models import Sequential from keras.layers import Dense from sklearn.model_selection import train_test_split from keras.utils import to_categorical from matplotlib import pyplot print("query observations") #q="MATCH (o:Observation {display:'CURRENT WHO HIV STAGE'})--(e:Encounter) with e match (p:Patient)--(e)--(o:Observation)--(c:Concept) where c.class = 'Test' or o.display = 'CURRENT WHO HIV STAGE' Return p.id as patient_id,e.id as encounter_id,o.display as observation,o.value as value,o.timestamp as timestamp order by p.id,o.timestamp" #q="MATCH (o:Observation {display:'CURRENT WHO HIV STAGE'})--(e:Encounter) with e match (p:Patient)--(e)--(o:Observation)--(c:Concept) Return p.id as patient_id,e.id as encounter_id,o.display as observation,o.value as value,o.timestamp as timestamp order by p.id,o.timestamp" q = "MATCH (o:Observation {display:'CURRENT WHO HIV STAGE'})--(e:Encounter) with e match (p:Patient)--(e)--(o:Observation) where o.timestamp is not null Return p.id as patient_id,e.id as encounter_id,o.display as observation,o.value as value,o.timestamp as timestamp order by p.id,o.timestamp" results = db.query(q, data_contents=True) columns = results.columns stages = {} observations = pd.DataFrame.from_records(results.rows, columns=columns) print("create lookup table") lookup = {} for var in ['patient_id', 'encounter_id']: i = 1 vals = observations[var].unique() lookup[var] = {}
import os
from datetime import datetime
from pytz import timezone
import pandas as pd

# ISO-8601 UTC timestamp for "now".
now = lambda: datetime.now(timezone('UTC')).isoformat()
# Build an alfalfalfa.com article URL from its numeric id.
url = lambda n: 'http://alfalfalfa.com/articles/{}.html'.format(n)
# Parse an ISO string into a UTC-localized pandas Timestamp.
iso2utc = lambda iso: timezone('UTC').localize(pd.to_datetime(iso))
import time
from neo4jrestclient.client import GraphDatabase

# NOTE(review): this assignment rebinds the `url` lambda defined above
# (credentials masked in this dump).
url = "http://*****:*****@localhost:7474/db/data/"
gdb = GraphDatabase(url)
alias = 'tamako'
access = 'implement'


def main(fname):
    # Load one CSV of thread rows; `.ix` indexing implies an old pandas —
    # TODO confirm the pandas version this was written for.
    df = pd.read_csv(fname)
    res2id = {}
    for i in range(len(df)):
        res = int(df.ix[i]['res'])
        anchor = int(df.ix[i]['anchor'])
        text = str(df.ix[i]['text'])
        since = str(df.ix[i]['since'])
        url = str(df.ix[i]['url'])
        # Remaining columns (5+) are image URLs; drop NaNs.
        # NOTE(review): function body truncated in this chunk.
        imgs = list(df.ix[i, 5:].dropna())
from neo4jrestclient import client
from numpy import *
from Proyecro2_Encuesta import *

# Authors: María José Lemus 181202, André Rodriguez, Javier Salazar 18764
# Project 2, Data Structures — 24/05/2019
# Description: book recommendation system; recommends by author, page
# count, book price or genre.
# Recommendation algorithm taken and adapted from:
# https://github.com/andresum97/Proyecto2
# The graph is imported from neo4j (requires neo4jrestclient installed).

# Database connection using the neo4j port, user and password (credentials
# masked in this dump).  NOTE(review): GraphDatabase is presumably brought
# in by one of the star-imports above — confirm.
db = GraphDatabase("http://localhost:11002", username="******", password="******")

# Supported book genres.
generos = [
    "Novela Negra", "Dramatico", "Terror", "Prosa", "Ensayo", "Narrativa",
    "Novela", "Ciencia Ficcion", "Periodistico", "Sagas", "Aventura", "Poesia"
]


# Yes/no prompt helper: re-asks until the user types "si" or "no".
# NOTE(review): function body truncated in this chunk.
def Preguntar(pregunta, respuestas):
    bandera = True
    respuesta = ""
    while bandera:
        respuesta = input(pregunta + "\n1. Si\n2. No\n").lower().strip()
        if respuesta == "si" or respuesta == "no":
            bandera = False
from neo4jrestclient.client import GraphDatabase

# Connection URL (credentials masked in this dump).
url = "http://*****:*****@localhost:7474/db/data/"
gdb = GraphDatabase(url)
from datetime import datetime
import random

# Import accounts from "handle#bio" lines into the graph: MERGE a node per
# handle, then set alias/bio/since/color properties on it.
with open('nuc-accounts.txt') as f:
    for line in f:
        account_handle, account_bio = line.split('#')
        account_alias = 'nuc_' + account_handle
        account_bio = account_bio.rsplit()[0]
        # Local time, rendered with a hard-coded +0900 offset — assumes the
        # host runs in JST; TODO confirm.
        now = datetime.now().strftime("%Y%m%dT%H%M%S+0900")
        # BUG FIX: a random display color like "#ffffff" was intended (see
        # original trailing comment), but str(randint) produced a DECIMAL
        # string such as "#1234567"; render as 6 hex digits instead.
        # Also dropped a duplicate, identical `now = ...` assignment.
        color = '#%06x' % random.randint(0, 16777215)
        gdb.query('MERGE (a:Account:Implement {handle:"%s"})' % (account_handle),
                  data_contents=True)
        # NOTE(review): the MERGE above keys on `handle` but this MATCH keys
        # on `a.name` — if no node has `name` set, the SET never applies;
        # confirm the intended property against the rest of the project.
        gdb.query(''' MATCH (a:Account) WHERE a.name="%s" SET a+={alias:"%s",bio:"%s",since:"%s",color:"%s"} ''' % (account_handle, account_alias, account_bio, now, color),
                  data_contents=True)
def get_object_list(self, request):
    """Return Sentence DataObjects (with their owning Document), optionally
    filtered by the CTS / length / sentence query parameters.

    Supported filter suffixes: __contains, __startswith, __endswith,
    __gt, __lt, __isnot; `length` is treated as numeric (unquoted).

    NOTE(review): the Cypher text is assembled by concatenating raw request
    values — injection-prone; parameterized queries would be safer.
    """
    gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
    attrlist = ['CTS', 'length', 'sentence']
    sentences = []
    query_params = {}
    # Collect recognised filter params, plain (CTS=) or suffixed (CTS__contains=).
    for obj in request.GET.keys():
        if obj in attrlist and request.GET.get(obj) is not None:
            query_params[obj] = request.GET.get(obj)
        elif obj.split('__')[0] in attrlist and request.GET.get(
                obj) is not None:
            query_params[obj] = request.GET.get(obj)
    # implement filtering
    if len(query_params) > 0:
        # generate query
        q = """MATCH (d:`Document`)-[:sentences]->(s:`Sentence`) WHERE """
        # filter word on parameters; every clause ends with "AND " and the
        # final trailing "AND " is stripped below.
        for key in query_params:
            if len(key.split('__')) > 1:
                if key.split('__')[1] == 'contains':
                    q = q + """HAS (s.""" + key.split(
                        '__')[0] + """) AND s.""" + key.split(
                            '__')[0] + """=~'.*""" + query_params[
                                key] + """.*' AND """
                elif key.split('__')[1] == 'startswith':
                    q = q + """HAS (s.""" + key.split(
                        '__')[0] + """) AND s.""" + key.split(
                            '__')[0] + """=~'""" + query_params[
                                key] + """.*' AND """
                elif key.split('__')[1] == 'endswith':
                    q = q + """HAS (s.""" + key.split(
                        '__')[0] + """) AND s.""" + key.split(
                            '__')[0] + """=~'.*""" + query_params[
                                key] + """' AND """
                elif key.split('__')[1] == 'gt':
                    q = q + """HAS (s.""" + key.split(
                        '__')[0] + """) AND s.""" + key.split('__')[
                            0] + """>""" + query_params[key] + """ AND """
                elif key.split('__')[1] == 'lt':
                    q = q + """HAS (s.""" + key.split(
                        '__')[0] + """) AND s.""" + key.split('__')[
                            0] + """<""" + query_params[key] + """ AND """
                elif key.split('__')[1] == 'isnot':
                    if key.split('__')[0] == 'length':
                        # numeric property: value is not quoted
                        q = q + """HAS (s.""" + key.split(
                            '__')[0] + """) AND s.""" + key.split(
                                '__')[0] + """<>""" + query_params[
                                    key] + """ AND """
                    else:
                        q = q + """HAS (s.""" + key.split(
                            '__')[0] + """) AND s.""" + key.split(
                                '__')[0] + """<>'""" + query_params[
                                    key] + """' AND """
            else:
                if key == 'length':
                    q = q + """HAS (s.""" + key + """) AND s.""" + key + """=""" + query_params[
                        key] + """ AND """
                else:
                    q = q + """HAS (s.""" + key + """) AND s.""" + key + """='""" + query_params[
                        key] + """' AND """
        # Strip the trailing "AND " left by the last clause.
        q = q[:len(q) - 4]
        q = q + """RETURN s, d ORDER BY ID(s)"""
        table = gdb.query(q)
    # default querying
    else:
        table = gdb.query(
            """MATCH (d:`Document`)-[:sentences]->(s:`Sentence`) WHERE HAS (s.CTS) RETURN s, d ORDER BY ID(s)"""
        )
    # create the objects which was queried for and set all necessary attributes
    for t in table:
        sentence = t[0]
        document = t[1]
        url = sentence['self'].split('/')
        urlDoc = document['self'].split('/')
        # DataObject keyed by the node id (last URL segment).
        new_obj = DataObject(url[len(url) - 1])
        new_obj.__dict__['_data'] = sentence['data']
        new_obj.__dict__['_data']['id'] = url[len(url) - 1]
        new_obj.__dict__['_data'][
            'document_resource_uri'] = API_PATH + 'document/' + urlDoc[
                len(urlDoc) - 1] + '/'
        sentences.append(new_obj)
    # Very long result sets are returned unsorted to avoid the sort cost.
    if ENABLE_DISPLAYING_LONG_DOCUMENTS:
        if len(sentences) > 500:
            return sentences
        else:
            return sort_sentences(sentences)
    else:
        return sort_sentences(sentences)
    # NOTE(review): unreachable — every branch above already returns.
    return sort_sentences(sentences)
def get_object_list(self, request):
    """Return UserSentence DataObjects (with owning UserDocument and user),
    optionally filtered by the CTS / length / sentence query parameters and
    by a `user` parameter.

    NOTE(review): the Cypher is assembled by concatenating raw request
    values — injection-prone.  Credential masking in this dump garbled the
    two "u.username='******'user')" query fragments below, so this block is
    not runnable exactly as shown; they presumably interpolated
    request.GET.get('user') — confirm against the project history.
    """
    gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
    attrlist = ['CTS', 'length', 'sentence']
    sentences = []
    query_params = {}
    # Collect recognised filter params, plain (CTS=) or suffixed (CTS__contains=).
    for obj in request.GET.keys():
        if obj in attrlist and request.GET.get(obj) is not None:
            query_params[obj] = request.GET.get(obj)
        elif obj.split('__')[0] in attrlist and request.GET.get(obj) is not None:
            query_params[obj] = request.GET.get(obj)
    # implement filtering
    if len(query_params) > 0:
        # generate query; every clause ends with "AND " (stripped later)
        q = """MATCH (u:`User`)-[:owns]->(d:UserDocument)-[:sentences]->(s:UserSentence) WHERE """
        # filter word on parameters
        for key in query_params:
            if len(key.split('__')) > 1:
                if key.split('__')[1] == 'contains':
                    q = q + """HAS (s.""" + key.split('__')[0] + """) AND s.""" + key.split('__')[0] + """=~'.*""" + query_params[key] + """.*' AND """
                elif key.split('__')[1] == 'startswith':
                    q = q + """HAS (s.""" + key.split('__')[0] + """) AND s.""" + key.split('__')[0] + """=~'""" + query_params[key] + """.*' AND """
                elif key.split('__')[1] == 'endswith':
                    q = q + """HAS (s.""" + key.split('__')[0] + """) AND s.""" + key.split('__')[0] + """=~'.*""" + query_params[key] + """' AND """
                elif key.split('__')[1] == 'gt':
                    q = q + """HAS (s.""" + key.split('__')[0] + """) AND s.""" + key.split('__')[0] + """>""" + query_params[key] + """ AND """
                elif key.split('__')[1] == 'lt':
                    q = q + """HAS (s.""" + key.split('__')[0] + """) AND s.""" + key.split('__')[0] + """<""" + query_params[key] + """ AND """
                elif key.split('__')[1] == 'isnot':
                    if key.split('__')[0] == 'length':
                        # numeric property: value is not quoted
                        q = q + """HAS (s.""" + key.split('__')[0] + """) AND s.""" + key.split('__')[0] + """<>""" + query_params[key] + """ AND """
                    else:
                        q = q + """HAS (s.""" + key.split('__')[0] + """) AND s.""" + key.split('__')[0] + """<>'""" + query_params[key] + """' AND """
            else:
                if key == 'length':
                    q = q + """HAS (s.""" + key + """) AND s.""" + key + """=""" + query_params[key] + """ AND """
                else:
                    q = q + """HAS (s.""" + key + """) AND s.""" + key + """='""" + query_params[key] + """' AND """
        # is user set if params not empty?
        if request.GET.get('user'):
            # (garbled by credential masking — see docstring)
            q = q + """ u.username='******'user') + """' RETURN s, d, u.username ORDER BY ID(s)"""
        else:
            # Strip the trailing "AND " left by the last clause.
            q = q[:len(q) - 4]
            q = q + """RETURN s, d, u.username ORDER BY ID(s)"""
        table = gdb.query(q)
    # default querying
    else:
        # is user set if params are empty?
        if request.GET.get('user'):
            # (garbled by credential masking — see docstring)
            table = gdb.query("""MATCH (u:`User`)-[:owns]->(d:`UserDocument`)-[:sentences]->(s:UserSentence) WHERE u.username='******'user') + """' RETURN DISTINCT s, d, u.username ORDER BY ID(d)""")
        else:
            table = gdb.query("""MATCH (u:`User`)-[:owns]->(d:UserDocument)-[:sentences]->(s:UserSentence) RETURN s, d, u.username ORDER BY ID(s)""")
    # create the objects which was queried for and set all necessary attributes
    for t in table:
        sentence = t[0]
        document = t[1]
        user = t[2]
        url = sentence['self'].split('/')
        urlDoc = document['self'].split('/')
        # DataObject keyed by the node id (last URL segment).
        new_obj = DataObject(url[len(url) - 1])
        new_obj.__dict__['_data'] = sentence['data']
        new_obj.__dict__['_data']['id'] = url[len(url) - 1]
        new_obj.__dict__['_data']['document_resource_uri'] = API_PATH + 'user_document/' + urlDoc[len(urlDoc) - 1] + '/'
        new_obj.__dict__['_data']['user'] = user
        sentences.append(new_obj)
    return sentences
# coding: utf8 from neo4jrestclient.client import GraphDatabase import json ################ set your dump's meta data ##################### dump_file = "data/pentecontaetia_dump.json" # the CTS of the document that is going to be dumped; dump_document_CTS = "urn:cts:greekLit:tlg0003.tlg001.perseus-grc" host = "http://localhost:7474/db/data/" ################################################################ # graph database instance gdb = GraphDatabase(host) file = open(dump_file, 'w') # query for the document q = """MATCH (d:`Document`) WHERE d.CTS='""" + dump_document_CTS + """' RETURN d""" table = gdb.query(q) document_dict = {} for doc in table: document = doc[0] documentNode = gdb.nodes.get(document['self']) # get the metas of the document for doc_attr in documentNode.properties: document_dict[doc_attr] = documentNode.properties[doc_attr] sent_table = gdb.query(
def Connection():
    """Build a GraphDatabase client for the URL supplied by URLConnection()."""
    target_url = URLConnection()
    return GraphDatabase(target_url)
class Neo4jGraphHandler:
    """Class containing some helper functions for neo4j graphing.

    Reads connection parameters from ``config/jandroid.conf`` and lazily
    opens a neo4jrestclient ``GraphDatabase`` connection. Also provides
    small builders that turn attribute/label lists into Cypher query
    strings (``fn_create_match_query`` / ``fn_create_node_query_item``).
    """

    def __init__(self, base_dir):
        """Sets paths and reads in relevant config file parameters.

        Identifies the config file location with respect to the base code
        directory. Opens config file and reads in graph-specific config
        options (i.e., neo4j URL, username and password).

        :param base_dir: string specifying base directory for the code
        """
        # Get path to config file.
        path_config_file = os.path.join(base_dir, 'config', 'jandroid.conf')

        # Read config file. config.read silently ignores a missing file,
        # in which case the defaults below are kept.
        config = configparser.ConfigParser()
        config.read(path_config_file)
        # Defaults (credentials appear redacted in this source; the real
        # values are expected to come from the NEO4J config section).
        self.neo4j_url = 'http://localhost:7474'
        self.neo4j_username = '******'
        self.neo4j_password = '******'
        if config.has_section('NEO4J'):
            if config.has_option('NEO4J', 'URL'):
                self.neo4j_url = config['NEO4J']['URL']
            if config.has_option('NEO4J', 'USERNAME'):
                self.neo4j_username = config['NEO4J']['USERNAME']
            if config.has_option('NEO4J', 'PASSWORD'):
                self.neo4j_password = config['NEO4J']['PASSWORD']

        # Database connection object; created lazily by
        # fn_connect_to_graph().
        self.db = None

    def fn_connect_to_graph(self):
        """Connects to Neo4j database.

        Creates a connection to the neo4j database, using the parameters
        specified in the config file (or default values).

        :raises JandroidException: an exception is raised if connection
            to neo4j database fails.
        """
        logging.info('Trying to connect to Neo4j graph DB.')
        try:
            self.db = GraphDatabase(
                self.neo4j_url,
                username=self.neo4j_username,
                password=self.neo4j_password
            )
            logging.info('Connected to graph DB.')
        except Exception as e:
            raise JandroidException({
                'type': str(os.path.basename(__file__))
                        + ': GraphConnectError',
                'reason': 'Unable to connect to Neo4j '
                          + 'graph database. '
                          + 'Are you sure it\'s running? '
                          + 'Returned error is: '
                          + str(e)
            })

    def fn_initialise_graph(self):
        """Initialises graph by removing all existing nodes/relationships."""
        # Connect to graph if not already connected.
        # (was: `self.db == None` -- identity check is the correct idiom)
        if self.db is None:
            self.fn_connect_to_graph()

        # Delete all existing nodes and relationships.
        delete_all_query = 'MATCH (n) DETACH DELETE n'
        self.fn_execute_graph_query(delete_all_query)

    def fn_create_uniqueness_constraint(self, label, attribute):
        """Creates uniqueness constraint on an attribute for a label.

        This function creates a uniqueness constraint against all nodes
        that have a particular label, requiring that the specified
        attribute be unique.

        :param label: the label of the node(s) to create the constraint on
        :param attribute: the attribute to create the constraint on
        """
        # Labels in neo4j have ":" as the first character. If the given
        # label doesn't have ":" as its first character, prepend ":".
        if label[0] != ':':
            label = ':' + label

        # Create graph query from inputs.
        uniqueness_constraint = ('CREATE CONSTRAINT ON (n'
                                 + label
                                 + ') ASSERT n.'
                                 + attribute
                                 + ' IS UNIQUE')

        # Execute graph query (result is not needed; the previous unused
        # `constraint_query =` assignment has been dropped).
        self.fn_execute_graph_query(uniqueness_constraint)

    def fn_create_node_key(self, label, attributes):
        """Creates node key from attributes for a label.

        This function creates a node key from a group of attributes
        against all nodes that have a particular label.

        :param label: the label of the node(s) to create the constraint on
        :param attributes: list<string> or string of attribute(s) for
            node key
        """
        # Labels in neo4j have ":" as the first character. If the given
        # label doesn't have ":" as its first character, prepend ":".
        if label[0] != ':':
            label = ':' + label

        # Accept a single attribute string as well as a list.
        if type(attributes) is not list:
            attributes = [attributes]

        # Qualify each attribute with the node identifier, producing
        # e.g. "n.attr1, n.attr2".
        attribute_list = ', '.join('n.' + attribute for attribute in attributes)

        # Create query from inputs.
        uniqueness_constraint = ('CREATE CONSTRAINT ON (n'
                                 + label
                                 + ') ASSERT ('
                                 + attribute_list
                                 + ') IS NODE KEY')

        # Execute graph query.
        self.fn_execute_graph_query(uniqueness_constraint)

    def fn_create_node(self, attributes=None, labels=None):
        """Creates a node in neo4j graph with the given labels and attributes.

        The labels argument is a list of strings, where each label string
        is prefixed by ":". The attributes argument is a list of strings
        in key:value format.

        Sample input:
            attributes = ['attr1:"value1"', 'attr2:["value2-1","value2-2"]']
            labels = [':Label1', ':Label2']

        :param attributes: list<string> of attributes in key:value format
        :param labels: list<string> of labels in :name format
        """
        # None sentinels instead of mutable default arguments ([] defaults
        # are shared between calls); behaviour for callers is unchanged.
        if attributes is None:
            attributes = []
        if labels is None:
            labels = []

        # Query string.
        graph_query = 'CREATE ' + self.fn_create_node_query_item(
            attributes, labels, 'n')

        # Execute graph query.
        self.fn_execute_graph_query(graph_query)

    def fn_create_relationship(self, start_node_obj, end_node_obj, rel_name):
        """Creates a relationship in neo4j graph between two existing nodes.

        :param start_node_obj: dictionary object with possible keys:
            'attributes' and 'labels' denoting the node to create the
            relationship from
        :param end_node_obj: dictionary object with possible keys:
            'attributes' and 'labels' denoting the node to create the
            relationship to
        :param rel_name: name for the relationship
        """
        # Retrieve attributes and labels as list from the input objects
        # for start and end objects.
        [start_node_attributes, start_node_labels] = \
            self.fn_get_attributes_labels(start_node_obj)
        start_node_match_string = self.fn_create_match_query(
            start_node_attributes, start_node_labels, 'n')
        [end_node_attributes, end_node_labels] = \
            self.fn_get_attributes_labels(end_node_obj)
        end_node_match_string = self.fn_create_match_query(
            end_node_attributes, end_node_labels, 'm')

        # Relationship names must begin with ":". If the specified
        # relationship does not, then prepend ":".
        if rel_name[0] != ':':
            rel_name = ':' + rel_name

        # Create the database query. MERGE (rather than CREATE) keeps the
        # relationship unique between the two matched nodes.
        relation_query = (start_node_match_string
                          + ' '
                          + end_node_match_string
                          + ' '
                          + 'MERGE (n)-['
                          + rel_name
                          + ']->(m) '
                          + 'RETURN n, m')

        # Execute database query.
        self.fn_execute_graph_query(relation_query)

    def fn_create_relationship_from_labels(self, start_label,
                                           end_label, rel_name):
        """Creates a relationship between nodes having specific labels.

        :param start_label: label string for start node
        :param end_label: label string for end node
        :param rel_name: name for the relationship
        """
        # Labels and relationship names must begin with ":".
        # If the specified labels/relationships do not, then prepend ":".
        if start_label[0] != ':':
            start_label = ':' + start_label
        if end_label[0] != ':':
            end_label = ':' + end_label
        if rel_name[0] != ':':
            rel_name = ':' + rel_name

        # Create the database query.
        relation_query = ('MATCH (n'
                          + start_label
                          + ') '
                          + 'MATCH (m'
                          + end_label
                          + ') '
                          + 'CREATE (n)-['
                          + rel_name
                          + ']->(m) '
                          + 'RETURN n, m')

        # Execute the database query.
        self.fn_execute_graph_query(relation_query)

    def fn_get_attributes_labels(self, object):
        """Retrieves attributes and labels from object.

        This function takes as input a dictionary object containing (at
        least) two keys: "attributes" and "labels". It retrieves the
        attributes and labels and returns them as lists within a list.

        :param object: dictionary object containing keys: "attributes"
            and "labels" (name shadows the builtin but is kept for
            interface compatibility)
        :returns: list containing two lists: one of attributes, one of
            labels
        :raises JandroidException: if both lists are empty
        """
        node_attributes = object.get('attributes', [])
        node_labels = object.get('labels', [])
        if (node_attributes == []) and (node_labels == []):
            raise JandroidException({
                'type': str(os.path.basename(__file__))
                        + ': EmptyAttributeAndLabelList',
                'reason': 'A node must have at least '
                          + 'one attribute or label.'
            })
        return [node_attributes, node_labels]

    def fn_create_match_query(self, attributes, labels, id='n'):
        """Creates a Cypher MATCH query for node from attributes and labels.

        This function takes as input attributes and labels for a node
        (and optionally an identifier), and returns a Cypher MATCH query.
        e.g., if the function is called with
            self.fn_create_match_query(['att1:"val1"'], [':Label1'], 'm')
        the output will be
            'MATCH (m:Label1 {att1:"val1"})'

        :param attributes: list<string> of attributes in key:value format
        :param labels: list<string> of labels
        :param id: node identifier string; default value is "n"
        :returns: MATCH query as string
        """
        match_string = 'MATCH ' + self.fn_create_node_query_item(
            attributes, labels, id)
        return match_string

    def fn_create_node_query_item(self, attributes, labels, id='n'):
        """Creates a string representing a node from attributes and labels.

        This function takes as input a list of attributes and labels
        (and optionally, a node identifier), and returns a string
        representing the node.
        e.g., if the function is called with
            self.fn_create_node_query_item(['att1:"val1"'], [':Label1'], 'm')
        it will return
            '(m:Label1 {att1:"val1"})'

        :param attributes: list<string> of attributes in key:value format
        :param labels: list<string> of labels
        :param id: node identifier string; default value is "n"
        :returns: Node representation string
        """
        # Labels already carry their leading ":", so they can simply be
        # concatenated after the identifier.
        node_string = '(' + id + ''.join(labels)

        # Attributes are comma-joined inside "{...}"; omitted entirely
        # when the list is empty.
        if attributes != []:
            node_string = node_string + ' {' + ','.join(attributes) + '}'

        # Close query string.
        node_string = node_string + ')'
        return node_string

    def fn_execute_graph_query(self, cypher_query):
        """Executes the provided Cypher query against a neo4j graph.

        :param cypher_query: the Cypher query to execute against the
            neo4j graph
        :returns: neo4jrestclient query result object
        :raises JandroidException: an exception is raised if query fails
            to execute
        """
        try:
            res = self.db.query(cypher_query)
        except Exception as e:
            raise JandroidException({
                'type': str(os.path.basename(__file__))
                        + ': DBQueryError',
                'reason': str(e)
            })
        # Lazy %-style args: the message is only formatted if DEBUG is on.
        logging.debug(
            'Executed query "%s" with result stats: %s and values: %s',
            cypher_query, res.stats, res.rows)
        return res
'''
Created on Nov 5, 2016

@author: Victor

Builds the classic "Matrix" example graph (characters and KNOWS /
CODED_BY relationships) against a local Neo4j server.
'''
from neo4jrestclient import traversals
from neo4jrestclient.client import GraphDatabase
from neo4jrestclient.query import Q

db = GraphDatabase('http://localhost:7474/db/data/')


def _character(**props):
    # Create one node with the given properties and tag it as a Character.
    node = db.node(**props)
    node.labels.add('Character')
    return node


neo = _character(name='Thomas Anderson', age=29)
trinity = _character(name='Trinity')
morpheus = _character(name='Morpheus', rank='Captain')
cypher = _character(name='Cypher')
agent_smith = _character(name='Agent Smith', language='C++', version='1.0b')
architect = _character(name='The Architect')

# Wire up the relationships; neo4jrestclient turns the attribute name
# (KNOWS / CODED_BY) into the relationship type.
for src, rel_type, dst, props in [
    (neo, 'KNOWS', trinity, {'age': '3 days'}),
    (morpheus, 'KNOWS', trinity, {'age': '12 years'}),
    (neo, 'KNOWS', morpheus, {}),
    (morpheus, 'KNOWS', cypher, {'disclosure': 'public'}),
    (cypher, 'KNOWS', agent_smith, {'disclosure': 'secret', 'age': '6 months'}),
    (agent_smith, 'CODED_BY', architect, {}),
]:
    getattr(src, rel_type)(dst, **props)
def __init__(self):
    """Open a REST client connection to a fixed Neo4j endpoint.

    NOTE(review): username/password appear redacted ('******') in this
    source -- restore real credentials before use.
    """
    endpoint = "http://10.223.244.129:7474/db/data/"
    self.gdb = GraphDatabase(endpoint, username="******", password="******")
class Neo4jDataStore(object):
    """Implements the Neo4j datastore.

    Attributes:
        client: Instance of Neo4j GraphDatabase
    """

    def __init__(self, username, password, host='127.0.0.1', port=7474):
        """Create a neo4j client.

        Args:
            username: Neo4j username
            password: Neo4j password
            host: Neo4j host
            port: Neo4j port
        """
        super(Neo4jDataStore, self).__init__()
        db_url = 'http://{0:s}:{1:d}/db/data/'.format(host, port)
        self.client = GraphDatabase(db_url,
                                    username=username,
                                    password=password)

    @staticmethod
    def _get_formatter(output_format):
        """Get format class instance from format name.

        Falls back to the 'neo4j' formatter for unknown names.

        Args:
            output_format: Name as string of output format

        Returns:
            Output formatter object
        """
        formatter_registry = {
            'neo4j': Neo4jOutputFormatter,
            'cytoscape': CytoscapeOutputFormatter
        }
        formatter_class = formatter_registry.get(output_format)
        if formatter_class is None:
            formatter_class = formatter_registry['neo4j']
        return formatter_class()

    def query(self, query, params=None, output_format=None,
              return_rows=False):
        """Search the graph.

        Args:
            query: A cypher query
            params: A dictionary with query parameters
            output_format: Name of the output format to use
            return_rows: Boolean indicating if rows should be returned

        Returns:
            Dictionary with formatted query result
        """
        # Row output requests full data contents; otherwise ask only for
        # the graph representation.
        if return_rows:
            data_content = True
        else:
            data_content = DATA_GRAPH
        query_result = self.client.query(query,
                                         params=params,
                                         data_contents=data_content)
        formatter = self._get_formatter(output_format)
        return formatter.format(query_result, return_rows)
from flask import Flask, render_template, url_for, flash, redirect
from forms import RegistrationForm, LoginForm
from neo4jrestclient.client import GraphDatabase
from neo4jrestclient.query import Q

# NOTE(review): the original source here was mangled by credential
# scrubbing -- the Neo4j connection URL and the Flask app construction
# were fused into the unparseable token
#   "http://*****:*****@app.route("/")".
# Reconstructed minimally below; restore the real Neo4j credentials/host
# and set app.secret_key (needed by flash()) before deploying.
db = GraphDatabase("http://*****:*****@localhost:7474/db/data/")

app = Flask(__name__)


@app.route("/")
@app.route("/home")
def home():
    """Render the landing page."""
    return render_template('home.html')


@app.route("/about")
def about():
    """Render the about page."""
    return render_template('about.html', title='About')


@app.route("/register", methods=['GET', 'POST'])
def register():
    """Show the registration form; on valid POST, flash success and redirect home."""
    form = RegistrationForm()
    if form.validate_on_submit():
        flash(f'Account created for {form.username.data}!', 'success')
        return redirect(url_for('home'))
    return render_template('register.html', title='Register', form=form)
from neo4jrestclient.client import GraphDatabase
import os
import time

# Wipe the local Neo4j instance: stop the server, delete the on-disk
# database directory, and start it again. The fixed 20 s sleep gives the
# server time to boot before we connect (no readiness polling is done).
os.system("./neo4j-community-3.4.0/bin/neo4j stop")
os.system("rm -r ./neo4j-community-3.4.0/data/databases/graph.db")
os.system("./neo4j-community-3.4.0/bin/neo4j start")
time.sleep(20)

# Credentials appear redacted ('******') -- restore real values before running.
db = GraphDatabase("http://localhost:7474", username="******", password="******")

# Each CSV row is assumed to be "rating,prediction,conclusion" -- TODO confirm
# against the producer of finalOutput.csv. The file handle is never closed.
archivo = open("./output/finalOutput.csv","r")
lineas = archivo.readlines()
rating = []
prediction = []
conclusion = []
for i in lineas:
    linea = i.split(",")
    rating.append(linea[0])
    prediction.append(linea[1])
    conclusion.append(linea[2].rstrip())  # rstrip drops the trailing newline

# Create some nodes with labels
ratingL = db.labels.create("Rating")
predictionL = db.labels.create("Prediction")
conclusionL = db.labels.create("Conclusion")

# NOTE(review): only Rating nodes are created in the visible code; j/z,
# predictionL and conclusionL are unused here -- this chunk may be truncated.
for i,j,z in zip(rating,prediction,conclusion):
    r1 = db.nodes.create(name=str(i))
    ratingL.add(r1)