Example #1
    def obj_get(self, bundle, **kwargs):
        
        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
        document = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" + kwargs['pk'] + '/')
        
        new_obj = DataObject(kwargs['pk'])
        new_obj.__dict__['_data'] = document.properties
        new_obj.__dict__['_data']['id'] = kwargs['pk']
        new_obj.__dict__['_data']['user'] = str(document.relationships.incoming(types=["owns"])[0].start.properties['username'])
        
        sentences = gdb.query("""MATCH (u:`User`)-[:owns]->(d:`UserDocument`)-[:sentences]->(s:`UserSentence`) WHERE d.CTS='""" +document.properties['CTS']+ """' RETURN DISTINCT s ORDER BY ID(s)""")
        sentenceArray = []
        for s in sentences:
            sent = s[0]
            url = sent['self'].split('/')
            # this might seem a little hacky, but API resources are very decoupled,
            # which gives us great performance instead of creating relations amongst objects and referencing/dereferencing foreign keyed fields
            sent['data']['resource_uri'] = API_PATH + 'user_sentence/' + url[len(url)-1] + '/'
            sentenceArray.append(sent['data'])

        new_obj.__dict__['_data']['sentences'] = sentenceArray

        # get a dictionary of related translations of this document
        relatedDocuments = gdb.query("""MATCH (d:`UserDocument`)-[:sentences]->(s:`UserSentence`)-[:words]->(w:`Word`)-[:translation]->(t:`Word`)<-[:words]-(s1:`Sentence`)<-[:sentences]-(d1:`Document`) WHERE HAS (d.CTS) AND d.CTS='""" + document.properties['CTS'] + """' RETURN DISTINCT d1 ORDER BY ID(d1)""")
        
        new_obj.__dict__['_data']['translations']={}
        for rd in relatedDocuments:
            doc = rd[0]
            url = doc['self'].split('/')
            if doc['data']['lang'] in CTS_LANG:
                new_obj.__dict__['_data']['translations'][doc['data']['lang']] = doc['data']
                new_obj.__dict__['_data']['translations'][doc['data']['lang']]['resource_uri']= API_PATH + 'document/' + url[len(url)-1] +'/'


        return new_obj
Example #2
def test_read_neo(clean_slate):
    """
    Read a graph from a Neo4j instance.
    """
    driver = GraphDatabase(
        DEFAULT_NEO4J_URL, username=DEFAULT_NEO4J_USERNAME, password=DEFAULT_NEO4J_PASSWORD
    )
    for q in queries:
        driver.query(q)
    s = NeoSource()
    g = s.parse(
        uri=DEFAULT_NEO4J_URL, username=DEFAULT_NEO4J_USERNAME, password=DEFAULT_NEO4J_PASSWORD
    )
    nodes, edges = process_stream(g)
    assert len(nodes.keys()) == 3
    assert len(edges.keys()) == 2

    n1 = nodes['A']
    assert n1['id'] == 'A'
    assert n1['name'] == 'A'
    assert 'category' in n1 and 'biolink:NamedThing' in n1['category']

    e1 = edges[('A', 'C')][0]
    assert e1['subject'] == 'A'
    assert e1['object'] == 'C'
    assert e1['predicate'] == 'biolink:related_to'
    assert e1['relation'] == 'biolink:related_to'
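The `queries` fixture used above is defined elsewhere in the test module; a minimal sketch that would be consistent with the assertions (three `biolink:NamedThing` nodes A, B, C and two `biolink:related_to` edges) might look like the following. The labels and property names here are assumptions, not the project's actual fixture.

# Hypothetical fixture: three nodes and two edges matching the assertions above.
queries = [
    "CREATE (:`biolink:NamedThing` {id: 'A', name: 'A', category: ['biolink:NamedThing']})",
    "CREATE (:`biolink:NamedThing` {id: 'B', name: 'B', category: ['biolink:NamedThing']})",
    "CREATE (:`biolink:NamedThing` {id: 'C', name: 'C', category: ['biolink:NamedThing']})",
    "MATCH (a {id: 'A'}), (c {id: 'C'}) "
    "CREATE (a)-[:`biolink:related_to` {subject: 'A', object: 'C', predicate: 'biolink:related_to', relation: 'biolink:related_to'}]->(c)",
    "MATCH (a {id: 'A'}), (b {id: 'B'}) "
    "CREATE (a)-[:`biolink:related_to` {subject: 'A', object: 'B', predicate: 'biolink:related_to', relation: 'biolink:related_to'}]->(b)",
]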
Example #3
def da_words_sofar():
    from neo4jrestclient.client import GraphDatabase
    gdb = GraphDatabase("http://localhost:7474/db/data/",
                        username="******",
                        password="******")
    ids = dict()
    q = "MATCH (turn_start:TIMEPOINT)-[turn:TURN]->(turn_end:TIMEPOINT) RETURN turn"
    turns = gdb.query(q=q)
    for each_turn in turns:
        start = each_turn[0]["data"]["start"]
        end = each_turn[0]["data"]["end"]
        q_dacts = "MATCH (dact_start:TIMEPOINT)-[dact:DIALACT]->(dact_end:TIMEPOINT) WHERE %f<=dact_start.time AND %f>=dact_end.time return dact" % (
            start, end)
        dacts = gdb.query(q=q_dacts)
        total_words = 0
        for da in dacts:
            words = da[0]['data']['words']
            da_id = da[0]['data']['id']
            total_words += words
            ids[da_id] = total_words

    for k, v in sorted(ids.items()):
        if v != None:
            q = "MATCH (dact_start:TIMEPOINT)-[dact:DIALACT]->(dact_end:TIMEPOINT) WHERE dact.id = '%s' SET dact.words_sofar=%s RETURN dact" % (
                k, v)
            print k
            print v
            dacts = gdb.query(q=q)
Example #4
def clean_slate(source='kgx-unit-test'):
    http_driver = GraphDatabase(DEFAULT_NEO4J_URL,
                                username=DEFAULT_NEO4J_USERNAME,
                                password=DEFAULT_NEO4J_PASSWORD)
    q = "MATCH (n { source : '" + source + "' }) DETACH DELETE (n)"
    print(q)
    http_driver.query(q)
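Building the query by string concatenation works, but it is open to Cypher injection if `source` ever comes from user input. A hedged alternative sketch that binds the value through neo4jrestclient's `params` argument instead (the `{source}` parameter syntax assumes a pre-4.0 Neo4j server, which is what this REST client targets):

def clean_slate_parameterized(source='kgx-unit-test'):
    # Same DETACH DELETE as above, but with the source value bound as a query parameter
    http_driver = GraphDatabase(DEFAULT_NEO4J_URL,
                                username=DEFAULT_NEO4J_USERNAME,
                                password=DEFAULT_NEO4J_PASSWORD)
    q = "MATCH (n { source: {source} }) DETACH DELETE (n)"
    http_driver.query(q, params={'source': source})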
Example #5
class Neo4jApi:
    gdb = None
    username = None
    pwd = None

    def __init__(self, gdbLink, uName, password):
        self.gdb = GraphDatabase(gdbLink, uName, password)
        self.username = uName
        self.pwd = password

    def getNode(self, labels, name):
        query = "MATCH (a:" + labels + "{name: \"" + name + "\"}) RETURN a"
        return gn.GraphNode(self.gdb.query(query)[0][0])

    def getNodeByID(self, linkId):
        id = linkId.split("/")[-1]
        query = "MATCH (a) WHERE ID(a)=" + id + " RETURN a"
        return gn.GraphNode(self.gdb.query(query)[0][0])

    def getRelations(self, graphNode):
        query = "MATCH (a:" + graphNode.getLabel(
        ) + "{name: \"" + graphNode.getData()["name"] + "\"})-[r]-(b) RETURN r"
        relationsList = []
        for el in self.gdb.query(query):
            relationsList.append(gr.GraphRelation(el[0]))
        return relationsList

    def getRelationsIN(self, graphNode):
        query = "MATCH (a:" + graphNode.getLabel(
        ) + "{name: \"" + graphNode.getData(
        )["name"] + "\"})<-[r]-(b) RETURN r"
        relationsList = []
        for el in self.gdb.query(query):
            relationsList.append(gr.GraphRelation(el[0]))
        return relationsList

    def getRelationsOUT(self, graphNode):
        query = "MATCH (a:" + graphNode.getLabel(
        ) + "{name: \"" + graphNode.getData(
        )["name"] + "\"})-[r]->(b) RETURN r"
        relationsList = []
        for el in self.gdb.query(query):
            relationsList.append(gr.GraphRelation(el[0]))
        return relationsList

    def getEndNodesByRelationType(self, relationsOutList, relationType):
        endNodes = []
        for el in relationsOutList:
            if el.getType() == relationType:
                endNodes.append(self.getNodeByID(el.getEndID()))
        return endNodes

    def getStartNodesByRelationType(self, relationsInList, relationType):
        startNodes = []
        for el in relationsInList:
            if el.getType() == relationType:
                startNodes.append(self.getNodeByID(el.getStartID()))
        return startNodes
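A hypothetical usage sketch for the wrapper above; the URL, credentials, label, property value and relationship type are placeholders, and `gn.GraphNode`/`gr.GraphRelation` are the helper classes this wrapper already depends on:

# Placeholder connection details and data model
api = Neo4jApi("http://localhost:7474/db/data/", "neo4j", "password")
alice = api.getNode("Person", "Alice")
out_rels = api.getRelationsOUT(alice)
friends = api.getEndNodesByRelationType(out_rels, "KNOWS")
for friend in friends:
    print(friend.getData()["name"])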
Example #6
    def obj_get(self, bundle, **kwargs):
        
        # query parameters (optional) for short sentence approach
        attrlist = ['CTS', 'length', 'case', 'dialect', 'head', 'form', 'posClass', 'cid', 'gender', 'tbwid', 'pos', 'value', 'degree', 'number','lemma', 'relation', 'isIndecl', 'ref', 'posAdd', 'mood', 'tense', 'voice', 'person']
        query_params = {}
        for obj in bundle.request.GET.keys():
            if obj in attrlist and bundle.request.GET.get(obj) is not None:
                query_params[obj] = bundle.request.GET.get(obj)
            elif obj.split('__')[0] in attrlist and bundle.request.GET.get(obj) is not None:
                query_params[obj] = bundle.request.GET.get(obj)
        
        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
        sentence = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" + kwargs['pk'] + '/')
        documentNode = sentence.relationships.incoming(types=["sentences"])[0].start
        # get the sentence parameters            
        new_obj = DataObject(kwargs['pk'])
        new_obj.__dict__['_data'] = sentence.properties
        new_obj.__dict__['_data']['id'] = kwargs['pk']
        new_obj.__dict__['_data']['document_resource_uri'] = API_PATH + 'user_document/' + str(sentence.relationships.incoming(types=["sentences"])[0].start.id) + '/'
        new_obj.__dict__['_data']['user'] = str(documentNode.relationships.incoming(types=["owns"])[0].start.properties['username'])
        
        # get a dictionary of related translations of this sentence (should this be restricted to the user only?)
        relatedSentences = gdb.query("""MATCH (s:`UserSentence`)-[:words]->(w:`Word`)-[:translation]->(t:`Word`)<-[:words]-(s1:`Sentence`) WHERE HAS (s.CTS) AND s.CTS='""" + sentence.properties['CTS'] + """' RETURN DISTINCT s1 ORDER BY ID(s1)""")
        
        new_obj.__dict__['_data']['translations']={}
        for rs in relatedSentences:
            sent = rs[0]
            url = sent['self'].split('/')
            for lang in CTS_LANG:
                if sent['data']['CTS'].find(lang) != -1:
                    new_obj.__dict__['_data']['translations'][lang] = API_PATH + 'sentence/' + url[len(url)-1] +'/'        
        
        # get the words and related information    
        words = gdb.query("""MATCH (d:`UserSentence`)-[:words]->(w:`Word`) WHERE d.CTS='""" +sentence.properties['CTS']+ """' RETURN DISTINCT w ORDER BY ID(w)""")
        wordArray = []
        for w in words:
            word = w[0]
            url = word['self'].split('/')
            word['data']['resource_uri'] = API_PATH + 'word/' + url[len(url)-1] + '/'
            wordNode = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" + url[len(url)-1] + '/')

            # get the full translation
            if bundle.request.GET.get('full'):            
                translations = gdb.query("""MATCH (d:`Word`)-[:translation]->(w:`Word`) WHERE d.CTS='""" +wordNode.properties['CTS']+ """' RETURN DISTINCT w ORDER BY ID(w)""")
                translationArray = []
                for t in translations:
                    trans = t[0]
                    transurl = trans['self'].split('/')
                    trans['data']['resource_uri'] = API_PATH + 'word/' + transurl[len(transurl)-1] + '/'
                    translationArray.append(trans['data'])
                word['data']['translations'] = translationArray
                
            wordArray.append(word['data'])
        
        
        new_obj.__dict__['_data']['words'] = wordArray

        return new_obj
Example #7
def getSubgraph():
    op={"nodes":[],"links":[]}
    nodes=[]

    db1 = GraphDatabase("http://localhost:7474/")
    q1 = ' '.join(['MATCH n-[r]->m','WHERE n.name="batman"','RETURN n,r,m;'])
    q2 = ' '.join(['MATCH n-[r]->m WHERE n.name="batman"','WITH n,r,m MATCH q-[r2]->p','WHERE n-[r]->q AND n-[r]->p','RETURN q,r2,p limit 200;'])
    print "starting"
    results1=db1.query(q1,returns=(client.Node, client.Relationship, client.Node))
    print "HERE"
    for result in results1:
        n1=result[0].properties['name']
        n2=result[2].properties['name']
        try:
            i1=nodes.index(n1)
        except:
            nodes.append(n1)
            i1=nodes.index(n1)
            op["nodes"].append({"name":n1})
        try:
            i2=nodes.index(n2)
        except:
            nodes.append(n2)
            i2=nodes.index(n2)
            op["nodes"].append({"name":n2})
        
        r = result[1].type
        op["links"].append({"source":i1,"target":i2,"type":r})
        
    print op


    results2 = db1.query(q2,returns=(client.Node, client.Relationship, client.Node))
    print "THERE!"
    for result in results2:
        n1=result[0].properties['name']
        n2=result[2].properties['name']
        #try:
        i1=nodes.index(n1)
        """
        except:
            nodes.append(n1)
            i1=nodes.index(n1)
            op["nodes"].append({"name":n1})
        """    
        #try:
        i2=nodes.index(n2)
        """
        except:
            nodes.append(n2)
            i2=nodes.index(n2)
            op["nodes"].append({"name":n2})
        """
        r = result[1].type
        op["links"].append({"source":i1,"target":i2,"type":r})
        
    print op
    json.dump(op,open('subgraph.json','w'))
Example #8
def clean_slate():
    http_driver = GraphDatabase(DEFAULT_NEO4J_URL,
                                username=DEFAULT_NEO4J_USERNAME,
                                password=DEFAULT_NEO4J_PASSWORD)
    q = "MATCH (n) DETACH DELETE (n)"
    try:
        http_driver.query(q)
    except CypherException as ce:
        print(ce)
Example #9
class BaseGraph(object):
    def __init__(self):
        host = config.get("GRAPHDB_HOST")  #""
        user = config.get("GRAPHDB_USER")  #""
        pw = config.get("GRAPHDB_PW")  #""
        self.db = GraphDatabase(host, username=user, password=pw)

    def get_search_categories_graph_db(self,
                                       in_vis_type="line",
                                       in_vis_id="1",
                                       in_vis_container_id="1"):
        vis_id = str(1)
        if int(in_vis_id) > 0:
            vis_id = in_vis_id
        vis_container_id = str(1)
        if int(in_vis_container_id) > 0:
            vis_container_id = in_vis_container_id
        q = 'match(a:VisContainer) - [r:has] - (p) where a.tid=' + str(
            vis_id) + ' '
        q += 'return p.tid as ID, p.tname as NAME, p.tcat as CATEGORY order by toInteger(p.tid) '
        q += 'union MATCH (a:SearchCategory ) WHERE a.tcat in ["measure", "cause", "risk", "location", "age_group", "sex", "unit", "vistype", "viscontainer", "model_version", "year_group"] RETURN a.tid AS ID, a.tname AS NAME, a.tcat as CATEGORY order by toInteger(a.tid), a.tname'
        print('get_search_categories_graph_db vis_id, vis_container_id, q',
              in_vis_id, in_vis_container_id, q)
        results = self.db.query(q, returns=(str, unicode, str))
        ### how to use dataset
        #for r in results:
        #   print("(%s)-[%s]-[%s]" % (r[0], r[1], r[2]))
        return results

    def get_search_setting_graph_db(self,
                                    in_vis_type="line",
                                    in_vis_id="1",
                                    in_vis_container_id="1"):
        print(
            "get_search_setting_graph_db in_vis_type, in_vis_id, in_vis_container_id",
            in_vis_type, in_vis_id, in_vis_container_id)
        vis_id = str(1)
        if int(in_vis_id) > 0:
            vis_id = in_vis_id
        vis_container_id = str(1)
        if int(in_vis_container_id) > 0:
            vis_container_id = in_vis_container_id
        #q = 'match (a:dataResult {tname:"forecasting"}) - [:contains] -> (f:VisSection {tname:"FBD Compare"}) - [:contains] ->  (g:VisControlRow) - [:contains] -> (h:VisControl) return a.tname, f.tname, g.tname, h.tname order by f.tname, g.pos union match ( a:dataResult {tname:"forecasting"}) - [:typeOf] -> (b:Visualization {tid:' + str(vis_id) + '}) - [r2:typeOf] -> (c:VisContainer {tid:' + str(vis_container_id) + '} ) - [:contains] -> (d:VisView {tname:"simpleFlowView"}) -[:contains] -> (e:VisControlPanel) -[:contains] -> (f:VisSection) - [:contains] -> (g:VisControlRow) - [:contains] -> (h:VisControl)    return a.tname,f.tname, g.tname, h.tname order by toInteger(f.pos), toInteger(g.pos)'

        q = 'match (a:dataResult {tname:"forecasting"}) - [:contains] -> (f:VisSection {tname:"FBD Compare"}) - [:contains] ->  (g:VisControlRow) - [:contains] -> (h:VisControl) return a.tname, f.tname, g.tname, h.tname order by f.tname, g.pos union match (s:SearchCategory {tcat: "charttype", tname:"' + str(
            in_vis_type
        ) + '"} ) - [r:has] - (f:VisSection) with f match ( a:dataResult {tname:"forecasting"}) - [:typeOf] -> (b:Visualization {tid:' + str(
            vis_id
        ) + '}) - [r2:typeOf] -> (c:VisContainer {tid:' + str(
            vis_container_id
        ) + '} ) - [:contains] -> (d:VisView {tname:"simpleFlowView"}) -[:contains] -> (e:VisControlPanel) -[:contains] -> (f) - [:contains] ->  (g:VisControlRow) - [:contains] -> (h:VisControl) return a.tname, f.tname, g.tname, h.tname order by g.pos'
        results = self.db.query(q, returns=(str, str, str, str))
        print("get_search_setting_graph_db results q", q, results)
        return results
Example #10
def clean_slate():
    """
    Delete all nodes and edges in Neo4j test container.
    """
    http_driver = GraphDatabase(
        DEFAULT_NEO4J_URL, username=DEFAULT_NEO4J_USERNAME, password=DEFAULT_NEO4J_PASSWORD
    )
    q = "MATCH (n) DETACH DELETE (n)"
    try:
        http_driver.query(q)
    except CypherException as ce:
        print(ce)
Example #11
    def calculateKnowledgeMap(self, user):
        
        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)    
        submissions = gdb.query("""MATCH (n:`User`)-[:submits]->(s:`Submission`) WHERE HAS (n.username) AND n.username =  '******' RETURN s""")    
                    
        #filename = os.path.join(os.path.dirname(__file__), '../static/json/ref.json')
        #fileContent = {}
        #with open(filename, 'r') as json_data:
            #fileContent = json.load(json_data); json_data.close()                                  
                        
        vocab = {}
        ref = {}        
        lemmas = {}
        lemmaFreq = 0
        # flatten the ref and collect the vocab knowledge
        for sub in submissions.elements:            
            
            try:     
                for word in sub[0]['data']['encounteredWords']:
                        
                    try:
                        vocab[word] = vocab[word]+1
                    except KeyError as k:
                        vocab[word] = 1
                        # if vocab appears first time, get the lemmas frequency (two vocs can have same lemma, so save lemma as key)
                        try:
                            lemma = gdb.query("""MATCH (l:`Lemma`)-[:values]->(n:`Word`) WHERE n.CTS = '""" + word + """' RETURN l.value, l.frequency""")
                            if lemma.elements[0][0] is not None and lemma.elements[0][0] != "":
                                lemmas[lemma.elements[0][0]] = lemma.elements[0][1]
                        # in case of weird submission test data for encounteredWords
                        except IndexError as i:
                            continue
                    if sub[0]['data']['ref'] not in ref:
                        # get the morph info via a file lookup of submission's ref key, save params to test it on the words of the work
                        #ref[sub[0]['data']['ref']] = grammar[sub[0]['data']['ref']]
                        try:
                            params = {}
                            grammar = Grammar.objects.filter(ref=sub[0]['data']['ref'])[0].query.split('&')
                            for pair in grammar:
                                params[pair.split('=')[0]] = pair.split('=')[1] 
                            ref[sub[0]['data']['ref']] = params
                        except IndexError as k:
                            continue                        
            except KeyError as k:
                continue
        
        # get the lemma/vocab overall count
        for freq in lemmas:
            lemmaFreq = lemmaFreq + int(lemmas[freq])

        return [vocab, ref, lemmas, lemmaFreq]
Example #12
    def calculateKnowledgeMap(self, user):
        
        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)    
        submissions = gdb.query("""MATCH (n:`User`)-[:submits]->(s:`Submission`) WHERE HAS (n.username) AND n.username =  '******' RETURN s""")    
                    
        #filename = os.path.join(os.path.dirname(__file__), '../static/json/ref.json')
        #fileContent = {}
        #with open(filename, 'r') as json_data:
            #fileContent = json.load(json_data); json_data.close()                                  
                        
        vocab = {}
        ref = {}        
        lemmas = {}
        lemmaFreq = 0
        # flatten the ref and collect the vocab knowledge
        for sub in submissions.elements:            
            
            try:     
                for word in sub[0]['data']['encounteredWords']:
                        
                    try:
                        vocab[word] = vocab[word]+1
                    except KeyError as k:
                        vocab[word] = 1
                        # if vocab appears first time, get the lemmas frequency (two vocs can have same lemma, so save lemma as key)
                        try:
                            lemma = gdb.query("""MATCH (l:`Lemma`)-[:values]->(n:`Word`) WHERE n.CTS = '""" + word + """' RETURN l.value, l.frequency""")
                            if lemma.elements[0][0] is not None and lemma.elements[0][0] != "":
                                lemmas[lemma.elements[0][0]] = lemma.elements[0][1]
                        # in case of weird submission test data for encounteredWords
                        except IndexError as i:
                            continue
                    if sub[0]['data']['ref'] not in ref:
                        # get the morph info via a file lookup of submission's ref key, save params to test it on the words of the work
                        #ref[sub[0]['data']['ref']] = grammar[sub[0]['data']['ref']]
                        try:
                            params = {}
                            grammar = Grammar.objects.filter(ref=sub[0]['data']['ref'])[0].query.split('&')
                            for pair in grammar:
                                params[pair.split('=')[0]] = pair.split('=')[1]
                            ref[sub[0]['data']['ref']] = params
                        except IndexError as k:
                            continue                        
            except KeyError as k:
                continue
        
        # get the lemma/vocab overall count
        for freq in lemmas:
            lemmaFreq = lemmaFreq + int(lemmas[freq])

        return [vocab, ref, lemmas, lemmaFreq]
Example #13
def especialidad(especialidad):
    db = GraphDatabase("http://localhost:7474", username="******", password="******")
    q = 'MATCH (e:especialista)-[r:Especializado]->(m:Doctor) RETURN e, type(r), m'
    # "db" as defined above
    results = db.query(q, returns=(client.Node, str, client.Node))
    for r in results:
        print("(%s)-[%s]->(%s)" % (r[0]["name"], r[1], r[2]["name"]))
Example #14
    def obj_get(self, bundle, **kwargs):

        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
        word = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" + kwargs['pk'] +
                             '/')

        # get the data of the word
        new_obj = DataObject(kwargs['pk'])
        new_obj.__dict__['_data'] = word.properties
        new_obj.__dict__['_data']['id'] = kwargs['pk']
        new_obj.__dict__['_data'][
            'sentence_resource_uri'] = API_PATH + 'sentence/' + str(
                word.relationships.incoming(types=["words"])[0].start.id) + '/'

        # get the lemma
        lemmaRels = word.relationships.incoming(types=["values"])
        if len(lemmaRels) > 0:
            new_obj.__dict__['_data'][
                'lemma_resource_uri'] = API_PATH + 'lemma/' + str(
                    lemmaRels[0].start.id) + '/'

        translations = gdb.query(
            """MATCH (d:`Word`)-[:translation]->(w:`Word`) WHERE d.CTS='""" +
            word.properties['CTS'] + """' RETURN DISTINCT w ORDER BY ID(w)""")
        translationArray = []
        for t in translations:
            trans = t[0]
            url = trans['self'].split('/')
            trans['data']['resource_uri'] = API_PATH + 'word/' + url[len(url) -
                                                                     1] + '/'
            translationArray.append(trans['data'])

        new_obj.__dict__['_data']['translations'] = translationArray

        return new_obj
Example #15
def buildings(request):
    status={}
    if request.method == 'GET':
        gdb = GraphDatabase(NEO4J_HOST,NEO4J_USERNAME,NEO4J_PASSWORD)
        #gdb = GraphDatabase("http://localhost:7474", username="******", password="******")
        #building = gdb.labels.get('Building')
        #building.all()
        q = """MATCH (n:Building) return n.name, n.address, n.certification, n.leed_id"""
        results = gdb.query(q=q)
    
        buildings = []
        for building in results:
            building_info = {}
            building_info['name'] = building[0]
            building_info['address'] = building[1]
            building_info['certification'] = building[2]
            building_info['leed_id'] = building[3]
            buildings.append(building_info)
        
        status.update({'buildings': buildings})
        status.update({'status': 'Success'})
        return HttpResponse(json.dumps(status),content_type="application/json")
    else:
        status.update({'buildings': ''})
        status.update({'status': 'Invalid Request'})
        
    return HttpResponse(json.dumps(status))
Example #16
File: word.py  Project: ThomasK81/phaidra
 def obj_get(self, bundle, **kwargs):
     
     gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
     word = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" + kwargs['pk'] + '/')
     
     # get the data of the word
     new_obj = DataObject(kwargs['pk'])
     new_obj.__dict__['_data'] = word.properties
     new_obj.__dict__['_data']['id'] = kwargs['pk']
     new_obj.__dict__['_data']['sentence_resource_uri'] = API_PATH + 'sentence/' + str(word.relationships.incoming(types=["words"])[0].start.id) + '/'
     
     # get the lemma
     lemmaRels = word.relationships.incoming(types=["values"])
     if len(lemmaRels) > 0:
         new_obj.__dict__['_data']['lemma_resource_uri'] = API_PATH + 'lemma/' + str(lemmaRels[0].start.id) + '/'
         
     translations = gdb.query("""MATCH (d:`Word`)-[:translation]->(w:`Word`) WHERE d.CTS='""" +word.properties['CTS']+ """' RETURN DISTINCT w ORDER BY ID(w)""")
     translationArray = []
     for t in translations:
         trans = t[0]
         url = trans['self'].split('/')
         trans['data']['resource_uri'] = API_PATH + 'word/' + url[len(url)-1] + '/'
         translationArray.append(trans['data'])
             
     new_obj.__dict__['_data']['translations'] = translationArray
             
     return new_obj
Example #17
def getAllNodesAndRelations():
	db1 = GraphDatabase("http://localhost:7474/db/data/")
	q = '''START n=node(*) MATCH n-[r]->m RETURN n,r,m'''
	results = db1.query(q,returns=(client.Node, unicode, client.Relationship))
	print len(results)
	graph = defaultdict()
	startnode = []
	endnode = []
	rel = []
	for i in xrange(len(results)):
		for word in results[i]:
			if word.__class__.__name__ == 'unicode':
				json1_str = str(word)
				rel.append(getRelType(json1_str))
			if word.__class__.__name__ == 'Node':
				startnode.append(str(word.properties['name']))
			if word.__class__.__name__ == 'Relationship':
				endnode.append(str(word.properties['name']))

	for i in xrange(len(startnode)):
		graph[(startnode[i],endnode[i])] = rel[i]

	for word in graph:
		print word,graph[word]

	return graph
Example #18
class Neo4jClient:
    def __init__(self, url=None, username=None, password=None):
        if url is None:
            url = app.config["NEO4J"]["url"]
        if username is None:
            username = app.config["NEO4J"]["username"]
        if password is None:
            password = app.config["NEO4J"]["password"]

        self.gdb = GraphDatabase(url=url, username=username, password=password)

    def get_node_by_property(self, label, property, value):
        labels = self.gdb.labels.get(label)
        lookup = Q(property, contains=value)

        elements = labels.filter(lookup).elements
        data = [dict(elt.items()) for elt in elements]

        return data

    def query(self, cypher, returns=None, data_contents=False, params={}):
        data = {
            "q": cypher,
            "returns": returns,
            "data_contents": data_contents,
            "params": params,
        }

        results = self.gdb.query(**data)

        return results
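A hypothetical usage sketch for `Neo4jClient`, passing the connection details explicitly instead of reading them from the Flask config; the URL, credentials, label and property are placeholders, and the `{name}` parameter syntax assumes a pre-4.0 Neo4j server:

client = Neo4jClient(url="http://localhost:7474/db/data/",
                     username="neo4j", password="secret")

# Label/property lookup via the labels API
people = client.get_node_by_property("Person", "name", "Ada")

# Raw Cypher with bound parameters
rows = client.query("MATCH (n:Person) WHERE n.name = {name} RETURN n.name",
                    params={"name": "Ada"}, data_contents=True)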
Example #19
    def obj_get(self, bundle, **kwargs):
        
        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
        lemma = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" + kwargs['pk'] + '/')
        
        # get the data of the word
        new_obj = DataObject(kwargs['pk'])
        new_obj.__dict__['_data'] = lemma.properties
        new_obj.__dict__['_data']['id'] = kwargs['pk']
        
        # get the values    
        values = lemma.relationships.outgoing(types=["values"])            
        valuesArray = []
        for v in range(0, len(values), 1):
            val = values[v].end
            val.properties['resource_uri'] = API_PATH + 'word/' + str(val.id) + '/'
            val.properties['translations'] = []

            # get the full translation # force API into full representation if cache is enabled
            if bundle.request.GET.get('full'):    
                
                translations = gdb.query("""MATCH (d:`Word`)-[:translation]->(w:`Word`) WHERE d.CTS='""" + val.properties['CTS'] + """' RETURN DISTINCT w ORDER BY ID(w)""")
                translationArray = []
                for t in translations:
                    trans = t[0]
                    transurl = trans['self'].split('/')
                    trans['data']['resource_uri'] = API_PATH + 'word/' + transurl[len(transurl)-1] + '/'
                    translationArray.append(trans['data'])
                    val.properties['translations'] = translationArray
            
            valuesArray.append(val.properties)
            
        new_obj.__dict__['_data']['values'] = valuesArray

        return new_obj
Example #20
def getAllFromNeo4j(hostname, port, uname, pwd):
    url = hostname+":"+port
    db = GraphDatabase(url, username=uname, password=pwd)
    query = 'MATCH (n) return n,labels(n)'
    results = db.query(q=query)
    allTerms = []
    for r in results:  
        matchedTerm = {}
        try:
            #print r
            label = str(r[1][0])
            node_name = str(r[0]['data']['name'])
            uri = str(r[0]['data']['uri'])
            matchedTerm["Label"]=label
            myphrases = node_name
            matchedTerm["Phrases"]=myphrases
            matchedTerm["URI"]=uri
            #print("### Label:", label)
            #print("### Entity: ",node_name)
            #print("### URI: ",uri)
        except IndexError:
            print ("$$$$$ IndexError for %s")
        if label != 'Document':
            allTerms.append(matchedTerm)
    #print("----  All Terms: ",allTerms)
    return allTerms
Example #21
class StorageTest(TestCase):
    def setUp(self):
        self.graph = GraphDatabase(neo4j_url)

    def tearDown(self):
        storage.clear()

    def test_should_insert_person_into_graph_db_as_node(self):
        patricia = Person("Nekesa", "Patricia", datetime.now(), gender.FEMALE)
        storage.add_person(patricia)
        people = self.query('match (node) return node;')
        self.assertEqual(people[0], patricia.jsonify())

    def test_should_provide_all_people_in_graph(self):
        patricia = Person("Nekesa", "Patricia", datetime.now(), gender.FEMALE)
        jesse = Person("Wejuli", "Jesse", datetime.now(), gender.MALE)
        storage.add_person(patricia)
        storage.add_person(jesse)
        people = storage.all()
        self.assertEqual(people, [patricia.jsonify(), jesse.jsonify()])

    def query(self, query_string):
        query_sequence = self.graph.query(query_string)
        results = []
        for element in query_sequence.elements:
            results.append(element[0]['data'])
        return results
Example #22
File: neo4j.py  Project: silky/timesketch
class Neo4jDataStore(object):
    """Implements the Neo4j datastore.

    Attributes:
        client: Instance of Neo4j GraphDatabase
    """
    def __init__(self, username, password, host=u'127.0.0.1', port=7474):
        """Create a neo4j client.

        Args:
            username: Neo4j username
            password: Neo4j password
            host: Neo4j host
            port: Neo4j port
        """
        super(Neo4jDataStore, self).__init__()
        self.client = GraphDatabase(u'http://{0:s}:{1:d}/db/data/'.format(
            host, port),
                                    username=username,
                                    password=password)

    @staticmethod
    def _get_formatter(output_format):
        """Get format class instance from format name.

        Args:
            output_format: Name as string of output format

        Returns:
            Output formatter object
        """
        default_output_format = u'neo4j'
        formatter_registry = {
            u'neo4j': Neo4jOutputFormatter,
            u'cytoscape': CytoscapeOutputFormatter
        }
        formatter = formatter_registry.get(output_format, None)
        if not formatter:
            formatter = formatter_registry.get(default_output_format)
        return formatter()

    def search(self, query, output_format=None, return_rows=False):
        """Search the graph.

        Args:
            query: A cypher query
            output_format: Name of the output format to use
            return_rows: Boolean indicating if rows should be returned

        Returns:
            Dictionary with formatted query result
        """
        data_content = DATA_GRAPH
        # pylint: disable=redefined-variable-type
        if return_rows:
            data_content = True
        query_result = self.client.query(query, data_contents=data_content)
        formatter = self._get_formatter(output_format)
        return formatter.format(query_result, return_rows)
Example #23
class Neo4jFiller:
    def __init__(self):
        self.faker = faker.Faker()

        self.db = GraphDatabase("http://localhost:7474/db/data/",
                                username=constants.USERNAME,
                                password=constants.PASSWORD)
        self.perid_max = 0

    def create_person_nodes(self, nodes_number):
        for i in range(nodes_number):
            person_node = self.db.nodes.create(name=self.faker.name(),
                                               id=self.perid_max + i)
            person_node.labels.add(constants.PERID)
            self.assign_history_node(person_node)

    def assign_history_node(self, person_node):
        for j in range(random.randint(1, 3)):
            history_node = self.db.nodes.create()
            history_node.labels.add(constants.HISID)

            history_node.relationships.create("WHO", person_node)

            self.assign_date_node(history_node)
            self.assign_action_node(history_node)
            self.assign_company_node(history_node)

    def assign_date_node(self, history_node):
        date_node = self.db.nodes.create(
            date="{} {}".format(self.faker.date(), self.faker.time()))
        history_node.relationships.create("WHEN", date_node)
        date_node.labels.add(constants.DATE)

    def assign_action_node(self, history_node):
        action_node = self.db.nodes.create(action=self.faker.catch_phrase())
        history_node.relationships.create("WHAT", action_node)
        action_node.labels.add(constants.ACTION)

    def assign_company_node(self, history_node):
        company_node = self.db.nodes.create(company=self.faker.company())
        history_node.relationships.create("WHERE", company_node)
        company_node.labels.add(constants.COMPANY)

    def drop_db(self):
        query = 'MATCH (n) DETACH DELETE n'
        self.db.query(query)
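A hypothetical usage sketch, assuming the `constants` module referenced above supplies the credentials and label names and that `faker` is installed:

filler = Neo4jFiller()
filler.drop_db()                 # start from an empty graph
filler.create_person_nodes(10)   # 10 persons, each with 1-3 history entries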
Example #24
    def least_recently(self, request, **kwargs):
        
        data = {}
        data['time_ranking'] = []
        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
        
        time = {}

        # process time of grammar of submissions of a user
        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)    
        submissions = gdb.query("""MATCH (n:`User`)-[:submits]->(s:`Submission`) WHERE HAS (n.username) AND n.username =  '******' RETURN s""")            
        
        # get the current time
        unix = datetime(1970,1,1)
                                    
        # get the submission times per ref key
        for sub in submissions.elements:
            
            try:
                if len(sub[0]['data']['ref']) == 0:
                    return self.error_response(request, {'error': 'Reference keys are necessary for calculating averaged lesson progress.'}, response_class=HttpBadRequest)
                
                t = dateutil.parser.parse(sub[0]['data']['timestamp'])
                t = t.replace(tzinfo=None)
                diff = (t-unix).total_seconds()
                try:
                    time[sub[0]['data']['ref']].append(diff)
                except KeyError as k:
                    time[sub[0]['data']['ref']] = []
                    time[sub[0]['data']['ref']].append(diff)
            except KeyError as k:
                continue
                
        
        # calculate the averages and sort by it
        average = {}
        for ref in time.keys():
            average[ref] = 0.0
            for value in time[ref]:
                average[ref] = average[ref] + value
                
            av = average[ref]/len(time[ref])
            av = datetime.fromtimestamp(int(av)).strftime('%Y-%m-%d %H:%M:%S')
            av = av.replace(' ', 'T')
            average[ref] = av
        
        sorted_dict = sorted(average.iteritems(), key=operator.itemgetter(1))
        #sorted_reverse = sorted_dict.reverse()
                
        for entry in sorted_dict:
            data['time_ranking'].append({'ref': entry[0],
                                         'average': average[entry[0]],
                                         'title': Grammar.objects.filter(ref=entry[0])[0].title,
                                         'query': Grammar.objects.filter(ref=entry[0])[0].query})
    
        #return the json
        return self.create_response(request, data)
Example #25
def execute():

	url = 'http://ec2-54-211-27-90.compute-1.amazonaws.com:8080/db/data/'

	gdb = GraphDatabase(url)

	query = "start n=node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() and n.postedTime + 3600000 > timestamp() - 24*60*60*1000 return \"0\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 2*24*60*60*1000 return \"-1\" as Day, n as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 2*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 3*24*60*60*1000 return \"-2\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 3*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 4*24*60*60*1000 return \"-3\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 4*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 5*24*60*60*1000  return \"-4\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 5*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 6*24*60*60*1000 return \"-5\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 6*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 7*24*60*60*1000 return \"-6\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 7*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 8*24*60*60*1000 return \"-7\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 8*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 9*24*60*60*1000 return \"-8\" as Day, count(n) as Tweets UNION start n = node(*) where  n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 9*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 10*24*60*60*1000 return \"-9\" as Day, count(n) as Tweets"	
	results = gdb.query(query).get_response()
	print results
Example #26
class Neo4j(object):
    """" Class to abstract interactions
    """
    def __init__(self, cfg):
        """ Init object
        """
        self._cfg = cfg
        self.con_gdb()
        self._labels = {}

    def con_gdb(self):
        """ connect to neo4j
        """
        url = "http://%(--neo4j-host)s:7474" % self._cfg
        try:
            self._gdb = GraphDatabase(url)
        except ConnectionError:
            time.sleep(3)
            self.con_gdb()

    def query(self, query):
        self._cfg._logger.debug(query)
        res = self._gdb.query(query, returns=Node)
        src_ret = self.unfold(res)
        return src_ret

    def unfold(self, res):
        if all([isinstance(item[0], Node) for item in res]):
            return [item[0] for item in res]
        elif isinstance(res, list):
            ret = res.pop()
            return self.unfold(ret)
        else:
            if isinstance(res, QuerySequence):
                return None
            return res

    def create_node(self, label, **kwargs):
        """ create node
        """
        if label not in self._labels:
            self._labels[label] =  self._gdb.labels.create(label)
        node = self._gdb.nodes.create(**kwargs)
        self._labels[label].add(node)
        return node

    @staticmethod
    def have_relationship(label, dst, src):
        """ Checks if node is member of partition
        :param gnode: Neo4j object of Node
        :param gpart: Neo4j object of Partition
        :return: True if member, else False
        """
        for rel in dst.relationships:
            if rel.type == label and rel.start.properties['name'] == src.properties['name']:
                return True
        return False
Example #27
    def least_recently(self, request, **kwargs):
        
        data = {}
        data['time_ranking'] = []
        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
        
        time = {}

        # process time of grammar of submissions of a user
        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)    
        submissions = gdb.query("""MATCH (n:`User`)-[:submits]->(s:`Submission`) WHERE HAS (n.username) AND n.username =  '******' RETURN s""")            
        
        # get the current time
        unix = datetime(1970,1,1)
                                    
        # get the submission times per ref key
        for sub in submissions.elements:
            
            try:
                if len(sub[0]['data']['ref']) == 0:
                    return self.error_response(request, {'error': 'Reference keys are necessary for calculating averaged lesson progress.'}, response_class=HttpBadRequest)
                
                t = dateutil.parser.parse(sub[0]['data']['timestamp'])
                t = t.replace(tzinfo=None)
                diff = (t-unix).total_seconds()
                try:
                    time[sub[0]['data']['ref']].append(diff)
                except KeyError as k:
                    time[sub[0]['data']['ref']] = []
                    time[sub[0]['data']['ref']].append(diff)
            except KeyError as k:
                continue
                
        
        # calculate the averages and sort by it
        average = {}
        for ref in time.keys():
            average[ref] = 0.0
            for value in time[ref]:
                average[ref] = average[ref] + value
                
            av = average[ref]/len(time[ref])
            av = datetime.fromtimestamp(int(av)).strftime('%Y-%m-%d %H:%M:%S')
            av = av.replace(' ', 'T')
            average[ref] = av
        
        sorted_dict = sorted(average.iteritems(), key=operator.itemgetter(1))
        #sorted_reverse = sorted_dict.reverse()
                
        for entry in sorted_dict:
            data['time_ranking'].append({'ref': entry[0],
                                         'average': average[entry[0]],
                                         'title': Grammar.objects.filter(ref=entry[0])[0].title,
                                         'query': Grammar.objects.filter(ref=entry[0])[0].query})
    
        #return the json
        return self.create_response(request, data)
Example #28
def extract_node_features(nodes, multiclass=False):
	X = []
	Y = []
	index_map = {}
	gdb = GraphDatabase('http://ec2-54-187-76-157.us-west-2.compute.amazonaws.com:7474/db/data/')
	for i, node in enumerate(nodes):
		# phi = [handle_length, num_non_alpha in handle, belief, num_links, |indicators for source urls|]
		phi = []
		node_handle = node['node_handle']
		# handle_length
		phi.append(len(node_handle))
		# num_non_alpha characters
		phi.append(len([c for c in node_handle if not c.isalpha()]))
		q = 'MATCH (n{handle:' + node_handle + '})-[r]-(x) RETURN r, n, x'
		links = gdb.query(q=q)
		source_urls = set()
		belief = 0
		neighbor_beliefs = []
		for link in links:
			s_url = link[0]['data']['source_url']
			source_urls.add(s_url)
			try:
				belief = link[1]['data']['belief']
			except KeyError:
				pass
		#belief
		phi.append(belief)
		# num_links
		phi.append(len(links))
		# indicator variables for urls
		for source in GRAPH_SOURCES:
			if source in source_urls:
				phi.append(1)
			else:
				phi.append(0)
		action_type = node['action_type']
		if not multiclass:
			# binary classification, 'GOOD_NODE' = 1
			if action_type == "'GOOD_NODE'":
				Y.append(1)
			else:
				Y.append(2)
		else:
			# multiclass classification
			if action_type == "'GOOD_NODE'":
				Y.append(1)
			elif action_type == "'REMOVE_NODE'":
				Y.append(2)
			elif action_type == "'SPLIT_NODE'":
				Y.append(3)
			elif action_type == "'RENAME_NODE'":
				Y.append(4)
			else:
				print action_type
		index_map[node['id_node']] = i
		X.append(phi)
	return X, Y, index_map
Example #29
def run_query(cypher_query, returns=None, data_contents=None):
    gdb = GraphDatabase("http://localhost:27474",
                        username='******',
                        password='******')

    result = gdb.query(cypher_query,
                       returns=returns,
                       data_contents=data_contents)
    return result
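A hypothetical call of the helper above (the `Person` label is a placeholder; note the non-default port 27474 and the masked credentials in the snippet):

from neo4jrestclient.client import Node

rows = run_query("MATCH (n:Person) RETURN n LIMIT 5", returns=Node)
for row in rows:
    print(row[0].properties)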
Example #30
class GraphDB(object):

	def __init__(self, database="http://localhost:7474/db/data"):
		self.gdb = GraphDatabase(database)

	def addPaper(self, uid, title, authors):
		new_node = self.gdb.node()
		new_node.labels.add('Paper')
		new_node['uid'] = uid
		new_node['title'] = title
		new_node['authors'] = authors

	def getNode(self, uid):
		get_query = 'MATCH (n:Paper) WHERE n.uid=%d RETURN n'%uid
		qRes = self.gdb.query(q=get_query, returns=Node)
		if qRes == None:
			return None
		return qRes[0][0] #First element of first result is the expected node

	def editPaper(self, uid, key, value):
		node = self.getNode(uid)
		if not node:
			return False

		node.set(key, value)

	def deletePaper(self, uid):
		delQuery = 'MATCH (n { uid: %d })-[r]-() DELETE n, r'%uid
		try:
			self.gdb.query(q = delQuery)
		except Exception:
			return False

		return True

	def setReference(self, sourceUID, targetUID):
		srcNode = self.getNode(sourceUID)
		targetNode = self.getNode(targetUID)
		if srcNode ==None or targetNode ==None:
			return False

		newRel = srcNode.relationships.create("REFERENCE", targetNode)
		return True
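A hypothetical usage sketch for the paper store above, using the default local database URL; the ids, titles and authors are made up:

store = GraphDB()
store.addPaper(1, "A Study of Graphs", "Alice; Bob")
store.addPaper(2, "Follow-up Work", "Carol")
store.setReference(2, 1)          # paper 2 references paper 1
store.editPaper(1, "year", 2014)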
Example #31
File: neo.py  Project: vindurriel/nest
    def get(self, params={}):
        depth = params.get("depth", 3)
        q = params.get("q", "artist_10165")
        query = u"""start n=node:node_auto_index(id="{}") match n-[r*1..{}]-m  return m,r""".format(q, depth)
        print query
        from neo4jrestclient.client import GraphDatabase, Node, Relationship

        gdb = GraphDatabase("http://localhost:7474/db/data")
        res = gdb.query(q=query, returns=(Node, Relationship))
        return res
Example #32
def getAllNodes():
	db1 = GraphDatabase("http://localhost:7474/db/data/")
	q = '''START n=node(*) RETURN n LIMIT 5'''
	results = db1.query(q,returns=client.Node)
	nodes = []
	print len(results)
	for i in xrange(len(results)):
		for word in results[i]:
			nodes.append(str(word.properties['name']))
	return nodes
Example #33
def _get_server_address(student_id):
    # Get the IP address of the student information server from Neo4j
    url = "http://133.16.239.120:7474/db/data/"
    gdb = GraphDatabase(url)

    query = "MATCH(n:Student{{id:'{}'}}) RETURN n.server;"
    result = gdb.query(query.format(student_id), data_contents=True)
    result = result[0][0]

    return result
Example #34
 def getAlarmJSONgraph(
         cypherQuery='MATCH (L:Alarm)-[R:RAISED_ON]->(O) RETURN L, O, R'):
     global gdb
     if not gdb:
         gdb = GraphDatabase(DATABASE_REST_CONN)
     results = gdb.query(cypherQuery, data_contents=True)
     jsonDp = results.graph
     nodeArray = dict()
     relArray = dict()
     if jsonDp is None:
         return {}
     for item in jsonDp:
         for rel in item["relationships"]:
             objNodes = item["nodes"]
             raisedOnNode = None
             alarmNode = None
             for nd in objNodes:
                 fromNodeArray = nodeArray.get(nd["id"], None)
                 useThisNode = None
                 if fromNodeArray:
                     useThisNode = fromNodeArray
                 else:
                     nodeArray[nd["id"]] = nd
                     useThisNode = nd
                     #logger.info("--> New Node: %s", useThisNode["id"])
                 if rel["type"] == "RAISED_ON":
                     if useThisNode["id"] == rel["endNode"]:
                         raisedOnNode = useThisNode
                     elif useThisNode["id"] == rel["startNode"]:
                         alarmNode = useThisNode
                     if raisedOnNode and alarmNode:
                         if not "alarms" in raisedOnNode["properties"].keys(
                         ):
                             raisedOnNode["properties"]["alarms"] = list()
                         alarminfo = {
                             "alarmText":
                             alarmNode["properties"]["alarmText"],
                             "alarmClass":
                             alarmNode["properties"]["alarmClass"],
                             "alarmState":
                             alarmNode["properties"]["alarmState"]
                         }
                         raisedOnNode["properties"]["alarms"].append(
                             alarminfo)
             relArray[rel["id"]] = rel
             #logger.info("--> New Relationship: %s", rel["id"])
     d3json = [{
         "nodes": list(nodeArray.values())
     }, {
         "relationships": list(relArray.values())
     }]
     #logger.info("::getAlarmJSONgraph:: %s", d3json)
     return d3json
Example #35
def UpsertItem(itemid):
    db = GraphDatabase("http://localhost:7474/db/data/")
    item_query = "START ee=node(*) WHERE ee.itemid! = \"" + itemid + "\" RETURN ee;"

    result = db.query(q=item_query, returns=(client.Node, unicode, client.Relationship))
    if len(result) == 0:
        item = db.nodes.create(itemid=itemid)
    else:
        for node in result:
            item = node.pop()

    return item
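The `START ee=node(*) WHERE ee.itemid! = ...` form above is Neo4j 1.x syntax; on 2.x and later servers the same get-or-create can be expressed with MERGE. A hedged sketch under that assumption (nodes stay unlabeled as in the original, and the `{itemid}` parameter syntax assumes a pre-4.0 server):

def UpsertItemMerge(itemid):
    # MERGE matches an existing node with this itemid or creates one atomically
    db = GraphDatabase("http://localhost:7474/db/data/")
    q = "MERGE (ee {itemid: {itemid}}) RETURN ee"
    result = db.query(q=q, params={"itemid": itemid}, returns=client.Node)
    return result[0][0]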
Example #36
def getFoes(name):
	db1 = GraphDatabase("http://localhost:7474/db/data/")
	q = '''MATCH (n { name: \''''+name+'''\'})-[r]->m WHERE type(r) = 'FOE' RETURN n,r,m'''
	results = db1.query(q,returns=(client.Node, unicode, client.Relationship))
	endnode = []
	for i in xrange(len(results)):
		for word in results[i]:
			if word.__class__.__name__ == 'Relationship':
				endnode.append(str(word.properties['name']))

	print endnode
	return endnode
Example #37
def UpsertUser(userid):
    db = GraphDatabase("http://localhost:7474/db/data/")
    user_query = "START ee=node(*) WHERE ee.userid! = \"" + userid + "\" RETURN ee;"

    result = db.query(q=user_query, returns=(client.Node, unicode, client.Relationship))
    if len(result) == 0:
        user = db.nodes.create(userid=userid)
    else:
        for node in result:
            user = node.pop()

    return user
Example #38
def do_graph_query(request):
    """Make query to Neo4j, return names of associated nodes."""

    url = "http://35.161.86.89:7474/db/data"
    gdb = GraphDatabase(url, username="******", password="******")

    q = "MATCH (n:Drink) RETURN n LIMIT 25"
    results = gdb.query(q=q, data_contents=True)

    result = results.rows

    return result
Example #39
def extract_link_features(links):
	X = []
	Y = []
	index_map = {}
	print links
	for i, link in enumerate(links):
		phi = []
		p = re.compile("'link([A-Z_a-z\.0-9]+)To([A-Z_a-z\.0-9]+)'")
		m = p.match(link['id_node'])
		if m == None:
			p = re.compile("'link([A-Z_a-z\.0-9]+)to([A-Z_a-z\.0-9]+)'")
			m = p.match(link['id_node'])
		start_id = m.group(1)
		end_id = m.group(2)
		gdb = GraphDatabase('http://ec2-54-187-76-157.us-west-2.compute.amazonaws.com:7474/db/data/')
		q = "MATCH (n{handle:'" + start_id + "'})-[r]-(x{handle:'" + end_id + "'}) RETURN n, x, r"
		# q = 'MATCH (n{handle:' + node_handle + '})-[r]-(x) RETURN r, n, x'
		result = gdb.query(q=q)
		length = len(result)
		prodsum = 1
		if 'belief' in result[0][0]['data']:
			prodsum = prodsum*result[0][0]['data']['belief']
		if 'belief' in result[0][1]['data']:
			prodsum = prodsum*result[0][1]['data']['belief']
		q = "MATCH (n{handle:'" + start_id + "'})-[r]-(x) RETURN r"
		result = gdb.query(q=q)
		first = len(result)
		q = "MATCH (n{handle:'" + end_id + "'})-[r]-(x) RETURN r"
		result = gdb.query(q=q)
		noOfLinks= first+len(result)
		phi.append(noOfLinks)
		feedback_type = link['feedback_type']
		# binary classification, 'agree' = 1
		if 'disagree' in feedback_type:
			Y.append(2)
		else:
			Y.append(1)
		index_map[link['id_node']] = i
		X.append(phi)
	return X, Y, index_map
Example #40
def build_graphdb(args):

    url = 'http://{}:{}@localhost:7474/db/data/'.format(args.user, args.pw)
    db = GraphDatabase(url)

    # delete
    db.query("MATCH (n) OPTIONAL MATCH (n)-[r]-() DELETE n,r",
             data_contents=True)

    print('adding nodes...')
    node_dict = {}
    for line in open(args.node):
        name = line.strip()
        node_dict[name] = db.nodes.create(name=name)
        node_dict[name].labels.add('entity')

    print('adding edges...')
    for line in open(args.triple):
        sub, rel, obj = line.strip().split('\t')
        node_dict[sub].relationships.create(rel, node_dict[obj])

    print('DONE')
Example #41
def to_csv(fname):
    import pandas as pd
    from neo4jrestclient.client import GraphDatabase
    gdb = GraphDatabase("http://localhost:7474/db/data/",
                        username="******",
                        password="******")

    q = "MATCH (da_start:TIMEPOINT)-[da:DIALACT]->(da_end:TIMEPOINT) RETURN da"
    dialogs = gdb.query(q=q)
    result = []
    for da in dialogs:
        result.append(da[0]["data"])
    df = pd.DataFrame(result)
    df.to_csv(fname)
Example #42
File: api.py  Project: daniels-mer/kmap
    def dehydrate(self, bundle):
        links = []
        gdb = GraphDatabase("http://localhost:7474/db/data/")
        query = """START a = node:`kmap-Concept`(label = "%s")
                 MATCH a<-[:concepts]-b-[:concepts]->c
                 RETURN b, c;
                """ % bundle.data["label"]
        data = gdb.query(q=query, returns=(Node, Node))
        for datum in data:
            links.append({"type" : datum[0]["type"], 
                          "label" : datum[1]["label"]})

        bundle.data["links"] = links
        return bundle
Example #43
def neo4j_to_lkg():
    """
    Read the graph stored in the Neo4j database and convert it into a LegalKnowledgeGraph (networkx-based) object.
    """
    node_types = ["judge", "keyword", "case", "catch", "act", "year"]
    from backend.graph_formation.base.legal_knowledge_graph import LegalKnowledgeGraph

    lkg = LegalKnowledgeGraph()
    db = GraphDatabase(ENV["DB_URL"],
                       username=ENV["DB_USERNAME"],
                       password=ENV["DB_PASSWORD"])
    # Credentials for the Neo4j server

    for node_type in node_types:
        q = "MATCH (c:{}) return c".format(
            node_type)  # Querying for all nodes in the graph
        results = db.query(q)
        for record in results:
            props = {}
            node = record[0]
            if node:
                label = node["metadata"]["labels"]
                node_id = node["data"]["id"]
                node["data"].pop("id", None)
                props = node["data"]
                props["type"] = label
                lkg.add_node(node_id, **props)
    for node_type_1 in node_types:
        for node_type_2 in node_types:
            q = "MATCH (c:{})-[r]->(m:{}) return c,m".format(
                node_type_1,
                node_type_2)  # Querying for all relationships in the graph
            results = db.query(q)
            for record in results:
                node1, node2 = record
                lkg.add_edge(node1["data"]["id"], node2["data"]["id"])
    return lkg
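
Assuming LegalKnowledgeGraph exposes the standard networkx interface (the docstring above describes it as a networkx graph), the converted graph can be inspected like any other networkx graph; a hedged sketch:

# Hedged inspection sketch; the 'type' attribute is the Neo4j label list
# attached in the loop above, everything else here is an assumption.
lkg = neo4j_to_lkg()
print(len(lkg.nodes()), 'nodes,', len(lkg.edges()), 'edges')
judges = [n for n, attrs in lkg.nodes(data=True)
          if 'judge' in attrs.get('type', [])]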
Example #44
0
 def statistics(self, request, **kwargs):
     
     data = {}
     gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
     
     # get the user
     if request.GET.get('user'):
         user = request.GET.get('user')
     else:
         user = request.user.username
         
     try:
         userTable = gdb.query("""MATCH (n:`User`) WHERE n.username =  '******' RETURN n""")
         userNode = gdb.nodes.get(userTable[0][0]['self'])
     except:
         return self.error_response(request, {'error': 'No neo4j node exists for the user: "******". Make sure you have submissions and user is logged in or passed.'}, response_class=HttpBadRequest)
     
     # preprocess knowledge of a user; callFunction = self.calculateKnowledgeMap(request.GET.get('user')); vocKnowledge = callFunction[0]; refFlat = callFunction[1]; lemmaFreqs = callFunction[2]
     knows_vocab = 0
     knows_grammar = 0
     knows_syntax = 0    
     # get the sentences of that document
     sentenceTable = gdb.query("""MATCH (n:`Document`)-[:sentences]->(s:`Sentence`)-[:words]->(w:`Word`) WHERE HAS (n.CTS) AND n.CTS = '""" +request.GET.get('range')+ """' RETURN count(w)""")
     all = sentenceTable[0][0]
     
     sentenceTable = gdb.query("""MATCH (u:`User`)-[:has_seen]->(l:`Lemma`) WHERE u.username='******' RETURN l""")
     
     for lemma in sentenceTable:
         knows_vocab = knows_vocab + lemma[0]['data']['frequency'] 
     
     knows_grammar = len(userNode.relationships.outgoing(["knows_morph"])[:])
     
     # after reading everything return the statistics
     data['statistics'] = {'all': all, 'vocab': float(knows_vocab)/float(all), 'morphology': float(knows_grammar)/float(all), 'syntax': float(knows_syntax)/float(all)}
 
     return self.create_response(request, data)
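
The queries above splice request parameters straight into the Cypher text. neo4jrestclient's query() also accepts a params dict (Example #46 below passes one), so inside the same method the word-count query could be written with a parameter instead; a hedged sketch, with the parameter name chosen here rather than taken from the original:

# Hedged sketch: same count query, but with the CTS value passed as a query
# parameter ({cts} is the legacy Neo4j 2.x parameter syntax this REST client
# targets); the parameter name is an assumption.
q = ("MATCH (n:`Document`)-[:sentences]->(s:`Sentence`)-[:words]->(w:`Word`) "
     "WHERE HAS (n.CTS) AND n.CTS = {cts} RETURN count(w)")
sentenceTable = gdb.query(q, params={'cts': request.GET.get('range')})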
Example #46
0
def UpsertRating(user, item, rating):
    from neo4jrestclient import client
    from neo4jrestclient.client import GraphDatabase
    db = GraphDatabase("http://localhost:7474/db/data/")

    rating_query = "START a = node(" + str(user.id) + ") MATCH a-[r]-b WHERE b.itemid=\""
    rating_query += str(item.properties["itemid"]) + "\" RETURN r;"

    params = {}
    result = db.query(rating_query, params=params, returns=(client.Node, unicode, client.Relationship))
    if len(result) == 0:
        user.relationships.create("Rated", item, rating=rating)
    else:
        for rel in result:
            r = rel.pop()
            if r["rating"] != rating:
                r["rating"] = rating
Example #47
0
def shortest_path(origin,destiny):
    db = GraphDatabase("http://localhost:7474/db/data/")
    q = lambda origin, destiny: """MATCH (from), (to) , path = (from)-[:DOMAIN_LINK*]->(to)
        WHERE from.prop_node_id='%s' AND to.prop_node_id='%s'
        RETURN path AS shortestPath,
        reduce(distance = 0, r in relationships(path) | distance+r.STFIPS1) AS totalDistance,
        nodes(path)
        ORDER BY totalDistance ASC
        LIMIT 1"""%(origin,destiny)
    result = db.query(q(origin, destiny))
    if len(result) == 0:
        print "There is no path"
        return []
    path = result[0]
    names = [x['data']['prop_node_id'] for x in path[2]]
    print 'Cheapest path: ' + ' - '.join(names)
    return path
Example #48
0
def getRelationship(name1,name2):
	db1 = GraphDatabase("http://localhost:7474/db/data/")
	q = '''MATCH (n { name: \''''+name1+'''\'})-[r]->(m { name: \''''+name2+'''\'}) RETURN n,r,m'''
	results = db1.query(q,returns=(client.Node, unicode, client.Relationship))
	rel = []
	for i in xrange(len(results)):
		for word in results[i]:
			if word.__class__.__name__ == 'unicode':
				json1_str = str(word)
				rel.append(getRelType(json1_str))
				break

	if(len(rel)>=1):
		print rel[0]
		return rel[0]
	return 0
Example #49
0
def prepare(fromUser):
    db = GraphDatabase("http://localhost:7474", username="******", password="******")
    nodesQuery = "MATCH (n) RETURN n.name"
    nodeValues = db.query(nodesQuery, data_contents=True)
    inputList = []
    input = []

    for i in range(0,len(nodeValues)):
        inputList.append(nodeValues[i][0])
        #print inputList[i]

    for item in fromUser:
        if item in inputList:
            input.append(item)

    return input
Example #50
0
    def post_list(self, request, **kwargs):
        """
        Create a new document object and return it if the user is authenticated and exists. Create a new Neo4j user node in case the user doesn't exist on this side.
        """
        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
        
        self.method_check(request, allowed=['post'])
        self.is_authenticated(request)
        
        if not request.user or not request.user.is_authenticated():
            return self.create_response(request, { 'success': False, 'error_message': 'You are not authenticated, %s.' % request.user })

        data = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))

        # get the user via a neo4j look-up or create a new one
        if request.user.username is not None:
            userTable = gdb.query("""MATCH (u:`User`) WHERE HAS (u.username) AND u.username='******' RETURN u""")
        
            if len(userTable) > 0:    
                userurl = userTable[0][0]['self']
                userNode = gdb.nodes.get(userurl)            
            
            else:
                userNode = gdb.nodes.create(username=request.user.username)
                userNode.labels.add("User")
            
            document = gdb.nodes.create(
                CTS = data.get("CTS"),  
                author = data.get("author"), 
                lang = data.get("lang"),    
                name = data.get("name")
            )
            document.labels.add("UserDocument")
            data['resource_uri'] = '/api/v1/user_document/' + str(document.id) + '/'
            data['user'] = request.user.username
            
            if document is None :
                # in case an error wasn't already raised             
                raise ValidationError('Document node could not be created.')
        
            # Form the connections from the new Submission node to the existing slide and user nodes
            userNode.owns(document)
                
            return self.create_response(request, data)
        
        else:
            return self.error_response(request, {'error': 'User is required.' }, response_class=HttpBadRequest)
Example #51
0
    def getNewUserId(self):
        conf = DBConf.DBConf()
        elements = conf.getNeo4jConfig()
        dbUrl = elements[0]
        dbUser = elements[1]
        dbPass = elements[2]

        db = GraphDatabase(dbUrl, dbUser, dbPass)
        # count all existing nodes and hand out count+1 as the next user id
        query = "MATCH (n) RETURN count(*) as c"
        results = db.query(query, returns=str)
        for res in results:
            val = res[0]

        return int(val) + 1
Example #52
0
def neo():
    db = GraphDatabase("http://localhost:7474", username="******", password="******")
    tag_value = False
    res = []
    try:
        tag_value = request.form.getlist('tag_query')
    except:
        pass
    if tag_value:
        res = []
        for tag in tag_value[:-1]:
            query = 'MATCH (t:Texts)-[r:contains]->(m:Mistake) WHERE m.name="%s" RETURN t, type(r), m' % tag
            results = db.query(query, returns=(client.Node, str, client.Node))
            for r in results:
                line = r[0]["name"] + ' ' + r[1] + ' ' + r[2]["name"]
                res.append(line)
    return render_template("neo.html", res=res, tags=tags)
Example #53
0
def cityDiff(queryDict):
    # connect to the neo4j database
    gdb = GraphDatabase("http://localhost:7474/db/data/",
                        username="******",
                        password="******")

    if queryDict['consultant'] == '':
        a = ''
        a1 = ''
    else:
        a = " a.IsCon = '" + queryDict['consultant'] + "' "
        a1 = " and "

    if queryDict['order'] == '':
        b = ''
        b1 = ''
    else:
        b = " a.IsOrder = '" + queryDict['order'] + "' "
        b1 = " and "

    if queryDict['city'] == '':
        c = ''
    else:
        c = " a.City = '" + queryDict['city'] + "' "

    if queryDict['consultant'] == '' and queryDict[
            'order'] == '' and queryDict['city'] == '':
        d = '  WHERE EXISTS(a.City) and a.ShareId<>"NULL"'
    else:
        d = (' WHERE EXISTS(a.City) and a.ShareId<>"NULL" and ' + a + a1 + b +
             b1 + c).rstrip().rstrip('and')

    # number of records per city
    q4 = "match (a:CITY) " + d + " return a.City,count(*) order by count(*) desc"
    # run the cypher query
    cityDiffusion = gdb.query(q4)
    cityDiff = {}
    result1 = []
    result2 = []
    for a in cityDiffusion:
        result1.append(a[0])
        result2.append(a[1])
    cityDiff['city'] = result1
    cityDiff['cityCount'] = result2
    return cityDiff
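
The WHERE clause above is assembled by string concatenation and then trimmed with rstrip; a hedged alternative that collects the conditions in a list and passes the values as query parameters (the function name and parameter names are assumptions):

# Hedged sketch of the same city-count query with a list-built WHERE clause
# and parameterized values.
def city_diffusion_query(gdb, queryDict):
    clauses = ['EXISTS(a.City)', 'a.ShareId <> "NULL"']
    params = {}
    if queryDict.get('consultant'):
        clauses.append('a.IsCon = {consultant}')
        params['consultant'] = queryDict['consultant']
    if queryDict.get('order'):
        clauses.append('a.IsOrder = {order}')
        params['order'] = queryDict['order']
    if queryDict.get('city'):
        clauses.append('a.City = {city}')
        params['city'] = queryDict['city']
    q = ('MATCH (a:CITY) WHERE ' + ' and '.join(clauses) +
         ' RETURN a.City, count(*) ORDER BY count(*) DESC')
    return gdb.query(q, params=params)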
Example #54
0
def prepare(fromUser):
    db = GraphDatabase("http://localhost:7474",
                       username="******",
                       password="******")
    nodesQuery = "MATCH (n) RETURN n.name"
    nodeValues = db.query(nodesQuery, data_contents=True)
    inputList = []
    input = []

    for i in range(0, len(nodeValues)):
        inputList.append(nodeValues[i][0])
        #print inputList[i]

    for item in fromUser:
        if item in inputList:
            input.append(item)

    return input
Example #55
0
def test():

    gdb = GraphDatabase("http://localhost:7474/db/data/")
    person=gdb.labels.get("Person")
    # for p in person.get(name='Lana Wachowski'):
    # #for p in person.filter(gdb.query("born","gte",1970)):
    #     print p["name"]
    #     print p.properties
    #     print p["born"]

    #qry="start n=node(10) match n-[r]-() return n, n.name, r"
    #qry="MATCH (n { name: 'Tom Cruise' })-[r:ACTED_IN]->(c) RETURN c.title,r.roles"
    #qry="MATCH (n:Person {name:'Tom Cruise'} )return n.name, n.type"
    qry='MATCH (a { name: "Tom Cruise" })-[r:ACTED_IN]->(m) RETURN a.born > 1920, "Im a literal",(a)-->(),"acted in "+m.title'
    res=gdb.query(qry)
    #results = gdb.query(res, returns=(client.Node, unicode, client.Relationship))
    for i in res:
        print i[3]
Example #56
0
File: lemma.py Project: prival/phaidra
    def obj_get(self, bundle, **kwargs):

        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
        lemma = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" +
                              kwargs['pk'] + '/')

        # get the data of the word
        new_obj = DataObject(kwargs['pk'])
        new_obj.__dict__['_data'] = lemma.properties
        new_obj.__dict__['_data']['id'] = kwargs['pk']

        # get the values
        values = lemma.relationships.outgoing(types=["values"])
        valuesArray = []
        for v in range(0, len(values), 1):
            val = values[v].end
            val.properties['resource_uri'] = API_PATH + 'word/' + str(
                val.id) + '/'
            val.properties['translations'] = []

            # get the full translation; force the API into a full representation when the 'full' parameter is set
            if bundle.request.GET.get('full'):

                translations = gdb.query(
                    """MATCH (d:`Word`)-[:translation]->(w:`Word`) WHERE d.CTS='"""
                    + val.properties['CTS'] +
                    """' RETURN DISTINCT w ORDER BY ID(w)""")
                translationArray = []
                for t in translations:
                    trans = t[0]
                    transurl = trans['self'].split('/')
                    trans['data'][
                        'resource_uri'] = API_PATH + 'word/' + transurl[
                            len(transurl) - 1] + '/'
                    translationArray.append(trans['data'])

                val.properties['translations'] = translationArray

            valuesArray.append(val.properties)

        new_obj.__dict__['_data']['values'] = valuesArray

        return new_obj
Example #57
0
    def least_accurate(self, request, **kwargs):
        
        data = {}
        data['accuracy_ranking'] = []
        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
        
        accuracy = {}

        # process accuracy of grammar of submissions of a user
        submissions = gdb.query("""MATCH (n:`User`)-[:submits]->(s:`Submission`) WHERE HAS (n.username) AND n.username =  '******' RETURN s""")            
                                    
        # get the accuracy per ref key
        for sub in submissions.elements:
            try:                             
                try:
                    accuracy[sub[0]['data']['ref']].append(sub[0]['data']['accuracy'])  
                except KeyError as k:
                    accuracy[sub[0]['data']['ref']] = []
                    accuracy[sub[0]['data']['ref']].append(sub[0]['data']['accuracy'])
            except KeyError as k:
                continue
                
        
        # calculate the averages and sort by it
        average = {}
        for ref in accuracy.keys():
            average[ref] = 0.0
            for value in accuracy[ref]:
                average[ref] = average[ref] + value
            average[ref] = average[ref]/len(accuracy[ref]) 
        
        sorted_dict = sorted(average.iteritems(), key=operator.itemgetter(1))
        #sorted_reverse = sorted.reverse()                         
                
        for entry in sorted_dict:
            data['accuracy_ranking'].append({'ref': entry[0], 'average': average[entry[0]],
                                             'title': Grammar.objects.filter(ref=entry[0])[0].title,
                                             'query': Grammar.objects.filter(ref=entry[0])[0].query})
    
        # return the json
        return self.create_response(request, data)