def __init__(self):
    """Set up the test environment and seed the module-level graph with one node."""
    global graph
    self.env = Env(decodeResponses=True)
    conn = self.env.getConnection()
    graph = Graph('update', conn)
    # create a single node with attribute 'v'
    graph.query("CREATE ({v:1})")
def test14_post_deletion_traversal_directions(self):
    """Variable-length traversals must remain correct after a node deletion."""
    self.env.flush()
    conn = self.env.getConnection()
    g = Graph("G", conn)

    # Build one node per label, remembering each node by its label.
    node_by_label = {}
    for idx, label in enumerate(["Dest", "Src", "Src2"]):
        n = Node(label=label, properties={"val": idx})
        g.add_node(n)
        node_by_label[label] = n

    # Both source nodes point at the shared destination.
    g.add_edge(Edge(node_by_label["Src"], "R", node_by_label["Dest"]))
    g.add_edge(Edge(node_by_label["Src2"], "R", node_by_label["Dest"]))
    g.commit()

    # Delete one source node; its attached edge is removed with it.
    result = g.query("""MATCH (n:Src2) DELETE n""")
    self.env.assertEquals(result.nodes_deleted, 1)
    self.env.assertEquals(result.relationships_deleted, 1)

    # Exactly one variable-length path should remain.
    result = g.query("""MATCH (n1:Src)-[*]->(n2:Dest) RETURN COUNT(*)""")
    self.env.assertEquals(result.result_set, [[1]])

    # Perform the same traversal, this time traveling from destination to source.
    result = g.query("""MATCH (n1:Src)-[*]->(n2:Dest {val: 0}) RETURN COUNT(*)""")
    self.env.assertEquals(result.result_set, [[1]])
def test_v6_decode(self): graph_name = "v6_rdb_restore" # dump created with the following query (v6 supported property value: integer, double, boolean, string, null, array) # graph.query g "CREATE (:L1 {val:1, strval: 'str', numval: 5.5, nullval: NULL, boolval: true, array: [1,2,3]})-[:E{val:2}]->(:L2{val:3})" # graph.query g "CREATE INDEX ON :L1(val)" # dump g v6_rdb = b"\a\x81\x82\xb6\xa9\x85\xd6\xadh\x06\x05\x02g\x00\x02\x06\x05\x04val\x00\x05\astrval\x00\x05\anumval\x00\x05\bnullval\x00\x05\bboolval\x00\x05\x06array\x00\x02\x02\x02\x00\x05\x03L1\x00\x02\x01\x02\x00\x05\x04val\x00\x02\x01\x05\x03L2\x00\x02\x00\x02\x01\x02\x00\x05\x02E\x00\x02\x00\x02\x02\x02\x01\x02\x00\x02\x06\x05\x04val\x00\x02`\x00\x02\x01\x05\astrval\x00\x02H\x00\x05\x04str\x00\x05\anumval\x00\x02\x80\x00\x00@\x00\x04\x00\x00\x00\x00\x00\x00\x16@\x05\bnullval\x00\x02\x80\x00\x00\x80\x00\x05\bboolval\x00\x02P\x00\x02\x01\x05\x06array\x00\x02\b\x02\x03\x02`\x00\x02\x01\x02`\x00\x02\x02\x02`\x00\x02\x03\x02\x01\x02\x01\x02\x01\x05\x04val\x00\x02`\x00\x02\x03\x02\x01\x02\x00\x02\x01\x02\x00\x02\x01\x05\x04val\x00\x02`\x00\x02\x02\x00\t\x00\xd9\r\xb4c\xf2Z\xd9\xb3" redis_con.restore(graph_name, 0, v6_rdb, True) redis_graph = Graph(graph_name, redis_con) node0 = Node(node_id=0, label='L1', properties={ 'val': 1, 'strval': 'str', 'numval': 5.5, 'boolval': True, 'array': [1, 2, 3] }) node1 = Node(node_id=1, label='L2', properties={'val': 3}) edge01 = Edge(src_node=0, relation='E', dest_node=1, edge_id=0, properties={'val': 2}) results = redis_graph.query("MATCH (n)-[e]->(m) RETURN n, e, m") self.env.assertEqual(results.result_set, [[node0, edge01, node1]]) plan = redis_graph.execution_plan("MATCH (n:L1 {val:1}) RETURN n") self.env.assertIn("Index Scan", plan) results = redis_graph.query("MATCH (n:L1 {val:1}) RETURN n") self.env.assertEqual(results.result_set, [[node0]])
def test_09_concurrent_multiple_readers_after_big_write(self):
    """Regression test for issue #890: many concurrent readers after one large write.

    Spawns CLIENT_COUNT reader threads; each must complete without an
    exception and observe all 1000 edges.
    """
    global assertions
    global exceptions
    redis_con = self.env.getConnection()
    redis_graph = Graph("G890", redis_con)

    # One big write: 1000 disjoint ()-[:R]->() pairs.
    redis_graph.query("""UNWIND(range(0,999)) as x CREATE()-[:R]->()""")

    read_query = """MATCH (n)-[r:R]->(m) RETURN n, r, m"""

    assertions = [True] * CLIENT_COUNT
    exceptions = [None] * CLIENT_COUNT

    threads = []
    for i in range(CLIENT_COUNT):
        t = threading.Thread(target=thread_run_query,
                             args=(redis_graph, read_query, i))
        # Thread.setDaemon() is deprecated since Python 3.10;
        # assign the daemon attribute directly instead.
        t.daemon = True
        threads.append(t)
        t.start()

    for t in threads:
        t.join()

    # Every reader must have succeeded and seen the full edge set.
    for i in range(CLIENT_COUNT):
        self.env.assertIsNone(exceptions[i])
        self.env.assertEquals(1000, len(assertions[i].result_set))
def test27_merge_create_invalid_entity(self):
    """MERGE with invalid property values must error out without creating entities."""
    # Skip this test if running under Valgrind, as it causes a memory leak.
    if Env().envRunner.debugger is not None:
        Env().skip()

    redis_con = self.env.getConnection()
    graph = Graph("N", redis_con)  # Instantiate a new graph.

    try:
        # Try to create a node with an invalid NULL property.
        graph.query("""MERGE (n {v: NULL})""")
        assert (False)
    except redis.exceptions.ResponseError as e:
        # Expecting an error.
        assert ("Cannot merge node using null property value" in str(e))

    # Verify that no entities were created.
    result = graph.query("""MATCH (a) RETURN a""")
    self.env.assertEquals(result.result_set, [])

    try:
        # Try to merge a node with a self-referential property.
        graph.query("""MERGE (a:L {v: a.v})""")
        assert (False)
    except redis.exceptions.ResponseError as e:
        # Expecting an error.
        self.env.assertIn("undefined property", str(e))
def test04_repeated_edges(self):
    """Two parallel edges between the same endpoints must survive an RDB reload."""
    g = Graph("repeated_edges", redis_con)
    src = Node(label='p', properties={'name': 'src'})
    dest = Node(label='p', properties={'name': 'dest'})
    first = Edge(src, 'e', dest, properties={'val': 1})
    second = Edge(src, 'e', dest, properties={'val': 2})
    for node in (src, dest):
        g.add_node(node)
    for edge in (first, second):
        g.add_edge(edge)
    g.commit()

    # Verify the new edge
    q = """MATCH (a)-[e]->(b) RETURN e.val, a.name, b.name ORDER BY e.val"""
    expected = [
        [first.properties['val'], src.properties['name'], dest.properties['name']],
        [second.properties['val'], src.properties['name'], dest.properties['name']],
    ]
    assert (g.query(q).result_set == expected)

    # Save RDB & Load from RDB
    redis_con.execute_command("DEBUG", "RELOAD")

    # Verify that the latest edge was properly saved and loaded
    assert (g.query(q).result_set == expected)
def test21_merge_scan(self):
    """Nodes created by MERGE must be visible to scans later in the same query."""
    redis_con = self.env.getConnection()
    graph = Graph("M", redis_con)

    # Starting with an empty graph:
    # an all-node scan should see created nodes.
    self.env.flush()
    result = graph.query("""MERGE (a {v:1}) WITH a MATCH (n) MERGE (n)-[:KNOWS]->(m)""")
    # Verify that every entity was created.
    self.env.assertEquals(result.nodes_created, 2)
    self.env.assertEquals(result.relationships_created, 1)
    self.env.assertEquals(result.properties_set, 1)

    # Starting with an empty graph:
    # a label scan should see created nodes.
    self.env.flush()
    result = graph.query("""MERGE (a:L {v:1}) WITH a MATCH (n:L) MERGE (n)-[:KNOWS]->(m)""")
    # Verify that every entity was created.
    self.env.assertEquals(result.nodes_created, 2)
    self.env.assertEquals(result.relationships_created, 1)
    self.env.assertEquals(result.properties_set, 1)
def test15_update_deleted_entities(self):
    """SET on entities deleted earlier in the same query must set nothing."""
    self.env.flush()
    conn = self.env.getConnection()
    g = Graph("delete_test", conn)

    a = Node()
    b = Node()
    g.add_node(a)
    g.add_node(b)
    g.add_edge(Edge(a, "R", b))
    g.flush()

    # Attempt to update entities after deleting them.
    result = g.query("""MATCH (a)-[e]->(b) DELETE a, b SET a.v = 1, e.v = 2, b.v = 3""")
    self.env.assertEquals(result.nodes_deleted, 2)
    self.env.assertEquals(result.relationships_deleted, 1)
    # No properties should be set.
    # (Note that this behavior is left unspecified by Cypher.)
    self.env.assertEquals(result.properties_set, 0)

    # Validate that the graph is empty.
    result = g.query("""MATCH (a) RETURN a""")
    self.env.assertEquals(result.result_set, [])
def test02_no_compaction_on_nodes_delete(self):
    """Node IDs must not be compacted across an RDB reload after deletions."""
    g = Graph("no_compaction_on_nodes_delete", redis_con)

    # Create 20 nodes meta keys
    g.query("UNWIND range(0, 20) as i CREATE (:Node)")

    # Collect the ordered node-ID list before and after each mutation.
    query = "MATCH (n:Node) WITH n ORDER by id(n) return COLLECT(id(n))"
    ids_full = g.query(query)

    # Delete 3 nodes.
    g.query("MATCH (n:Node) WHERE id(n) IN [7, 14, 20] DELETE n")
    ids_after_delete = g.query(query)

    # Save RDB & Load from RDB
    redis_con.execute_command("DEBUG", "RELOAD")
    reloaded = g.query(query)

    # Validate no compaction, all IDs are the same
    self.env.assertEquals(ids_after_delete.result_set, reloaded.result_set)

    # Validate reuse of node ids - create 3 nodes.
    g.query("UNWIND range (0, 2) as i CREATE (:Node)")
    refilled = g.query(query)
    self.env.assertEquals(ids_full.result_set, refilled.result_set)
def test_array_functions(self):
    """Arrays must round-trip through literals, node properties, and collect()."""
    g = Graph('social', self.r)

    g.query("""CREATE (p:person{name:'a',age:32, array:[0,1,2]})""")

    # Array-literal round-trip.
    result = g.query("""WITH [0,1,2] as x return x""")
    self.assertEqual([0, 1, 2], result.result_set[0][0])

    # collect() must return the full node, array property included.
    result = g.query("""MATCH(n) return collect(n)""")
    expected_node = Node(node_id=0, label='person', properties={
        'name': 'a',
        'age': 32,
        'array': [0, 1, 2]
    })
    self.assertEqual([expected_node], result.result_set[0][0])

    # All done, remove graph.
    g.delete()
def binary_tree_graph2():
    """Populate the global graph "G2" with a two-level tree plus a ring.

    Root A connects to b1/b2 via KNOWS and b3/b4 via FOLLOWS; each b-node
    has two FRIEND leaves; a FRIEND ring links b1->b2->b3->b4->b1.
    """
    global redis_graph
    redis_con = _brand_new_redis()
    redis_graph = Graph("G2", redis_con)
    # Single query: create all 13 nodes, then the three edge groups.
    redis_graph.query("CREATE(a: A {name: 'a'}), \
                       (b1: X {name: 'b1'}), \
                       (b2: X {name: 'b2'}), \
                       (b3: X {name: 'b3'}), \
                       (b4: X {name: 'b4'}), \
                       (c11: X {name: 'c11'}), \
                       (c12: Y {name: 'c12'}), \
                       (c21: X {name: 'c21'}), \
                       (c22: Y {name: 'c22'}), \
                       (c31: X {name: 'c31'}), \
                       (c32: Y {name: 'c32'}), \
                       (c41: X {name: 'c41'}), \
                       (c42: Y {name: 'c42'}) \
                       CREATE(a)-[:KNOWS] -> (b1), \
                       (a)-[:KNOWS] -> (b2), \
                       (a)-[:FOLLOWS] -> (b3), \
                       (a)-[:FOLLOWS] -> (b4) \
                       CREATE(b1)-[:FRIEND] -> (c11),\
                       (b1)-[:FRIEND] -> (c12), \
                       (b2)-[:FRIEND] -> (c21), \
                       (b2)-[:FRIEND] -> (c22), \
                       (b3)-[:FRIEND] -> (c31), \
                       (b3)-[:FRIEND] -> (c32), \
                       (b4)-[:FRIEND] -> (c41), \
                       (b4)-[:FRIEND] -> (c42) \
                       CREATE(b1)-[:FRIEND] -> (b2), \
                       (b2)-[:FRIEND] -> (b3), \
                       (b3)-[:FRIEND] -> (b4), \
                       (b4)-[:FRIEND] -> (b1) \
                       ")
def test06_batched_build(self):
    """Bulk insert with --max-token-count 1 must produce the same graph as one batch."""
    graphname = "batched_graph"
    runner = CliRunner()
    csv_path = os.path.dirname(os.path.abspath(__file__)) + '/../../demo/bulk_insert/resources/'
    res = runner.invoke(bulk_insert, ['--port', port,
                                      '--nodes', csv_path + 'Person.csv',
                                      '--nodes', csv_path + 'Country.csv',
                                      '--relations', csv_path + 'KNOWS.csv',
                                      '--relations', csv_path + 'VISITED.csv',
                                      '--max-token-count', 1,
                                      graphname])
    self.env.assertEquals(res.exit_code, 0)
    # The script should report statistics multiple times
    self.env.assertGreater(res.output.count('nodes created'), 1)

    new_graph = Graph(graphname, redis_con)

    # Newly-created graph should be identical to graph created in single query
    for q in ('MATCH (p:Person) RETURN p, ID(p) ORDER BY p.name',
              'MATCH (a)-[e:KNOWS]->(b) RETURN a.name, e, b.name ORDER BY e.relation, a.name'):
        baseline = redis_graph.query(q)
        rebuilt = new_graph.query(q)
        self.env.assertEquals(baseline.result_set, rebuilt.result_set)
def test25_merge_with_where(self):
    """A WHERE after MERGE must be planned after the Project op, never inside Merge."""
    redis_con = self.env.getConnection()
    graph = Graph("M", redis_con)

    # Index the "L:prop" combination so that the MERGE tree will not have a filter op.
    graph.query("""CREATE INDEX ON :L(prop)""")

    query = """MERGE (n:L {prop:1}) WITH n WHERE n.prop < 1 RETURN n.prop"""
    result = graph.query(query)
    plan = graph.execution_plan(query)

    # Raw strings for the regex patterns: '\s' in a non-raw string is an
    # invalid escape sequence (DeprecationWarning, future SyntaxError).
    # Verify that the Filter op follows a Project op.
    self.env.assertTrue(re.search(r'Project\s+Filter', plan))
    # Verify that there is no Filter op after the Merge op.
    self.env.assertFalse(re.search(r'Merge\s+Filter', plan))

    # Verify that the entity was created and no results were returned.
    self.env.assertEquals(result.nodes_created, 1)
    self.env.assertEquals(result.properties_set, 1)

    # Repeat the query.
    result = graph.query(query)
    # Verify that no data was modified and no results were returned.
    self.env.assertEquals(result.nodes_created, 0)
    self.env.assertEquals(result.properties_set, 0)
def test06_no_compaction_on_multiple_edges_delete(self):
    """Edge IDs must not be compacted across an RDB reload after deletions."""
    g = Graph("no_compaction_on_multiple_edges_delete", redis_con)

    # Create 3 nodes meta keys
    g.query(
        "CREATE (n1 {val:1}), (n2 {val:2}) WITH n1, n2 UNWIND range(0,20) as i CREATE (n1)-[:R]->(n2)"
    )

    # Collect the ordered edge-ID list before and after each mutation.
    query = "MATCH ()-[e]->() WITH e ORDER by id(e) return COLLECT(id(e))"
    ids_full = g.query(query)

    # Delete 3 edges.
    g.query("MATCH ()-[e]->() WHERE id(e) IN [7,14,20] DELETE e")
    ids_after_delete = g.query(query)

    # Save RDB & Load from RDB
    redis_con.execute_command("DEBUG", "RELOAD")
    reloaded = g.query(query)

    # Validate no compaction, all IDs are the same
    self.env.assertEquals(ids_after_delete.result_set, reloaded.result_set)

    # Validate reuse of edges ids - create 3 edges.
    g.query(
        "MATCH (n1 {val:1}), (n2 {val:2}) WITH n1, n2 UNWIND range (0,2) as i CREATE ()-[:R]->()"
    )
    refilled = g.query(query)
    self.env.assertEquals(ids_full.result_set, refilled.result_set)
def test02_relation_within_label(self):
    """Load two relation types over a single node label and verify the edge counts."""
    graphname = "graph2"
    runner = CliRunner()
    # Insert the 'person' label and the 'know' and 'married' relations
    csv_path = os.path.dirname(os.path.abspath(__file__)) + '/../../demo/bulk_insert/'
    res = runner.invoke(bulk_insert, ['--port', port,
                                      '--nodes', csv_path + 'person.csv',
                                      '--relations', csv_path + 'know.csv',
                                      '--relations', csv_path + 'married.csv',
                                      graphname])

    # The script should report 11 node creations and 33 edge creations
    assert res.exit_code == 0
    assert '11 Nodes created' in res.output
    assert '33 Relations created' in res.output

    g = Graph(graphname, redis_con)

    # Verify that the right number of relations are found
    result = g.query('MATCH (p:person)-[:know]->(q:person) RETURN COUNT(q)')
    assert int(float(result.result_set[1][0])) == 29

    result = g.query('MATCH (p:person)-[:married]->(q:person) RETURN COUNT(q)')
    assert int(float(result.result_set[1][0])) == 4
def test03_restore_properties(self):
    """All property types (string, double, bool, array, point) must survive dump & reload."""
    for graph_name in ("simple_props", "{tag}_simple_props"):
        graph = Graph(graph_name, redis_con)

        result = graph.query("""CREATE (:p {strval: 'str', numval: 5.5, boolval: true, array: [1,2,3], pointval: point({latitude: 5.5, longitude: 6})})""")

        # Verify that node was created correctly
        self.env.assertEquals(result.nodes_created, 1)
        self.env.assertEquals(result.properties_set, 5)

        # Save RDB & Load from RDB
        self.env.dumpAndReload()

        actual = graph.query("""MATCH (p) RETURN p.boolval, p.numval, p.strval, p.array, p.pointval""")

        # Verify that the properties are loaded correctly.
        expected = [[
            True,
            5.5,
            'str',
            [1, 2, 3],
            {"latitude": 5.5, "longitude": 6.0},
        ]]
        self.env.assertEquals(actual.result_set, expected)
def populate_graph(self, graph_name):
    """Create a person/country graph with visit edges, a few deletions, and two indices.

    Returns the populated Graph instance, or the cached module-level
    `redis_graph` when the key already exists.
    """
    # Quick return if graph already exists.
    # NOTE: the original also assigned to `redis_graph` later in this body,
    # which made the name local throughout and this early return raise
    # UnboundLocalError; the local below is renamed to `graph` so this path
    # correctly returns the module-level instance.
    if redis_con.exists(graph_name):
        return redis_graph

    people = ["Roi", "Alon", "Ailon", "Boaz", "Tal", "Omri", "Ori"]
    visits = [("Roi", "USA"), ("Alon", "Israel"),
              ("Ailon", "Japan"), ("Boaz", "United Kingdom")]
    countries = ["Israel", "USA", "Japan", "United Kingdom"]

    graph = Graph(graph_name, redis_con)
    personNodes = {}
    countryNodes = {}

    # Create person nodes with a random height attribute.
    for p in people:
        person = Node(label="person", properties={
            "name": p,
            "height": random.randint(160, 200)
        })
        graph.add_node(person)
        personNodes[p] = person

    # Create country nodes with a random population attribute.
    for c in countries:
        country = Node(label="country", properties={
            "name": c,
            "population": random.randint(100, 400)
        })
        graph.add_node(country)
        countryNodes[c] = country

    # Create visit edges between people and countries.
    for person_name, country_name in visits:
        edge = Edge(personNodes[person_name], 'visit', countryNodes[country_name],
                    properties={'purpose': 'pleasure'})
        graph.add_edge(edge)

    graph.commit()

    # Delete nodes, to introduce deleted item within our datablock.
    graph.query("""MATCH (n:person) WHERE n.name = 'Roi' or n.name = 'Ailon' DELETE n""")
    graph.query("""MATCH (n:country) WHERE n.name = 'USA' DELETE n""")

    # Create indices; the command results were never used, so they are not kept.
    redis_con.execute_command(
        "GRAPH.QUERY", graph_name, "CREATE INDEX ON :person(name, height)")
    redis_con.execute_command(
        "GRAPH.QUERY", graph_name, "CREATE INDEX ON :country(name, population)")

    return graph
def issue_query(conn, q, should_fail):
    """Run q against the shared graph; return True when the outcome matches expectation."""
    try:
        Graph(GRAPH_NAME, conn).query(q)
        # Query succeeded: correct only when failure was not expected.
        return not should_fail
    except Exception as e:
        # Only a memory-capacity rejection counts as an acceptable failure.
        assert "Query's mem consumption exceeded capacity" in str(e)
        return should_fail
def test18_index_scan_inside_apply(self):
    """An index scan nested under an Apply must produce a match per outer record."""
    g = Graph('g', self.env.getConnection())
    g.query("CREATE INDEX ON :L1(id)")
    g.query("UNWIND range(1, 5) AS v CREATE (:L1 {id: v})")
    result = g.query("UNWIND range(1, 5) AS id OPTIONAL MATCH (u:L1{id: 5}) RETURN u.id")
    # The same node should be matched once for each of the 5 outer records.
    self.env.assertEquals(result.result_set, [[5], [5], [5], [5], [5]])
def test_read_only_query(self):
    """A write query issued with read_only=True must be rejected."""
    g = Graph('read_only', self.r)
    try:
        # Issue a write query, specifying read-only true, this call should fail.
        g.query("CREATE (p:person {name:'a'})", read_only=True)
        assert (False)
    except Exception:
        # Expecting an error.
        pass
def test04_batched_build(self):
    """
    Create a graph using many batches.
    Reuses the inputs of test01_social_graph
    """
    graphname = "batched_graph"
    runner = CliRunner()
    csv_path = os.path.dirname(os.path.abspath(__file__)) + '/../example/'
    person_file = csv_path + 'Person.csv'
    country_file = csv_path + 'Country.csv'
    knows_file = csv_path + 'KNOWS.csv'
    visited_file = csv_path + 'VISITED.csv'
    # NOTE: a dead reassignment of csv_path (to the demo resources folder)
    # was removed here; the value was never read afterwards.

    # Build the social graph again with a max token count of 1.
    res = runner.invoke(bulk_insert, ['--nodes', person_file,
                                      '--nodes', country_file,
                                      '--relations', knows_file,
                                      '--relations', visited_file,
                                      '--max-token-count', 1,
                                      graphname],
                        catch_exceptions=False)

    # The script should report 27 overall node creations and 48 edge creations.
    self.assertEqual(res.exit_code, 0)
    self.assertIn("27 nodes created", res.output)
    self.assertIn("48 relations created", res.output)

    # Validate creation count by label/type
    self.assertIn(person_count + " nodes created with label 'Person'", res.output)
    self.assertIn(country_count + " nodes created with label 'Country'", res.output)
    self.assertIn(knows_count + " relations created for type 'KNOWS'", res.output)
    self.assertIn(visited_count + " relations created for type 'VISITED'", res.output)

    original_graph = Graph('social', self.redis_con)
    new_graph = Graph(graphname, self.redis_con)

    # Newly-created graph should be identical to graph created in single bulk command
    original_result = original_graph.query(
        'MATCH (p:Person) RETURN p, ID(p) ORDER BY p.name')
    new_result = new_graph.query(
        'MATCH (p:Person) RETURN p, ID(p) ORDER BY p.name')
    self.assertEqual(original_result.result_set, new_result.result_set)

    original_result = original_graph.query(
        'MATCH (a)-[e:KNOWS]->(b) RETURN a.name, e, b.name ORDER BY e.relation, a.name')
    new_result = new_graph.query(
        'MATCH (a)-[e:KNOWS]->(b) RETURN a.name, e, b.name ORDER BY e.relation, a.name')
    self.assertEqual(original_result.result_set, new_result.result_set)
def test01_graph_access_on_invalid_key(self):
    """Graph queries against a non-graph key must fail with WRONGTYPE."""
    redis_con.set("integer_key", 5)
    graph = Graph("integer_key", redis_con)
    try:
        graph.query("""MATCH (n) RETURN noneExistingFunc(n.age) AS cast""")
        assert (False)
    except redis.exceptions.ResponseError as e:
        # Expecting an error.
        assert ("WRONGTYPE" in str(e))
def test01_test_create(self):
    """The second execution of an identical CREATE must be served from the cache."""
    # Both queries do exactly the same operations
    graph = Graph('Cache_Test_Create', redis_con)
    query = "CREATE ()"
    self.compare_uncached_to_cached_query_plans(query)
    first_run = graph.query(query)
    second_run = graph.query(query)
    self.env.assertFalse(first_run.cached_execution)
    self.env.assertTrue(second_run.cached_execution)
    self.env.assertEqual(first_run.nodes_created, second_run.nodes_created)
    graph.delete()
def empty_graph():
    """Reset the module-level graph to an existing-but-empty graph keyed "G"."""
    global redis_graph
    conn = _brand_new_redis()
    redis_graph = Graph("G", conn)

    # Create a graph with a single node...
    redis_graph.add_node(Node())
    redis_graph.commit()

    # ...then delete it, leaving the key present but the graph empty.
    redis_graph.query("MATCH (n) DELETE n")
def test03_edges_over_multiple_keys(self):
    """Edges spread over several meta keys must survive an RDB reload."""
    g = Graph("edges_over_multiple_keys", redis_con)
    # Create 3 edges meta keys
    g.query("UNWIND range(0,20) as i CREATE ()-[:R {val:i}]->()")
    # Compare the full edge set before and after the reload.
    query = "MATCH ()-[e]->() return e"
    before = g.query(query)
    # Save RDB & Load from RDB
    redis_con.execute_command("DEBUG", "RELOAD")
    after = g.query(query)
    self.env.assertEquals(before.result_set, after.result_set)
def test07_index_after_encode_decode_in_v7(self):
    """An index must still back query plans after an encode/decode cycle."""
    g = Graph("index_after_encode_decode_in_v7", redis_con)
    g.query("CREATE INDEX ON :N(val)")

    # Verify indices exists.
    plan = g.execution_plan("MATCH (n:N {val:1}) RETURN n")
    self.env.assertIn("Index Scan", plan)

    # Save RDB & Load from RDB
    redis_con.execute_command("DEBUG", "RELOAD")

    # Verify indices exists after loading RDB.
    plan = g.execution_plan("MATCH (n:N {val:1}) RETURN n")
    self.env.assertIn("Index Scan", plan)
def test02_test_create_with_params(self):
    """Parameterized CREATE queries must share one cached plan across parameter values."""
    # Both queries do exactly the same operations
    graph = Graph('Cache_Test_Create_With_Params', redis_con)
    query = "CREATE ({val:$val})"
    self.compare_uncached_to_cached_query_plans(query)
    first_run = graph.query(query, {'val': 1})
    second_run = graph.query(query, {'val': 2})
    self.env.assertFalse(first_run.cached_execution)
    self.env.assertTrue(second_run.cached_execution)
    self.env.assertEqual(first_run.nodes_created, second_run.nodes_created)
    graph.delete()
def populate_graph(cls):
    """Create 10 person nodes and align each node's 'id' property with its internal ID."""
    global redis_graph
    conn = cls.r.client()
    redis_graph = Graph(GRAPH_ID, conn)

    # Create entities
    for i in range(10):
        redis_graph.add_node(Node(label="person", properties={"id": i}))
    redis_graph.commit()

    # Make sure node id attribute matches node's internal ID.
    redis_graph.query("""MATCH (n) SET n.id = ID(n)""")
def test05_distinct_full_entities(self):
    """DISTINCT must deduplicate full node entities, not just scalar projections."""
    g = Graph("H", redis_con)
    result = g.query("""CREATE (a)-[:e]->(), (a)-[:e]->()""")
    self.env.assertEquals(result.nodes_created, 3)
    self.env.assertEquals(result.relationships_created, 2)

    plain = g.query("""MATCH (a)-[]->() RETURN a""")
    deduped = g.query("""MATCH (a)-[]->() RETURN DISTINCT a""")
    # Both paths start at the same node: DISTINCT collapses two rows into one.
    self.env.assertEquals(len(plain.result_set), 2)
    self.env.assertEquals(len(deduped.result_set), 1)
def test10_test_labelscan_update(self):
    """A cached label-scan plan must see a label created after the plan was cached."""
    # In this scenario a label scan is made for non existing label
    # than the label is created and the label scan query is re-used.
    graph = Graph('Cache_test_labelscan_update', redis_con)

    scan_query = "MATCH (n:Label) return n"
    result = graph.query(scan_query)
    self.env.assertEqual(0, len(result.result_set))

    result = graph.query("MERGE (n:Label)")
    self.env.assertEqual(1, result.nodes_created)

    result = graph.query(scan_query)
    self.env.assertEqual(1, len(result.result_set))
    self.env.assertEqual("Label", result.result_set[0][0].label)