def compare_uncached_to_cached_query_plans(self, query):
    """Assert that a query's execution plan is identical before and after caching."""
    global redis_con
    # Use a dedicated scratch graph so plan construction never touches test data.
    scratch = Graph('Cache_Test_plans', redis_con)
    first_plan = scratch.execution_plan(query)   # cold build
    second_plan = scratch.execution_plan(query)  # served from the plan cache
    self.env.assertEqual(first_plan, second_plan)
    scratch.delete()
def test_array_functions(self):
    """Exercise array literals, WITH projection and collect()."""
    redis_graph = Graph('social', self.r)

    redis_graph.query("""CREATE (p:person{name:'a',age:32, array:[0,1,2]})""")

    # A projected array literal round-trips unchanged.
    result = redis_graph.query("""WITH [0,1,2] as x return x""")
    self.assertEqual([0, 1, 2], result.result_set[0][0])

    # collect() wraps the created node in an array.
    result = redis_graph.query("""MATCH(n) return collect(n)""")
    expected_node = Node(node_id=0, label='person',
                         properties={'name': 'a', 'age': 32, 'array': [0, 1, 2]})
    self.assertEqual([expected_node], result.result_set[0][0])

    # All done, remove graph.
    redis_graph.delete()
def test02_graph_delete_on_empty_key(self):
    """Deleting a graph under a nonexistent key must raise a server error."""
    graph = Graph("nonexistent_key", redis_con)
    try:
        graph.delete()
    except redis.exceptions.ResponseError as e:
        # Expecting an "empty key" error from the server.
        assert ("empty key" in str(e))
    else:
        # delete() succeeded where it should have failed.
        assert (False)
def test01_test_create(self):
    # Both queries do exactly the same operations
    graph = Graph('Cache_Test_Create', redis_con)
    query = "CREATE ()"
    self.compare_uncached_to_cached_query_plans(query)

    first = graph.query(query)   # populates the cache
    second = graph.query(query)  # served from the cache

    self.env.assertFalse(first.cached_execution)
    self.env.assertTrue(second.cached_execution)
    # Cached execution must create the same number of nodes.
    self.env.assertEqual(first.nodes_created, second.nodes_created)
    graph.delete()
def delete_graph(graph_id):
    """Attempt to delete the graph named *graph_id*.

    Returns True when the deletion succeeded, False when the server
    reported an error (e.g. the graph was already deleted by a
    concurrent worker).
    """
    env = Env(decodeResponses=True)
    conn = env.getConnection()
    graph = Graph(graph_id, conn)

    # Try to delete graph.
    try:
        graph.delete()
        return True
    except Exception:
        # Graph deletion failed. Narrowed from a bare `except:` so that
        # KeyboardInterrupt/SystemExit still propagate out of the worker.
        return False
def test_param(self):
    """Every supported value type round-trips through a query parameter."""
    redis_graph = Graph('params', self.r)
    query = "RETURN $param"
    for value in [1, 2.3, "str", True, False, None, [0, 1, 2]]:
        result = redis_graph.query(query, {'param': value})
        self.assertEqual([[value]], result.result_set)

    # All done, remove graph.
    redis_graph.delete()
def test02_test_create_with_params(self):
    # Both queries do exactly the same operations
    graph = Graph('Cache_Test_Create_With_Params', redis_con)
    query = "CREATE ({val:$val})"
    self.compare_uncached_to_cached_query_plans(query)

    # Identical query text with different parameter values shares one plan.
    first = graph.query(query, {'val': 1})
    second = graph.query(query, {'val': 2})

    self.env.assertFalse(first.cached_execution)
    self.env.assertTrue(second.cached_execution)
    self.env.assertEqual(first.nodes_created, second.nodes_created)
    graph.delete()
def test_execution_plan(self):
    """The textual execution plan matches the expected operation tree."""
    redis_graph = Graph('execution_plan', self.r)
    create_query = """CREATE (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}), (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}), (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})"""
    redis_graph.query(create_query)

    # Plans can be requested with parameters, just like queries.
    result = redis_graph.execution_plan(
        "MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = $name RETURN r.name, t.name, $params",
        {'name': 'Yehuda'})

    expected = "Results\n Project\n Conditional Traverse | (t:Team)->(r:Rider)\n Filter\n Node By Label Scan | (t:Team)"
    self.assertEqual(result, expected)

    redis_graph.delete()
def test_sanity_check(self):
    """Overflowing the query cache evicts the least recently used entry."""
    graph = Graph('Cache_Sanity_Check', redis_con)

    # Issue CACHE_SIZE+1 distinct queries; none can be cached yet.
    for i in range(CACHE_SIZE + 1):
        result = graph.query("MATCH (n) WHERE n.value = {val} RETURN n".format(val=i))
        self.env.assertFalse(result.cached_execution)

    # Entries 1..CACHE_SIZE are still resident and must hit the cache.
    for i in range(1, CACHE_SIZE + 1):
        result = graph.query("MATCH (n) WHERE n.value = {val} RETURN n".format(val=i))
        self.env.assertTrue(result.cached_execution)

    # Entry 0 was evicted when the cache overflowed, so it misses.
    result = graph.query("MATCH (n) WHERE n.value = 0 RETURN n")
    self.env.assertFalse(result.cached_execution)

    graph.delete()
def test07_test_optimizations_id_scan(self):
    """A cached ID-scan plan still honors fresh parameter values."""
    graph = Graph('Cache_Test_ID_Scan', redis_con)
    graph.query("CREATE (), ()")

    query = "MATCH (n) WHERE ID(n)=$id RETURN id(n)"
    self.compare_uncached_to_cached_query_plans(query)

    first = graph.query(query, {'id': 0})
    second = graph.query(query, {'id': 1})

    self.env.assertFalse(first.cached_execution)
    self.env.assertTrue(second.cached_execution)
    # Each execution must scan the node named by its own parameter.
    self.env.assertEqual([[0]], first.result_set)
    self.env.assertEqual([[1]], second.result_set)
    graph.delete()
def test08_test_join(self):
    """A cached join plan evaluates fresh parameter values correctly."""
    graph = Graph('Cache_Test_Join', redis_con)
    graph.query("CREATE ({val:1}), ({val:2}), ({val:3}),({val:4})")

    query = "MATCH (a {val:$val}), (b) WHERE a.val = b.val-1 RETURN a.val, b.val "
    self.compare_uncached_to_cached_query_plans(query)

    first = graph.query(query, {'val': 1})
    second = graph.query(query, {'val': 3})

    self.env.assertFalse(first.cached_execution)
    self.env.assertTrue(second.cached_execution)
    # The join pairs each 'a' with the node whose val is one greater.
    self.env.assertEqual([[1, 2]], first.result_set)
    self.env.assertEqual([[3, 4]], second.result_set)
    graph.delete()
def test_stringify_query_result(self):
    """Verify str() rendering of nodes/edges, both client-built (with aliases)
    and server-returned (no aliases)."""
    redis_graph = Graph('stringify', self.r)

    # Client-side entities carry aliases ('a', 'b') used in their string form.
    john = Node(alias='a', label='person', properties={
        'name': 'John Doe', 'age': 33, 'gender': 'male', 'status': 'single'
    })
    redis_graph.add_node(john)
    japan = Node(alias='b', label='country', properties={'name': 'Japan'})
    redis_graph.add_node(japan)
    edge = Edge(john, 'visited', japan, properties={'purpose': 'pleasure'})
    redis_graph.add_edge(edge)

    # Properties are rendered sorted by key; strings are double-quoted.
    self.assertEqual(
        str(john),
        """(a:person{age:33,gender:"male",name:"John Doe",status:"single"})"""
    )
    # An edge renders as source-(relation{props})->destination.
    self.assertEqual(
        str(edge),
        """(a:person{age:33,gender:"male",name:"John Doe",status:"single"})"""
        + """-[:visited{purpose:"pleasure"}]->"""
        + """(b:country{name:"Japan"})""")
    self.assertEqual(str(japan), """(b:country{name:"Japan"})""")

    redis_graph.commit()

    query = """MATCH (p:person)-[v:visited {purpose:"pleasure"}]->(c:country)
            RETURN p, v, c"""

    result = redis_graph.query(query)
    person = result.result_set[0][0]
    visit = result.result_set[0][1]
    country = result.result_set[0][2]

    # Server-returned entities have no aliases, and returned edges do not
    # carry their endpoint nodes — hence the empty () endpoints below.
    self.assertEqual(
        str(person),
        """(:person{age:33,gender:"male",name:"John Doe",status:"single"})"""
    )
    self.assertEqual(str(visit), """()-[:visited{purpose:"pleasure"}]->()""")
    self.assertEqual(str(country), """(:country{name:"Japan"})""")

    redis_graph.delete()
def test04_multi_hop_multi_edge(self):
    """Expand-Into counts every path through parallel multi-hop edges."""
    redis_con = self.env.getConnection()
    graph = Graph(GRAPH_ID, redis_con)
    graph.delete()

    # create graph
    graph.query("CREATE (a:A), (b:B), (i), (a)-[:R]->(i)-[:R]->(b), (a)-[:R]->(i)-[:R]->(b)")

    # make sure (a) is connected to (b) via a 'R' edge
    # there are multiple ways to reach (b) from (a)
    query = "MATCH (a:A)-[*]->(b:B) WITH a,b MATCH (a)-[:R]->()-[]->(b) RETURN count(1)"
    plan = graph.execution_plan(query)
    result = graph.query(query)

    self.env.assertIn("Expand Into", plan)
    # 2 parallel edges per hop => 2 * 2 = 4 distinct paths.
    self.env.assertEquals(4, result.result_set[0][0])
def test06_test_optimizations_index(self):
    """A cached index-scan plan still honors fresh parameter values."""
    graph = Graph('Cache_Test_Index', redis_con)
    graph.query("CREATE INDEX ON :N(val)")
    graph.query("CREATE (:N{val:1}), (:N{val:2})")

    query = "MATCH (n:N{val:$val}) RETURN n.val"
    self.compare_uncached_to_cached_query_plans(query)

    first = graph.query(query, {'val': 1})
    second = graph.query(query, {'val': 2})

    self.env.assertFalse(first.cached_execution)
    self.env.assertTrue(second.cached_execution)
    # Each execution must fetch the node its own parameter selects.
    self.env.assertEqual([[1]], first.result_set)
    self.env.assertEqual([[2]], second.result_set)
    graph.delete()
def test05_test_branching_with_path_filter(self):
    # Different outcome, same execution plan.
    graph = Graph('Cache_Test_Path_Filter', redis_con)
    graph.query("CREATE ({val:1})-[:R]->({val:2})-[:R2]->({val:3})")

    # OR over two path filters, both parameterized by $val.
    query = "MATCH (n) WHERE (n)-[:R]->({val:$val}) OR (n)-[:R2]->({val:$val}) RETURN n.val"
    self.compare_uncached_to_cached_query_plans(query)

    first = graph.query(query, {'val': 2})
    second = graph.query(query, {'val': 3})

    self.env.assertFalse(first.cached_execution)
    self.env.assertTrue(second.cached_execution)
    # val=2 matches node 1 via :R; val=3 matches node 2 via :R2.
    self.env.assertEqual([[1]], first.result_set)
    self.env.assertEqual([[2]], second.result_set)
    graph.delete()
def test03_multi_hop_no_multi_edge(self):
    """Expand-Into resolves a multi-hop pattern with one edge per hop."""
    redis_con = self.env.getConnection()
    graph = Graph(GRAPH_ID, redis_con)
    graph.delete()

    # create graph
    graph.query("CREATE (:A)-[:R]->()-[:R]->(:B)")

    # make sure (a) is connected to (b) via a 'R' edge
    # expand-into inspects the result of (F*R*ADJ)[a,b]
    query = "MATCH (a:A)-[*]->(b:B) WITH a,b MATCH (a)-[:R]->()-[]->(b) RETURN count(a)"
    plan = graph.execution_plan(query)
    result = graph.query(query)

    self.env.assertIn("Expand Into", plan)
    self.env.assertEquals(1, result.result_set[0][0])
def introduction_2():
    """Demo: persist class4pgm model classes/instances to a RedisGraph graph,
    then read them back with a second ClassManager.

    NOTE(review): relies on Person, Student, Teacher, Teach, john, kate and
    teach being defined at module level elsewhere in this file.
    """
    import redis
    from redisgraph import Graph
    from class4pgm import ClassManager

    # connect to the local redis
    r = redis.Redis()
    # create a redis graph named example
    graph = Graph("example", r)
    manager = ClassManager(graph)
    # Register the model classes' definitions in the graph.
    succeeded = manager.insert_defined_class([Person, Student, Teacher, Teach])
    # [True, True, True, True]
    print(succeeded)
    # Convert model instances to graph entities and stage them for writing.
    manager.model_to_db_object(john, auto_add=True)
    manager.model_to_db_object(kate, auto_add=True)
    manager.model_to_db_object(teach, auto_add=True)
    # Write the staged entities to the server.
    graph.flush()
    """
    Open new client to retrieve classes and instances.
    """
    r = redis.Redis()
    # the defined classes will be retrieved automatically during the construction of the graph client.
    # NOTE(review): `graph` is reused here; it still holds the first
    # connection even though `r` was reassigned above — confirm intended.
    manager = ClassManager(graph)
    # retrieve every node
    results = graph.query("""Match (a) return a""")
    for result in results.result_set:
        print(manager.db_object_to_model(result[0]))
    # (:ClassDefinitionWrapper {...})
    # ...
    # (:IntlStudent:Student:Person)
    # (:Teacher:Person)
    # acquire Teach class. NOTE(review): `T` is never used afterwards.
    T = manager.get('Teach')
    # query every edge belonging to the Teach class
    result = graph.query(f"Match ()-[a:{Teach.__name__}]->() return a")
    for row in result.result_set:
        print(manager.edge_to_model(row[0]))
    # ()-[:Teach]->()
    # Clean up the example graph.
    graph.delete()
def test04_test_merge(self):
    # Different outcome, same execution plan.
    graph = Graph('Cache_Test_Merge', redis_con)
    params = {'create_val': 0, 'match_val': 1}
    query = "MERGE (n) ON CREATE SET n.val = $create_val ON MATCH SET n.val = $match_val RETURN n.val"
    self.compare_uncached_to_cached_query_plans(query)

    first = graph.query(query, params)   # MERGE creates the node
    second = graph.query(query, params)  # MERGE matches the existing node

    self.env.assertFalse(first.cached_execution)
    self.env.assertTrue(second.cached_execution)
    self.env.assertEqual(first.properties_set, second.properties_set)
    # First run takes the ON CREATE branch...
    self.env.assertEqual([[0]], first.result_set)
    self.env.assertEqual(1, first.nodes_created)
    # ...second run takes the ON MATCH branch.
    self.env.assertEqual([[1]], second.result_set)
    self.env.assertEqual(0, second.nodes_created)
    graph.delete()
def test_point(self):
    """point() round-trips latitude/longitude within a small tolerance."""
    redis_graph = Graph('map', self.r)

    cases = [
        ("RETURN point({latitude: 32.070794860, longitude: 34.820751118})",
         32.070794860, 34.820751118),
        ("RETURN point({latitude: 32, longitude: 34.0})",
         32, 34),
    ]
    for query, expected_lat, expected_lon in cases:
        actual = redis_graph.query(query).result_set[0][0]
        # Compare within tolerance rather than exactly (float round-trip).
        self.assertTrue(abs(actual['latitude'] - expected_lat) < 0.001)
        self.assertTrue(abs(actual['longitude'] - expected_lon) < 0.001)

    # All done, remove graph.
    redis_graph.delete()
def test03_test_delete(self):
    # Both queries do exactly the same operations
    graph = Graph('Cache_Test_Delete', redis_con)

    # Two identical small subgraphs, distinguished only by 'val'.
    for i in range(2):
        graph.query("CREATE ({val:$val})-[:R]->()", {'val': i})

    query = "MATCH (n {val:$val}) DELETE n"
    self.compare_uncached_to_cached_query_plans(query)

    first = graph.query(query, {'val': 0})
    second = graph.query(query, {'val': 1})

    self.env.assertFalse(first.cached_execution)
    self.env.assertTrue(second.cached_execution)
    # Both runs delete the same amount of entities.
    self.env.assertEqual(first.relationships_deleted, second.relationships_deleted)
    self.env.assertEqual(first.nodes_deleted, second.nodes_deleted)
    graph.delete()
def test_cached_execution(self):
    """After enough repeats a parameterized query reports cached execution."""
    redis_graph = Graph('cached', self.r)
    redis_graph.query("CREATE ()")

    uncached_result = redis_graph.query("MATCH (n) RETURN n, $param", {'param': [0]})
    self.assertFalse(uncached_result.cached_execution)

    # loop to make sure the query is cached on each thread on server
    for _ in range(64):
        cached_result = redis_graph.query("MATCH (n) RETURN n, $param", {'param': [0]})
        self.assertEqual(uncached_result.result_set, cached_result.result_set)

    # should be cached on all threads by now
    self.assertTrue(cached_result.cached_execution)

    redis_graph.delete()
def test_index_response(self):
    """Index create/drop report entity counts; double-drop must raise.

    Fix: the original try/except silently passed when the second DROP
    INDEX did *not* raise — an `else: fail` clause now catches that.
    """
    redis_graph = Graph('social', self.r)

    result_set = redis_graph.query("CREATE INDEX ON :person(age)")
    self.assertEqual(1, result_set.indices_created)

    # Creating the same index again is a no-op.
    result_set = redis_graph.query("CREATE INDEX ON :person(age)")
    self.assertEqual(0, result_set.indices_created)

    result_set = redis_graph.query("DROP INDEX ON :person(age)")
    self.assertEqual(1, result_set.indices_deleted)

    # Dropping a nonexistent index must raise, not succeed silently.
    try:
        redis_graph.query("DROP INDEX ON :person(age)")
    except redis.exceptions.ResponseError as e:
        self.assertEqual(
            e.__str__(),
            "Unable to drop index on :person(age): no such index.")
    else:
        self.fail("expected DROP INDEX on a missing index to raise")

    redis_graph.delete()
def test_graph_creation(self):
    """Entities committed via the client round-trip through a query."""
    redis_graph = Graph('social', self.r)

    john = Node(label='person',
                properties={'name': 'John Doe', 'age': 33,
                            'gender': 'male', 'status': 'single'})
    japan = Node(label='country', properties={'name': 'Japan'})
    edge = Edge(john, 'visited', japan, properties={'purpose': 'pleasure'})

    redis_graph.add_node(john)
    redis_graph.add_node(japan)
    redis_graph.add_edge(edge)
    redis_graph.commit()

    query = (
        'MATCH (p:person)-[v:visited {purpose:"pleasure"}]->(c:country) '
        'RETURN p, v, c')
    result = redis_graph.query(query)
    person, visit, country = result.result_set[0]

    self.assertEqual(person, john)
    self.assertEqual(visit.properties, edge.properties)
    self.assertEqual(country, japan)

    # Scalar literals of every type survive the round-trip as well.
    result = redis_graph.query("""RETURN [1, 2.3, "4", true, false, null]""")
    self.assertEqual([1, 2.3, "4", True, False, None], result.result_set[0][0])

    # All done, remove graph.
    redis_graph.delete()
def test_map(self):
    """A nested map literal comes back as the equivalent Python dict."""
    redis_graph = Graph('map', self.r)

    query = "RETURN {a:1, b:'str', c:NULL, d:[1,2,3], e:True, f:{x:1, y:2}}"
    actual = redis_graph.query(query).result_set[0][0]
    expected = {'a': 1,
                'b': 'str',
                'c': None,
                'd': [1, 2, 3],
                'e': True,
                'f': {'x': 1, 'y': 2}}
    self.assertEqual(actual, expected)

    # All done, remove graph.
    redis_graph.delete()
def test_optional_match(self):
    """OPTIONAL MATCH yields NULL columns where no outgoing edge exists."""
    redis_graph = Graph('optional', self.r)

    # Build a graph of form (a)-[R]->(b)
    src = Node(node_id=0, label="L1", properties={'value': 'a'})
    dst = Node(node_id=1, label="L1", properties={'value': 'b'})
    link = Edge(src, "R", dst, edge_id=0)
    redis_graph.add_node(src)
    redis_graph.add_node(dst)
    redis_graph.add_edge(link)
    redis_graph.flush()

    # Issue a query that collects all outgoing edges from both nodes (the second has none).
    query = """MATCH (a) OPTIONAL MATCH (a)-[e]->(b) RETURN a, e, b ORDER BY a.value"""
    result = redis_graph.query(query)
    self.assertEqual([[src, link, dst], [dst, None, None]], result.result_set)

    redis_graph.delete()
def test_path(self):
    """A matched path equals one assembled locally from its parts."""
    redis_graph = Graph('social', self.r)

    first = Node(node_id=0, label="L1")
    second = Node(node_id=1, label="L1")
    link = Edge(first, "R1", second, edge_id=0, properties={'value': 1})
    redis_graph.add_node(first)
    redis_graph.add_node(second)
    redis_graph.add_edge(link)
    redis_graph.flush()

    # Mirror the server-side path: node -> edge -> node.
    expected_path = Path.new_empty_path().add_node(first).add_edge(link).add_node(second)

    result = redis_graph.query("MATCH p=(:L1)-[:R1]->(:L1) RETURN p ORDER BY p")
    self.assertEqual([[expected_path]], result.result_set)

    # All done, remove graph.
    redis_graph.delete()
def test02_single_hop_multi_edge(self):
    """Expand-Into enumerates every parallel edge between two nodes."""
    redis_con = self.env.getConnection()
    graph = Graph(GRAPH_ID, redis_con)
    graph.delete()

    # create graph
    graph.query("CREATE (a:A)-[:R {v:1}]->(b:B), (a)-[:R {v:2}]->(b)")

    # make sure (a) is connected to (b) via a 'R' edge
    # there are multiple edges of type 'R' connecting the two
    query = "MATCH (a:A)-[]->(b:B) WITH a,b MATCH (a)-[e:R]->(b) RETURN count(e)"
    plan = graph.execution_plan(query)
    result = graph.query(query)
    self.env.assertIn("Expand Into", plan)
    self.env.assertEquals(2, result.result_set[0][0])

    # Both edge property values must be surfaced, in order.
    query = "MATCH (a:A)-[]->(b:B) WITH a,b MATCH (a)-[e:R]->(b) RETURN e.v ORDER BY e.v"
    plan = graph.execution_plan(query)
    result = graph.query(query)
    self.env.assertIn("Expand Into", plan)
    self.env.assertEquals(1, result.result_set[0][0])
    self.env.assertEquals(2, result.result_set[1][0])
def test01_single_hop_no_multi_edge(self):
    """Expand-Into handles a single-hop pattern with one connecting edge."""
    redis_con = self.env.getConnection()
    graph = Graph(GRAPH_ID, redis_con)
    # Best-effort cleanup: the graph may not exist yet on the first run.
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate instead of being swallowed.
    try:
        graph.delete()
    except Exception:
        pass

    # create graph
    graph.query("CREATE (:A)-[:R {v:1}]->(:B)")

    # make sure (a) is connected to (b) via a 'R' edge
    query = "MATCH (a:A)-[]->(b:B) WITH a,b MATCH (a)-[:R]->(b) RETURN count(a)"
    plan = graph.execution_plan(query)
    result = graph.query(query)
    self.env.assertIn("Expand Into", plan)
    self.env.assertEquals(1, result.result_set[0][0])

    # The edge's property must also be reachable through Expand-Into.
    query = "MATCH (a:A)-[]->(b:B) WITH a,b MATCH (a)-[e:R]->(b) RETURN e.v"
    plan = graph.execution_plan(query)
    result = graph.query(query)
    self.env.assertIn("Expand Into", plan)
    self.env.assertEquals(1, result.result_set[0][0])
class testConcurrentQueryFlow(FlowTestsBase):
    """Concurrency tests: many client processes querying, writing, and
    deleting/renaming/replacing the same graph key simultaneously.

    Relies on module-level helpers: run_concurrent, thread_run_query,
    delete_graph, and constants GRAPH_ID, CLIENT_COUNT, people.
    """

    def __init__(self):
        self.env = Env(decodeResponses=True)
        # skip test if we're running under Valgrind
        if self.env.envRunner.debugger is not None:
            self.env.skip()  # valgrind is not working correctly with multi processing
        self.conn = self.env.getConnection()
        self.graph = Graph(GRAPH_ID, self.conn)
        self.populate_graph()

    def populate_graph(self):
        # Build a fully-connected "person knows person" graph and commit it.
        nodes = {}
        # Create entities
        for p in people:
            node = Node(label="person", properties={"name": p})
            self.graph.add_node(node)
            nodes[p] = node
        # Fully connected graph
        for src in nodes:
            for dest in nodes:
                if src != dest:
                    edge = Edge(nodes[src], "know", nodes[dest])
                    self.graph.add_edge(edge)
        self.graph.commit()

    # Count number of nodes in the graph
    def test01_concurrent_aggregation(self):
        q = """MATCH (p:person) RETURN count(p)"""
        queries = [q] * CLIENT_COUNT
        results = run_concurrent(queries, thread_run_query)
        # Every concurrent reader must observe the full node count.
        for result in results:
            person_count = result["result_set"][0][0]
            self.env.assertEqual(person_count, len(people))

    # Concurrently get neighbors of every node.
    def test02_retrieve_neighbors(self):
        q = """MATCH (p:person)-[know]->(n:person) RETURN n.name"""
        queries = [q] * CLIENT_COUNT
        results = run_concurrent(queries, thread_run_query)
        # Fully connected graph + header row.
        expected_resultset_size = len(people) * (len(people) - 1)
        for result in results:
            self.env.assertEqual(len(result["result_set"]), expected_resultset_size)

    # Concurrent writes
    def test_03_concurrent_write(self):
        # One distinct CREATE per client; each must succeed independently.
        queries = [
            """CREATE (c:country {id:"%d"})""" % i for i in range(CLIENT_COUNT)
        ]
        results = run_concurrent(queries, thread_run_query)
        for result in results:
            self.env.assertEqual(result["nodes_created"], 1)
            self.env.assertEqual(result["properties_set"], 1)

    # Try to delete graph multiple times.
    def test_04_concurrent_delete(self):
        pool = Pool(nodes=CLIENT_COUNT)
        # invoke queries
        assertions = pool.map(delete_graph, [GRAPH_ID] * CLIENT_COUNT)
        # Exactly one thread should have successfully deleted the graph.
        self.env.assertEquals(assertions.count(True), 1)

    # Try to delete a graph while multiple queries are executing.
    def test_05_concurrent_read_delete(self):
        ##############################################################################################
        # Delete graph via Redis DEL key.
        ##############################################################################################
        self.populate_graph()
        pool = Pool(nodes=CLIENT_COUNT)
        # A shared barrier makes all clients start their query together,
        # maximizing overlap with the deletion below.
        manager = pathos_multiprocess.Manager()
        barrier = manager.Barrier(CLIENT_COUNT)
        barriers = [barrier] * CLIENT_COUNT

        q = """UNWIND (range(0, 10000)) AS x WITH x AS x WHERE (x / 900) = 1 RETURN x"""
        queries = [q] * CLIENT_COUNT

        # invoke queries
        m = pool.amap(thread_run_query, queries, barriers)

        # Delete the key while the queries are (potentially) in flight.
        self.conn.delete(GRAPH_ID)

        # wait for processes to return
        m.wait()

        # get the results
        results = m.get()

        # validate result.
        self.env.assertTrue(
            all([r["result_set"][0][0] == 900 for r in results]))

        # Make sure Graph is empty, e.g. graph was deleted.
        resultset = self.graph.query("MATCH (n) RETURN count(n)").result_set
        self.env.assertEquals(resultset[0][0], 0)

        ##############################################################################################
        # Delete graph via Redis FLUSHALL.
        ##############################################################################################
        self.populate_graph()
        q = """UNWIND (range(0, 10000)) AS x WITH x AS x WHERE (x / 900) = 1 RETURN x"""
        queries = [q] * CLIENT_COUNT
        barrier = manager.Barrier(CLIENT_COUNT)
        barriers = [barrier] * CLIENT_COUNT

        # invoke queries
        m = pool.amap(thread_run_query, queries, barriers)

        self.conn.flushall()

        # wait for processes to return
        m.wait()

        # get the results
        results = m.get()

        # validate result.
        self.env.assertTrue(
            all([r["result_set"][0][0] == 900 for r in results]))

        # Make sure Graph is empty, e.g. graph was deleted.
        resultset = self.graph.query("MATCH (n) RETURN count(n)").result_set
        self.env.assertEquals(resultset[0][0], 0)

        ##############################################################################################
        # Delete graph via GRAPH.DELETE.
        ##############################################################################################
        self.populate_graph()
        q = """UNWIND (range(0, 10000)) AS x WITH x AS x WHERE (x / 900) = 1 RETURN x"""
        queries = [q] * CLIENT_COUNT
        barrier = manager.Barrier(CLIENT_COUNT)
        barriers = [barrier] * CLIENT_COUNT

        # invoke queries
        m = pool.amap(thread_run_query, queries, barriers)

        self.graph.delete()

        # wait for processes to return
        m.wait()

        # get the results
        results = m.get()

        # validate result.
        self.env.assertTrue(
            all([r["result_set"][0][0] == 900 for r in results]))

        # Make sure Graph is empty, e.g. graph was deleted.
        resultset = self.graph.query("MATCH (n) RETURN count(n)").result_set
        self.env.assertEquals(resultset[0][0], 0)

    def test_06_concurrent_write_delete(self):
        # Test setup - validate that graph exists and possible results are None
        self.graph.query("MATCH (n) RETURN n")

        pool = Pool(nodes=1)
        heavy_write_query = """UNWIND(range(0,999999)) as x CREATE(n) RETURN count(n)"""
        writer = pool.apipe(thread_run_query, heavy_write_query, None)
        # Delete the key while the heavy write may still be executing.
        self.conn.delete(GRAPH_ID)
        writer.wait()
        # Either the write completed, or the server reported one of these.
        possible_exceptions = [
            "Encountered different graph value when opened key " + GRAPH_ID,
            "Encountered an empty key when opened key " + GRAPH_ID
        ]

        result = writer.get()
        if isinstance(result, str):
            self.env.assertContains(result, possible_exceptions)
        else:
            self.env.assertEquals(1000000, result["result_set"][0][0])

    def test_07_concurrent_write_rename(self):
        # Test setup - validate that graph exists and possible results are None
        self.graph.query("MATCH (n) RETURN n")
        pool = Pool(nodes=1)
        new_graph = GRAPH_ID + "2"
        # Create new empty graph with id GRAPH_ID + "2"
        self.conn.execute_command("GRAPH.QUERY", new_graph,
                                  """MATCH (n) return n""", "--compact")
        heavy_write_query = """UNWIND(range(0,999999)) as x CREATE(n) RETURN count(n)"""
        writer = pool.apipe(thread_run_query, heavy_write_query, None)
        self.conn.rename(GRAPH_ID, new_graph)
        writer.wait()
        # Possible scenarios:
        # 1. Rename is done before query is sent. The name in the graph context is new_graph, so when upon commit, when trying to open new_graph key, it will encounter an empty key since new_graph is not a valid key.
        #    Note: As from https://github.com/RedisGraph/RedisGraph/pull/820 this may not be valid since the rename event handler might actually rename the graph key, before the query execution.
        # 2. Rename is done during query executing, so when commiting and comparing stored graph context name (GRAPH_ID) to the retrived value graph context name (new_graph), the identifiers are not the same, since new_graph value is now stored at GRAPH_ID value.
        possible_exceptions = [
            "Encountered different graph value when opened key " + GRAPH_ID,
            "Encountered an empty key when opened key " + new_graph
        ]

        result = writer.get()
        if isinstance(result, str):
            self.env.assertContains(result, possible_exceptions)
        else:
            self.env.assertEquals(1000000, result["result_set"][0][0])

    def test_08_concurrent_write_replace(self):
        # Test setup - validate that graph exists and possible results are None
        self.graph.query("MATCH (n) RETURN n")

        pool = Pool(nodes=1)
        heavy_write_query = """UNWIND(range(0,999999)) as x CREATE(n) RETURN count(n)"""
        writer = pool.apipe(thread_run_query, heavy_write_query, None)
        # Replace the graph key with a plain string while the write runs.
        set_result = self.conn.set(GRAPH_ID, "1")
        writer.wait()
        possible_exceptions = [
            "Encountered a non-graph value type when opened key " + GRAPH_ID,
            "WRONGTYPE Operation against a key holding the wrong kind of value"
        ]

        result = writer.get()
        if isinstance(result, str):
            # If the SET command attempted to execute while the CREATE query was running,
            # an exception should have been issued.
            self.env.assertContains(result, possible_exceptions)
        else:
            # Otherwise, both the CREATE query and the SET command should have succeeded.
            # NOTE(review): attribute access here (`result.result_set`) differs
            # from the dict access (`result["result_set"]`) used by the other
            # tests — confirm thread_run_query's return type on this path.
            self.env.assertEquals(1000000, result.result_set[0][0])
            self.env.assertEquals(set_result, True)

        # Delete the key
        self.conn.delete(GRAPH_ID)

    def test_09_concurrent_multiple_readers_after_big_write(self):
        # Test issue #890
        self.graph = Graph(GRAPH_ID, self.conn)
        self.graph.query("""UNWIND(range(0,999)) as x CREATE()-[:R]->()""")

        read_query = """MATCH (n)-[r:R]->(m) RETURN count(r) AS res UNION RETURN 0 AS res"""

        queries = [read_query] * CLIENT_COUNT
        results = run_concurrent(queries, thread_run_query)
        for result in results:
            if isinstance(result, str):
                self.env.assertEquals(0, result)
            else:
                self.env.assertEquals(1000, result["result_set"][0][0])
def test_cache_sync(self):
    # NOTE(review): the `return` below deliberately disables this test;
    # everything after it is dead code kept for when it is re-enabled.
    pass
    return
    # This test verifies that client internal graph schema cache stays
    # in sync with the graph schema
    #
    # Client B will try to get Client A out of sync by:
    # 1. deleting the graph
    # 2. reconstructing the graph in a different order, this will cause
    #    a difference in the current mapping between string IDs and the
    #    mapping Client A is aware of
    #
    # Client A should pick up on the changes by comparing graph versions
    # and resyncing its cache.

    # Two clients pointing at the same graph key.
    A = Graph('cache-sync', self.r)
    B = Graph('cache-sync', self.r)

    # Build order:
    # 1. introduce label 'L' and 'K'
    # 2. introduce attribute 'x' and 'q'
    # 3. introduce relationship-type 'R' and 'S'
    A.query("CREATE (:L)")
    B.query("CREATE (:K)")
    A.query("MATCH (n) SET n.x = 1")
    B.query("MATCH (n) SET n.q = 1")
    A.query("MATCH (n) CREATE (n)-[:R]->()")
    B.query("MATCH (n) CREATE (n)-[:S]->()")

    # Cause client A to populate its cache
    A.query("MATCH (n)-[e]->() RETURN n, e")

    # A's cache reflects creation order: L before K, x before q, R before S.
    assert (len(A._labels) == 2)
    assert (len(A._properties) == 2)
    assert (len(A._relationshipTypes) == 2)
    assert (A._labels[0] == 'L')
    assert (A._labels[1] == 'K')
    assert (A._properties[0] == 'x')
    assert (A._properties[1] == 'q')
    assert (A._relationshipTypes[0] == 'R')
    assert (A._relationshipTypes[1] == 'S')

    # Have client B reconstruct the graph in a different order.
    B.delete()

    # Build order:
    # 1. introduce relationship-type 'R'
    # 2. introduce label 'L'
    # 3. introduce attribute 'x'
    B.query("CREATE ()-[:S]->()")
    B.query("CREATE ()-[:R]->()")
    B.query("CREATE (:K)")
    B.query("CREATE (:L)")
    B.query("MATCH (n) SET n.q = 1")
    B.query("MATCH (n) SET n.x = 1")

    # A's internal cached mapping is now out of sync
    # issue a query and make sure A's cache is synced.
    A.query("MATCH (n)-[e]->() RETURN n, e")

    # After resync, A's cache must reflect B's reversed creation order.
    assert (len(A._labels) == 2)
    assert (len(A._properties) == 2)
    assert (len(A._relationshipTypes) == 2)
    assert (A._labels[0] == 'K')
    assert (A._labels[1] == 'L')
    assert (A._properties[0] == 'q')
    assert (A._properties[1] == 'x')
    assert (A._relationshipTypes[0] == 'S')
    assert (A._relationshipTypes[1] == 'R')