def compare_uncached_to_cached_query_plans(self, query):
    """Assert that a query produces the same execution plan before and after caching."""
    global redis_con
    scratch = Graph('Cache_Test_plans', redis_con)
    # First call computes the plan fresh; the second should come from the cache.
    first_plan = scratch.execution_plan(query)
    second_plan = scratch.execution_plan(query)
    self.env.assertEqual(first_plan, second_plan)
    # Drop the temporary graph so runs don't interfere with each other.
    scratch.delete()
def test07_index_after_encode_decode_in_v7(self):
    """An index on :N(val) must survive an RDB save/load round trip."""
    g = Graph("index_after_encode_decode_in_v7", redis_con)
    g.query("CREATE INDEX ON :N(val)")

    def assert_index_used():
        # The planner should pick the index over a label scan.
        plan = g.execution_plan("MATCH (n:N {val:1}) RETURN n")
        self.env.assertIn("Index Scan", plan)

    # Verify indices exists.
    assert_index_used()
    # Save RDB & Load from RDB
    redis_con.execute_command("DEBUG", "RELOAD")
    # Verify indices exists after loading RDB.
    assert_index_used()
def test_CRUD_replication(self):
    """Verify CRUD operations and index creation replicate from primary to replica.

    Builds a tiny graph on the primary, creates an index, performs an
    update and a delete, then checks that the replica serves the same
    execution plan and the same query results.
    """
    # create a simple graph
    env = self.env
    source_con = env.getConnection()
    replica_con = env.getSlaveConnection()

    # enable write commands on slave, required as all RedisGraph
    # commands are registered as write commands
    replica_con.config_set("slave-read-only", "no")

    # perform CRUD operations
    # create a simple graph
    graph = Graph(GRAPH_ID, source_con)
    replica = Graph(GRAPH_ID, replica_con)
    s = Node(label='L', properties={'id': 0, 'name': 'a'})
    t = Node(label='L', properties={'id': 1, 'name': 'b'})
    e = Edge(s, 'R', t)
    graph.add_node(s)
    graph.add_node(t)
    graph.add_edge(e)
    graph.flush()

    # create index
    q = "CREATE INDEX ON :L(id)"
    graph.query(q)

    # update entity
    q = "MATCH (n:L {id:0}) SET n.id = 2, n.name = 'c'"
    graph.query(q)

    # delete entity
    q = "MATCH (n:L {id:1}) DELETE n"
    graph.query(q)

    # give replica some time to catch up
    # NOTE(review): fixed 1s sleep assumes replication lag is short — could
    # be flaky on a loaded machine.
    time.sleep(1)

    # make sure index is available on replica
    q = "MATCH (s:L {id:2}) RETURN s.name"
    plan = graph.execution_plan(q)
    replica_plan = replica.execution_plan(q)
    env.assertIn("Index Scan", plan)
    self.env.assertEquals(replica_plan, plan)

    # issue query on both source and replica
    # make sure results are the same
    result = graph.query(q).result_set
    replica_result = replica.query(q).result_set
    self.env.assertEquals(replica_result, result)
def test_multi_hashjoins(self):
    """Chained equality predicates should plan as two Value Hash Joins."""
    # See issue https://github.com/RedisGraph/RedisGraph/issues/1124
    # Construct a 4 node graph, (v1),(v2),(v3),(v4)
    graph = Graph(GRAPH_ID, self.env.getConnection())
    for value in (1, 2, 3, 4):
        graph.add_node(Node(properties={"val": value}))
    graph.flush()

    # Find nodes a,b,c such that a.v = 1, a.v = b.v-1 and b.v = c.v-1
    q = "MATCH (a {val:1}), (b), (c) WHERE a.val = b.val-1 AND b.val = c.val-1 RETURN a.val, b.val, c.val"
    plan = graph.execution_plan(q)

    # Make sure plan contains 2 Value Hash Join operations
    self.env.assertEquals(plan.count("Value Hash Join"), 2)

    # Validate results
    self.env.assertEquals(graph.query(q).result_set, [[1, 2, 3]])
def test08_multiple_graphs_with_index(self):
    """An RDB reload must preserve an index when several graphs coexist."""
    # Create a multi-key graph.
    multi_key = Graph("v7_graph_1", redis_con)
    multi_key.query(
        "UNWIND range(0,21) AS i CREATE (a:L {v: i})-[:E]->(b:L2 {v: i})")

    # Create a single-key graph.
    single_key = Graph("v7_graph_2", redis_con)
    single_key.query("CREATE (a:L {v: 1})-[:E]->(b:L2 {v: 2})")

    # Add an index to the multi-key graph.
    multi_key.query("CREATE INDEX ON :L(v)")

    # Save RDB and reload from RDB
    redis_con.execute_command("DEBUG", "RELOAD")

    # The load should be successful and the index should still be built.
    query = "MATCH (n:L {v:1}) RETURN n.v"
    self.env.assertIn("Index Scan", multi_key.execution_plan(query))
    self.env.assertEquals(multi_key.query(query).result_set, [[1]])
def test_v6_decode(self): graph_name = "v6_rdb_restore" # dump created with the following query (v6 supported property value: integer, double, boolean, string, null, array) # graph.query g "CREATE (:L1 {val:1, strval: 'str', numval: 5.5, nullval: NULL, boolval: true, array: [1,2,3]})-[:E{val:2}]->(:L2{val:3})" # graph.query g "CREATE INDEX ON :L1(val)" # dump g v6_rdb = b"\a\x81\x82\xb6\xa9\x85\xd6\xadh\x06\x05\x02g\x00\x02\x06\x05\x04val\x00\x05\astrval\x00\x05\anumval\x00\x05\bnullval\x00\x05\bboolval\x00\x05\x06array\x00\x02\x02\x02\x00\x05\x03L1\x00\x02\x01\x02\x00\x05\x04val\x00\x02\x01\x05\x03L2\x00\x02\x00\x02\x01\x02\x00\x05\x02E\x00\x02\x00\x02\x02\x02\x01\x02\x00\x02\x06\x05\x04val\x00\x02`\x00\x02\x01\x05\astrval\x00\x02H\x00\x05\x04str\x00\x05\anumval\x00\x02\x80\x00\x00@\x00\x04\x00\x00\x00\x00\x00\x00\x16@\x05\bnullval\x00\x02\x80\x00\x00\x80\x00\x05\bboolval\x00\x02P\x00\x02\x01\x05\x06array\x00\x02\b\x02\x03\x02`\x00\x02\x01\x02`\x00\x02\x02\x02`\x00\x02\x03\x02\x01\x02\x01\x02\x01\x05\x04val\x00\x02`\x00\x02\x03\x02\x01\x02\x00\x02\x01\x02\x00\x02\x01\x05\x04val\x00\x02`\x00\x02\x02\x00\t\x00\xd9\r\xb4c\xf2Z\xd9\xb3" redis_con.restore(graph_name, 0, v6_rdb, True) redis_graph = Graph(graph_name, redis_con) node0 = Node(node_id=0, label='L1', properties={ 'val': 1, 'strval': 'str', 'numval': 5.5, 'boolval': True, 'array': [1, 2, 3] }) node1 = Node(node_id=1, label='L2', properties={'val': 3}) edge01 = Edge(src_node=0, relation='E', dest_node=1, edge_id=0, properties={'val': 2}) results = redis_graph.query("MATCH (n)-[e]->(m) RETURN n, e, m") self.env.assertEqual(results.result_set, [[node0, edge01, node1]]) plan = redis_graph.execution_plan("MATCH (n:L1 {val:1}) RETURN n") self.env.assertIn("Index Scan", plan) results = redis_graph.query("MATCH (n:L1 {val:1}) RETURN n") self.env.assertEqual(results.result_set, [[node0]])
def test25_merge_with_where(self):
    """MERGE followed by WITH ... WHERE: the filter must be planned after the
    projection, not inside the Merge subtree.

    Fix: the regex patterns were plain strings ('Project\\s+Filter'), where
    '\\s' is an invalid string escape (SyntaxWarning on modern CPython);
    they are now raw strings, which is byte-identical at runtime.
    """
    redis_con = self.env.getConnection()
    graph = Graph("M", redis_con)

    # Index the :L(prop) combination so that the MERGE tree will not have a filter op.
    query = """CREATE INDEX ON :L(prop)"""
    graph.query(query)

    query = """MERGE (n:L {prop:1}) WITH n WHERE n.prop < 1 RETURN n.prop"""
    result = graph.query(query)
    plan = graph.execution_plan(query)

    # Verify that the Filter op follows a Project op.
    self.env.assertTrue(re.search(r'Project\s+Filter', plan))
    # Verify that there is no Filter op after the Merge op.
    self.env.assertFalse(re.search(r'Merge\s+Filter', plan))

    # Verify that the entity was created and no results were returned.
    self.env.assertEquals(result.nodes_created, 1)
    self.env.assertEquals(result.properties_set, 1)

    # Repeat the query.
    result = graph.query(query)
    # Verify that no data was modified and no results were returned.
    self.env.assertEquals(result.nodes_created, 0)
    self.env.assertEquals(result.properties_set, 0)
def test02_single_hop_multi_edge(self):
    """Expand-Into must account for parallel edges of the same type."""
    conn = self.env.getConnection()
    g = Graph(GRAPH_ID, conn)
    g.delete()

    # create graph
    g.query("CREATE (a:A)-[:R {v:1}]->(b:B), (a)-[:R {v:2}]->(b)")

    # make sure (a) is connected to (b) via a 'R' edge
    # there are multiple edges of type 'R' connecting the two
    q = "MATCH (a:A)-[]->(b:B) WITH a,b MATCH (a)-[e:R]->(b) RETURN count(e)"
    plan = g.execution_plan(q)
    res = g.query(q)
    self.env.assertIn("Expand Into", plan)
    self.env.assertEquals(2, res.result_set[0][0])

    # Both parallel edges should be surfaced, ordered by their 'v' property.
    q = "MATCH (a:A)-[]->(b:B) WITH a,b MATCH (a)-[e:R]->(b) RETURN e.v ORDER BY e.v"
    plan = g.execution_plan(q)
    res = g.query(q)
    self.env.assertIn("Expand Into", plan)
    self.env.assertEquals(1, res.result_set[0][0])
    self.env.assertEquals(2, res.result_set[1][0])
def test_execution_plan(self):
    """execution_plan() returns the textual plan of a parameterized query."""
    g = Graph('execution_plan', self.r)
    g.query("""CREATE (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}), (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}), (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})""")

    plan = g.execution_plan(
        "MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = $name RETURN r.name, t.name, $params",
        {'name': 'Yehuda'})
    self.assertEqual(
        plan,
        "Results\n Project\n Conditional Traverse | (t:Team)->(r:Rider)\n Filter\n Node By Label Scan | (t:Team)")

    # Clean up the scratch graph.
    g.delete()
def test01_single_hop_no_multi_edge(self):
    """Expand-Into is used when traversing a single 'R' edge between bound nodes.

    Fix: the original used a bare `except:`, which also swallows
    SystemExit/KeyboardInterrupt; narrowed to `except Exception`.
    """
    redis_con = self.env.getConnection()
    graph = Graph(GRAPH_ID, redis_con)
    try:
        # Deletion is best-effort: the graph may not exist on the first run.
        graph.delete()
    except Exception:
        pass

    # create graph
    query = "CREATE (:A)-[:R {v:1}]->(:B)"
    graph.query(query)

    # make sure (a) is connected to (b) via a 'R' edge
    query = "MATCH (a:A)-[]->(b:B) WITH a,b MATCH (a)-[:R]->(b) RETURN count(a)"
    plan = graph.execution_plan(query)
    result = graph.query(query)
    self.env.assertIn("Expand Into", plan)
    self.env.assertEquals(1, result.result_set[0][0])

    query = "MATCH (a:A)-[]->(b:B) WITH a,b MATCH (a)-[e:R]->(b) RETURN e.v"
    plan = graph.execution_plan(query)
    result = graph.query(query)
    self.env.assertIn("Expand Into", plan)
    self.env.assertEquals(1, result.result_set[0][0])
def test04_multi_hop_multi_edge(self):
    """Expand-Into over a two-hop pattern with multiple parallel paths."""
    g = Graph(GRAPH_ID, self.env.getConnection())
    g.delete()

    # create graph
    g.query("CREATE (a:A), (b:B), (i), (a)-[:R]->(i)-[:R]->(b), (a)-[:R]->(i)-[:R]->(b)")

    # make sure (a) is connected to (b) via a 'R' edge
    # there are multiple ways to reach (b) from (a)
    q = "MATCH (a:A)-[*]->(b:B) WITH a,b MATCH (a)-[:R]->()-[]->(b) RETURN count(1)"
    plan = g.execution_plan(q)
    res = g.query(q)
    self.env.assertIn("Expand Into", plan)
    # 2 edges on each hop => 2 * 2 = 4 distinct paths.
    self.env.assertEquals(4, res.result_set[0][0])
def test03_multi_hop_no_multi_edge(self):
    """Expand-Into over a two-hop pattern with a single path."""
    g = Graph(GRAPH_ID, self.env.getConnection())
    g.delete()

    # create graph
    g.query("CREATE (:A)-[:R]->()-[:R]->(:B)")

    # make sure (a) is connected to (b) via a 'R' edge
    # expand-into inspects the result of (F*R*ADJ)[a,b]
    q = "MATCH (a:A)-[*]->(b:B) WITH a,b MATCH (a)-[:R]->()-[]->(b) RETURN count(a)"
    plan = g.execution_plan(q)
    res = g.query(q)
    self.env.assertIn("Expand Into", plan)
    self.env.assertEquals(1, res.result_set[0][0])
def test_v4_decode(self): graph_name = "v4_rdb_restore" # dump created with the following query (v4 supported property value: integer, double, boolean, string) # graph.query g "CREATE (:L1 {val:1, strval: 'str', doubleval: 5.5, boolval: true})-[:E{val:2}]->(:L2{val:3})" # graph.query g "CREATE INDEX ON :L1(val)" # dump g v4_rdb = b"\a\x81\x82\xb6\xa9\x85\xd6\xadh\x04\x05\x02g\x00\x02\x02\x02\x81\xff\xff\xff\xff\xff\xff\xff\xff\x05\x04ALL\x00\x02\x04\x05\aboolval\x05\tdoubleval\x05\x06strval\x05\x03val\x02\x00\x05\x03L1\x00\x02\x04\x05\aboolval\x05\tdoubleval\x05\x06strval\x05\x03val\x02\x01\x05\x03L2\x00\x02\x01\x05\x03val\x02\x01\x02\x81\xff\xff\xff\xff\xff\xff\xff\xff\x05\x04ALL\x00\x02\x01\x05\x03val\x02\x00\x05\x02E\x00\x02\x01\x05\x03val\x02\x02\x02\x00\x02\x01\x02\x00\x02\x04\x05\bboolval\x00\x02\x10\x02\x01\x05\ndoubleval\x00\x02@@\x04\x00\x00\x00\x00\x00\x00\x16@\x05\astrval\x00\x02A\x00\x05\x04str\x00\x05\x04val\x00\x02\x04\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x05\x04val\x00\x02\x04\x02\x03\x02\x01\x02\x00\x02\x00\x02\x01\x02\x00\x02\x01\x05\x04val\x00\x02\x04\x02\x02\x02\x01\x05\x03L1\x00\x05\x04val\x00\x00\t\x00\xb38\x87\x01U^\x8b\xe3" redis_con.restore(graph_name, 0, v4_rdb, True) redis_graph = Graph(graph_name, redis_con) node0 = Node(node_id=0, label='L1', properties={'val':1, 'strval': 'str', 'doubleval': 5.5, 'boolval': True}) node1 = Node(node_id=1, label='L2', properties={'val':3}) edge01 = Edge(src_node=0, relation='E', dest_node=1, edge_id=0, properties={'val':2}) results = redis_graph.query("MATCH (n)-[e]->(m) RETURN n, e, m") self.env.assertEqual(results.result_set, [[node0, edge01, node1]]) plan = redis_graph.execution_plan("MATCH (n:L1 {val:1}) RETURN n") self.env.assertIn("Index Scan", plan) results = redis_graph.query("MATCH (n:L1 {val:1}) RETURN n") self.env.assertEqual(results.result_set, [[node0]])
def test06_tag_separator(self):
    """Index scans must work for string values that contain commas."""
    conn = self.env.getConnection()
    g = Graph("G", conn)

    # Create a single node with a long string property, introduce a comma as part of the string.
    g.query("""CREATE (:Node{value:"A ValuePartition is a pattern that describes a restricted set of classes from which a property can be associated. The parent class is used in restrictions, and the covering axiom means that only members of the subclasses may be used as values."})""")

    # Index property.
    g.query("""CREATE INDEX ON :Node(value)""")

    # Make sure node is returned by index scan.
    q = """MATCH (a:Node{value:"A ValuePartition is a pattern that describes a restricted set of classes from which a property can be associated. The parent class is used in restrictions, and the covering axiom means that only members of the subclasses may be used as values."}) RETURN a"""
    plan = g.execution_plan(q)
    rows = g.query(q).result_set
    self.env.assertIn('Index Scan', plan)
    self.env.assertEqual(len(rows), 1)
def test_v9_decode(self):
    """Restore a v9-format RDB payload and verify entities and index decode correctly."""
    graph_name = "v9_rdb_restore"
    # docker run -p 6379:6379 -it redislabs/redisgraph:2.4.10
    # dump created with the following query (v9 supported property value: integer, double, boolean, string, null, array, point)
    #  graph.query g "CREATE (:L1 {val:1, strval: 'str', numval: 5.5, nullval: NULL, boolval: true, array: [1,2,3], point: POINT({latitude: 32, longitude: 34})})-[:E{val:2}]->(:L2{val:3})"
    #  graph.query g "CREATE INDEX ON :L1(val)"
    #  graph.query g "CREATE INDEX ON :L1(none_existsing)"
    #  graph.query g "CREATE (:L3)-[:E2]->(:L4)"
    #  graph.query g "MATCH (n1:L3)-[r:E2]->(n2:L4) DELETE n1, r, n2"
    #  dump g
    v9_rdb = b"\a\x81\x82\xb6\xa9\x85\xd6\xadh\t\x05\x02g\x00\x02\x02\x02\x01\x02\x04\x02\x02\x02\x00\x02\x00\x02\x01\x02\x05\x02\x01\x02\x02\x02\x02\x02\x02\x02\x03\x02\x01\x02\x04\x02\x01\x02\x05\x02\x01\x02\x00\x02\x01\x02\x00\x02\x06\x02\x00\x02`\x00\x02\x01\x02\x01\x02H\x00\x05\x04str\x00\x02\x02\x02\x80\x00\x00@\x00\x04\x00\x00\x00\x00\x00\x00\x16@\x02\x04\x02P\x00\x02\x01\x02\x05\x02\b\x02\x03\x02`\x00\x02\x01\x02`\x00\x02\x02\x02`\x00\x02\x03\x02\x06\x02\x80\x00\x02\x00\x00\x04\x00\x00\x00\x00\x00\x00@@\x04\x00\x00\x00\x00\x00\x00A@\x02\x01\x02\x01\x02\x01\x02\x01\x02\x00\x02`\x00\x02\x03\x02\x02\x02\x03\x02\x00\x02\x00\x02\x01\x02\x00\x02\x01\x02\x00\x02`\x00\x02\x02\x02\x01\x02\b\x05\x04val\x00\x05\astrval\x00\x05\anumval\x00\x05\bnullval\x00\x05\bboolval\x00\x05\x06array\x00\x05\x06point\x00\x05\x0fnone_existsing\x00\x02\x04\x02\x00\x05\x03L1\x00\x02\x02\x02\x01\x05\x04val\x00\x02\x01\x05\x0fnone_existsing\x00\x02\x01\x05\x03L2\x00\x02\x00\x02\x02\x05\x03L3\x00\x02\x00\x02\x03\x05\x03L4\x00\x02\x00\x02\x02\x02\x00\x05\x02E\x00\x02\x00\x02\x01\x05\x03E2\x00\x02\x00\x00\t\x00\xd7\xd0\x1cB;\xce\x1d>"
    redis_con.restore(graph_name, 0, v9_rdb, True)
    redis_graph = Graph(graph_name, redis_con)

    # Expected decoded entities. The 'nullval' property from the dump's
    # CREATE query is absent here — the expectation shows NULL-valued
    # properties are not present after decode; the point is decoded as a
    # latitude/longitude mapping.
    node0 = Node(node_id=0, label='L1', properties={'val': 1, 'strval': 'str', 'numval': 5.5, 'boolval': True, 'array': [1,2,3], 'point': {'latitude': 32,
                                                                                                                                           'longitude': 34}})
    node1 = Node(node_id=1, label='L2', properties={'val': 3})
    edge01 = Edge(src_node=0, relation='E', dest_node=1, edge_id=0, properties={'val':2})
    results = redis_graph.query("MATCH (n)-[e]->(m) RETURN n, e, m")
    self.env.assertEqual(results.result_set, [[node0, edge01, node1]])

    # The index on :L1(val) must be restored as well and used by the planner.
    plan = redis_graph.execution_plan("MATCH (n:L1 {val:1}) RETURN n")
    self.env.assertIn("Index Scan", plan)
    results = redis_graph.query("MATCH (n:L1 {val:1}) RETURN n")
    self.env.assertEqual(results.result_set, [[node0]])
def test09_test_query_graph_populate_nodes_labels(self):
    """Every label attached to a reused pattern variable must be extracted."""
    g = Graph('G', self.redis_con)

    # create node with label L1 for the test in the next query
    # we need to make sure we replace the starting point of the traversal
    # from all nodes with label L1 to all nodes with label L2
    res = g.query("""CREATE (a:L1 {v:0})-[:R1]->()""")
    self.env.assertEquals(res.labels_added, 1)
    self.env.assertEquals(res.nodes_created, 2)
    self.env.assertEquals(res.relationships_created, 1)

    # node 'a' is mentioned twice in the following pattern
    # each time with a different label, when extracting a sub query-graph
    # we need to make sure all labels mentioned in the extracted pattern
    # are extracted.
    q = """MERGE ()-[:R2]->(a:L1)-[:R1]->(a:L2) RETURN *"""
    self.env.assertContains("Node By Label Scan | (a:L2)", g.execution_plan(q))
    res = g.query(q)
    self.env.assertEquals(res.nodes_created, 2)
    self.env.assertEquals(res.relationships_created, 2)
def test07_index_scan_and_id(self):
    """Combine an id() range predicate with an indexed property predicate.

    Fix: the original executed the MATCH query twice and discarded the
    first result (`query_result` was assigned and immediately reassigned);
    the redundant execution is removed.
    """
    redis_con = self.env.getConnection()
    redis_graph = Graph("G", redis_con)

    # Populate 10 person nodes with ages 0..9.
    nodes = []
    for i in range(10):
        node = Node(node_id=i, label='person', properties={'age': i})
        nodes.append(node)
        redis_graph.add_node(node)
    redis_graph.flush()

    query = """CREATE INDEX ON :person(age)"""
    query_result = redis_graph.query(query)
    self.env.assertEqual(1, query_result.indices_created)

    # id(n) >= 7 restricts to nodes 7..9; n.age < 9 drops node 9.
    query = """MATCH (n:person) WHERE id(n)>=7 AND n.age<9 RETURN n ORDER BY n.age"""
    plan = redis_graph.execution_plan(query)
    self.env.assertIn('Index Scan', plan)
    self.env.assertIn('Filter', plan)

    query_result = redis_graph.query(query)
    self.env.assertEqual(2, len(query_result.result_set))
    expected_result = [[nodes[7]], [nodes[8]]]
    self.env.assertEquals(expected_result, query_result.result_set)
class BulkUpdate:
    """Handler class for emitting bulk update commands"""

    def __init__(self, graph_name, max_token_size, separator, no_header,
                 filename, query, variable_name, client):
        self.separator = separator
        self.no_header = no_header
        # Every emitted command is "UNWIND $rows AS <variable_name> <query>".
        self.query = " ".join(["UNWIND $rows AS", variable_name, query])
        self.buffer_size = 0
        # Token budget in bytes, minus the space the query itself occupies.
        self.max_token_size = max_token_size * 1024 * 1024 - utf8len(self.query)
        self.filename = filename
        self.graph_name = graph_name
        self.graph = Graph(graph_name, client)
        self.statistics = {}

    def update_statistics(self, result):
        """Fold the statistics of a single query result into the running totals."""
        for key, new_val in result.statistics.items():
            self.statistics[key] = self.statistics.get(key, 0) + new_val

    def emit_buffer(self, rows):
        """Execute the update query against the graph for one batch of rows."""
        command = " ".join([rows, self.query])
        result = self.graph.query(command)
        self.update_statistics(result)

    def quote_string(self, cell):
        """Return *cell* formatted as a Cypher literal.

        Numerics, booleans, arrays, and already-quoted strings pass through
        unchanged; any other string is wrapped in double quotes. An empty
        cell becomes an empty quoted string.
        """
        cell = cell.strip()
        # Robustness: an empty cell would raise IndexError on cell[0] below.
        if not cell:
            return "\"\""
        # Quote-interpolate cell if it is an unquoted string.
        try:
            float(cell)  # Check for numeric
        except ValueError:
            # BUGFIX: the array check previously read `cell.lower != ']'`,
            # comparing the bound method object to a string (always True);
            # the intended comparison is against the cell's last character,
            # mirroring the quoted-string checks below.
            if ((cell.lower() != 'false' and cell.lower() != 'true') and  # Check for boolean
                    (cell[0] != '[' and cell[-1] != ']') and  # Check for array
                    (cell[0] != "\"" and cell[-1] != "\"") and  # Check for double-quoted string
                    (cell[0] != "\'" and cell[-1] != "\'")):  # Check for single-quoted string
                cell = "".join(["\"", cell, "\""])
        return cell

    # Raise an exception if the query triggers a compile-time error
    def validate_query(self):
        command = " ".join(["CYPHER rows=[]", self.query])
        # The plan call will raise an error if the query is malformed or invalid.
        self.graph.execution_plan(command)

    def process_update_csv(self):
        """Stream the CSV file, batching rows into size-bounded UNWIND commands."""
        entity_count = count_entities(self.filename)

        with open(self.filename, 'rt') as f:
            if self.no_header is False:
                next(f)  # skip header

            reader = csv.reader(f, delimiter=self.separator,
                                skipinitialspace=True,
                                quoting=csv.QUOTE_NONE, escapechar='\\')

            rows_strs = []
            with click.progressbar(reader, length=entity_count,
                                   label=self.graph_name) as reader:
                for row in reader:
                    # Prepare the string representation of the current row.
                    row = ",".join([self.quote_string(cell) for cell in row])
                    next_line = "".join(["[", row.strip(), "]"])

                    # Emit buffer now if the max token size would be exceeded by this addition.
                    added_size = utf8len(next_line) + 1  # Add one to compensate for the added comma.
                    if self.buffer_size + added_size > self.max_token_size:
                        # Concatenate all rows into a valid parameter set
                        buf = "".join(["CYPHER rows=[", ",".join(rows_strs), "]"])
                        self.emit_buffer(buf)
                        rows_strs = []
                        self.buffer_size = 0

                    # Concatenate the string into the rows string representation.
                    rows_strs.append(next_line)
                    self.buffer_size += added_size

            # Concatenate all rows into a valid parameter set
            buf = "".join(["CYPHER rows=[", ",".join(rows_strs), "]"])
            self.emit_buffer(buf)
def test_CRUD_replication(self):
    """Verify CRUD operations and index lifecycle replicate from primary to replica.

    Builds a small graph, creates both an exact-match and several
    full-text indexes, performs an update and a delete, then checks that
    the replica reports the same plans, results, node counts, and index
    set — including after dropping the full-text index.
    """
    # create a simple graph
    env = self.env
    source_con = env.getConnection()
    replica_con = env.getSlaveConnection()

    # enable write commands on slave, required as all RedisGraph
    # commands are registered as write commands
    replica_con.config_set("slave-read-only", "no")

    # perform CRUD operations
    # create a simple graph
    graph = Graph(GRAPH_ID, source_con)
    replica = Graph(GRAPH_ID, replica_con)
    s = Node(label='L', properties={'id': 0, 'name': 'abcd'})
    t = Node(label='L', properties={'id': 1, 'name': 'efgh'})
    e = Edge(s, 'R', t)
    graph.add_node(s)
    graph.add_node(t)
    graph.add_edge(e)
    graph.flush()

    # create index
    q = "CREATE INDEX ON :L(id)"
    graph.query(q)

    # create full-text index
    q = "CALL db.idx.fulltext.createNodeIndex('L', 'name')"
    graph.query(q)

    # add fields to existing index
    q = "CALL db.idx.fulltext.createNodeIndex('L', 'title', 'desc')"
    graph.query(q)

    # create full-text index with index config
    q = "CALL db.idx.fulltext.createNodeIndex({label: 'L1', language: 'german', stopwords: ['a', 'b'] }, 'title', 'desc')"
    graph.query(q)

    # update entity
    q = "MATCH (n:L {id:1}) SET n.id = 2"
    graph.query(q)

    # delete entity
    q = "MATCH (n:L {id:0}) DELETE n"
    graph.query(q)

    # give replica some time to catch up
    # NOTE(review): fixed 1s sleep assumes replication lag is short — could
    # be flaky on a loaded machine.
    time.sleep(1)

    # make sure index is available on replica
    q = "MATCH (s:L {id:2}) RETURN s.name"
    plan = graph.execution_plan(q)
    replica_plan = replica.execution_plan(q)
    env.assertIn("Index Scan", plan)
    self.env.assertEquals(replica_plan, plan)

    # issue query on both source and replica
    # make sure results are the same
    result = graph.query(q).result_set
    replica_result = replica.query(q).result_set
    self.env.assertEquals(replica_result, result)

    # make sure node count on both primary and replica is the same
    q = "MATCH (n) RETURN count(n)"
    result = graph.query(q).result_set
    replica_result = replica.query(q).result_set
    self.env.assertEquals(replica_result, result)

    # make sure nodes are in sync
    q = "MATCH (n) RETURN n ORDER BY n"
    result = graph.query(q).result_set
    replica_result = replica.query(q).result_set
    self.env.assertEquals(replica_result, result)

    # make sure both primary and replica have the same set of indexes
    q = "CALL db.indexes()"
    result = graph.query(q).result_set
    replica_result = replica.query(q).result_set
    self.env.assertEquals(replica_result, result)

    # drop fulltext index
    q = "CALL db.idx.fulltext.drop('L')"
    graph.query(q)

    # give replica some time to catch up
    time.sleep(1)

    # make sure both primary and replica have the same set of indexes
    q = "CALL db.indexes()"
    result = graph.query(q).result_set
    replica_result = replica.query(q).result_set
    self.env.assertEquals(replica_result, result)