Code example #1
class testEmptyQuery(FlowTestsBase):
    def __init__(self):
        global graph
        self.env = Env(decodeResponses=True)
        graph = Graph('G', self.env.getConnection())

    def test01_empty_query(self):
        try:
            # execute empty query
            graph.query("")
        except ResponseError as e:
            self.env.assertIn("Error: empty query.", str(e))
Code example #2
File: test_profile.py  Project: zsdlove/RedisGraph
class testProfile(FlowTestsBase):
    def __init__(self):
        global redis_con
        global redis_graph
        self.env = Env(decodeResponses=True)
        redis_con = self.env.getConnection()
        redis_graph = Graph(GRAPH_ID, redis_con)

    def test_profile(self):
        q = """UNWIND range(1, 3) AS x CREATE (p:Person {v:x})"""
        profile = redis_con.execute_command("GRAPH.PROFILE", GRAPH_ID, q)
        profile = [x[0:x.index(',')].strip() for x in profile]

        self.env.assertIn("Create | Records produced: 3", profile)
        self.env.assertIn("Unwind | Records produced: 3", profile)

        q = "MATCH (p:Person) WHERE p.v > 1 RETURN p"
        profile = redis_con.execute_command("GRAPH.PROFILE", GRAPH_ID, q)
        profile = [x[0:x.index(',')].strip() for x in profile]

        self.env.assertIn("Results | Records produced: 2", profile)
        self.env.assertIn("Project | Records produced: 2", profile)
        self.env.assertIn("Filter | Records produced: 2", profile)
        self.env.assertIn(
            "Node By Label Scan | (p:Person) | Records produced: 3", profile)
Code example #3
File: base.py  Project: zofuthan/RedisGraph
class FlowTestsBase(object):
    def __init__(self):
        self.env = Env()
        redis_con = self.env.getConnection()
        redis_con.execute_command("FLUSHALL")

    def _assert_equalish(self, a, b, e=0.05):
        delta = a * e
        diff = abs(a - b)
        if diff > delta:
            warnings.warn('runtimes differ by more than %.1f%%' % (e * 100))

    def _assert_only_expected_results_are_in_actual_results(
            self, actual_result, query_info):
        actual_result_set = []
        if actual_result.result_set is not None:
            actual_result_set = actual_result.result_set

        # Assert number of results.
        self.env.assertEqual(len(actual_result_set),
                             len(query_info.expected_result))

        # Assert actual values vs expected values.
        for res in query_info.expected_result:
            self.env.assertIn(res, actual_result_set)

    def _assert_actual_results_contained_in_expected_results(
            self, actual_result, query_info, num_contained_results):
        actual_result_set = actual_result.result_set

        # Assert num results.
        self.env.assertEqual(len(actual_result_set), num_contained_results)

        # Assert actual values vs expected values.
        expected_result = query_info.expected_result
        count = len(
            [res for res in expected_result if res in actual_result_set])

        # Assert number of different results is as expected.
        self.env.assertEqual(count, num_contained_results)

    def _assert_run_time(self, actual_result, query_info):
        if actual_result.run_time_ms > query_info.max_run_time_ms:
            warnings.warn(
                'Maximum runtime for query \"%s\" was: %s, but should be %s' %
                (query_info.description, str(actual_result.run_time_ms),
                 str(query_info.max_run_time_ms)))
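
The query_info objects passed to these helpers are only required to expose description, expected_result and max_run_time_ms. A minimal stand-in (hypothetical; the real suite defines its own query-info class) could look like:

from collections import namedtuple

# Hypothetical stand-in for the query_info objects consumed by the assertions above.
QueryInfo = namedtuple('QueryInfo', ['description', 'expected_result', 'max_run_time_ms'])

info = QueryInfo(description="count nodes",
                 expected_result=[[3]],
                 max_run_time_ms=50)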
Code example #4
class test_prev_rdb_decode(FlowTestsBase):
    def __init__(self):
        self.env = Env(decodeResponses=True)
        global redis_con
        redis_con = self.env.getConnection()

    def tearDown(self):
        self.env.flush()

    def test_v4_decode(self):
        graph_name = "v4_rdb_restore"
        # dump created with the following commands (v4 supported property values: integer, double, boolean, string)
        #  graph.query g "CREATE (:L1 {val:1, strval: 'str', doubleval: 5.5, boolval: true})-[:E{val:2}]->(:L2{val:3})"
        #  graph.query g "CREATE INDEX ON :L1(val)"
        #  dump g
        v4_rdb = b"\a\x81\x82\xb6\xa9\x85\xd6\xadh\x04\x05\x02g\x00\x02\x02\x02\x81\xff\xff\xff\xff\xff\xff\xff\xff\x05\x04ALL\x00\x02\x04\x05\aboolval\x05\tdoubleval\x05\x06strval\x05\x03val\x02\x00\x05\x03L1\x00\x02\x04\x05\aboolval\x05\tdoubleval\x05\x06strval\x05\x03val\x02\x01\x05\x03L2\x00\x02\x01\x05\x03val\x02\x01\x02\x81\xff\xff\xff\xff\xff\xff\xff\xff\x05\x04ALL\x00\x02\x01\x05\x03val\x02\x00\x05\x02E\x00\x02\x01\x05\x03val\x02\x02\x02\x00\x02\x01\x02\x00\x02\x04\x05\bboolval\x00\x02\x10\x02\x01\x05\ndoubleval\x00\x02@@\x04\x00\x00\x00\x00\x00\x00\x16@\x05\astrval\x00\x02A\x00\x05\x04str\x00\x05\x04val\x00\x02\x04\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x05\x04val\x00\x02\x04\x02\x03\x02\x01\x02\x00\x02\x00\x02\x01\x02\x00\x02\x01\x05\x04val\x00\x02\x04\x02\x02\x02\x01\x05\x03L1\x00\x05\x04val\x00\x00\t\x00\xb38\x87\x01U^\x8b\xe3"
        redis_con.restore(graph_name, 0, v4_rdb, True)
        redis_graph = Graph(graph_name, redis_con)
        node0 = Node(node_id=0, label='L1', properties={'val':1, 'strval': 'str', 'doubleval': 5.5, 'boolval': True})
        node1 = Node(node_id=1, label='L2', properties={'val':3})
        edge01 = Edge(src_node=0, relation='E', dest_node=1, edge_id=0, properties={'val':2})
        results = redis_graph.query("MATCH (n)-[e]->(m) RETURN n, e, m")
        self.env.assertEqual(results.result_set, [[node0, edge01, node1]])
        plan = redis_graph.execution_plan("MATCH (n:L1 {val:1}) RETURN n")
        self.env.assertIn("Index Scan", plan)
        results = redis_graph.query("MATCH (n:L1 {val:1}) RETURN n")
        self.env.assertEqual(results.result_set, [[node0]])

    def test_v6_decode(self):
        graph_name = "v6_rdb_restore"
        # dump created with the following commands (v6 supported property values: integer, double, boolean, string, null, array)
        #  graph.query g "CREATE (:L1 {val:1, strval: 'str', numval: 5.5, nullval: NULL, boolval: true, array: [1,2,3]})-[:E{val:2}]->(:L2{val:3})"
        #  graph.query g "CREATE INDEX ON :L1(val)"
        #  dump g
        v6_rdb = b"\a\x81\x82\xb6\xa9\x85\xd6\xadh\x06\x05\x02g\x00\x02\x06\x05\x04val\x00\x05\astrval\x00\x05\anumval\x00\x05\bnullval\x00\x05\bboolval\x00\x05\x06array\x00\x02\x02\x02\x00\x05\x03L1\x00\x02\x01\x02\x00\x05\x04val\x00\x02\x01\x05\x03L2\x00\x02\x00\x02\x01\x02\x00\x05\x02E\x00\x02\x00\x02\x02\x02\x01\x02\x00\x02\x06\x05\x04val\x00\x02`\x00\x02\x01\x05\astrval\x00\x02H\x00\x05\x04str\x00\x05\anumval\x00\x02\x80\x00\x00@\x00\x04\x00\x00\x00\x00\x00\x00\x16@\x05\bnullval\x00\x02\x80\x00\x00\x80\x00\x05\bboolval\x00\x02P\x00\x02\x01\x05\x06array\x00\x02\b\x02\x03\x02`\x00\x02\x01\x02`\x00\x02\x02\x02`\x00\x02\x03\x02\x01\x02\x01\x02\x01\x05\x04val\x00\x02`\x00\x02\x03\x02\x01\x02\x00\x02\x01\x02\x00\x02\x01\x05\x04val\x00\x02`\x00\x02\x02\x00\t\x00\xd9\r\xb4c\xf2Z\xd9\xb3"
        redis_con.restore(graph_name, 0, v6_rdb, True)
        redis_graph = Graph(graph_name, redis_con)
        node0 = Node(node_id=0, label='L1', properties={'val': 1, 'strval': 'str', 'numval': 5.5, 'boolval': True, 'array': [1,2,3]})
        node1 = Node(node_id=1, label='L2', properties={'val': 3})
        edge01 = Edge(src_node=0, relation='E', dest_node=1, edge_id=0, properties={'val':2})
        results = redis_graph.query("MATCH (n)-[e]->(m) RETURN n, e, m")
        self.env.assertEqual(results.result_set, [[node0, edge01, node1]])
        plan = redis_graph.execution_plan("MATCH (n:L1 {val:1}) RETURN n")
        self.env.assertIn("Index Scan", plan)
        results = redis_graph.query("MATCH (n:L1 {val:1}) RETURN n")
        self.env.assertEqual(results.result_set, [[node0]])
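
The v4_rdb and v6_rdb payloads are raw DUMP outputs captured from older RedisGraph versions (see the commented commands above). If a new fixture were ever needed, it could presumably be captured the same way with redis-py's DUMP/RESTORE pair; a sketch, assuming a populated graph key named "g" on a running server:

payload = redis_con.dump("g")                  # version-tagged serialized value
redis_con.restore("g_copy", 0, payload, True)  # ttl=0, replace=True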
Code example #5
File: test_slowlog.py  Project: kokizzu/RedisGraph
class testSlowLog(FlowTestsBase):
    def __init__(self):
        self.env = Env(decodeResponses=True)
        global redis_con
        global redis_graph

        redis_con = self.env.getConnection()
        redis_graph = Graph(GRAPH_ID, redis_con)

    def test_slowlog(self):
        # Slowlog should fail when the graph doesn't exist.
        try:
            slowlog = redis_con.execute_command(
                "GRAPH.SLOWLOG NONE_EXISTING_GRAPH")
        except ResponseError as e:
            self.env.assertIn("Invalid graph operation on empty key", str(e))

        # Issue create query twice.
        redis_graph.query("""CREATE ()""")
        redis_graph.query("""CREATE ()""")

        # Slow log should contain a single entry, no duplicates.
        slowlog = redis_con.execute_command("GRAPH.SLOWLOG " + GRAPH_ID)
        self.env.assertEquals(len(slowlog), 1)

        # Saturate slowlog.
        for i in range(1024):
            q = """CREATE ({v:%s})""" % i
            redis_graph.query(q)
        A = redis_con.execute_command("GRAPH.SLOWLOG " + GRAPH_ID)
        B = redis_con.execute_command("GRAPH.SLOWLOG " + GRAPH_ID)

        # Calling slowlog multiple times should produce the same result.
        self.env.assertEquals(A, B)

        # Issue a long running query, this should replace an existing entry in the slowlog.
        q = """MATCH (n), (m) WHERE n.v > 0 AND n.v < 500 SET m.v = rand() WITH n, m RETURN SUM(n.v + m.v)"""
        redis_graph.query(q)
        B = redis_con.execute_command("GRAPH.SLOWLOG " + GRAPH_ID)

        self.env.assertNotEqual(A, B)
Code example #6
class TestAggregate():
    def __init__(self):
        self.env = Env()
        add_values(self.env)

    def testGroupBy(self):
        cmd = [
            'ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'count', '0', 'AS', 'count', 'SORTBY', 2, '@count', 'desc',
            'LIMIT', '0', '5'
        ]

        res = self.env.cmd(*cmd)
        self.env.assertIsNotNone(res)
        self.env.assertEqual([
            292L, ['brand', '', 'count', '1518'],
            ['brand', 'mad catz', 'count', '43'],
            ['brand', 'generic', 'count', '40'],
            ['brand', 'steelseries', 'count', '37'],
            ['brand', 'logitech', 'count', '35']
        ], res)

    def testMinMax(self):
        cmd = [
            'ft.aggregate', 'games', 'sony', 'GROUPBY', '1', '@brand',
            'REDUCE', 'count', '0', 'REDUCE', 'min', '1', '@price', 'as',
            'minPrice', 'SORTBY', '2', '@minPrice', 'DESC'
        ]
        res = self.env.cmd(*cmd)
        self.env.assertIsNotNone(res)
        row = to_dict(res[1])
        self.env.assertEqual(88, int(float(row['minPrice'])))

        cmd = [
            'ft.aggregate', 'games', 'sony', 'GROUPBY', '1', '@brand',
            'REDUCE', 'count', '0', 'REDUCE', 'max', '1', '@price', 'as',
            'maxPrice', 'SORTBY', '2', '@maxPrice', 'DESC'
        ]
        res = self.env.cmd(*cmd)
        row = to_dict(res[1])
        self.env.assertEqual(695, int(float(row['maxPrice'])))

    def testAvg(self):
        cmd = [
            'ft.aggregate', 'games', 'sony', 'GROUPBY', '1', '@brand',
            'REDUCE', 'avg', '1', '@price', 'AS', 'avg_price', 'REDUCE',
            'count', '0', 'SORTBY', '2', '@avg_price', 'DESC'
        ]
        res = self.env.cmd(*cmd)
        self.env.assertIsNotNone(res)
        self.env.assertEqual(26, res[0])
        # Ensure the formatting actually exists

        first_row = to_dict(res[1])
        self.env.assertEqual(109, int(float(first_row['avg_price'])))

        for row in res[1:]:
            row = to_dict(row)
            self.env.assertIn('avg_price', row)

        # Test aliasing
        cmd = [
            'FT.AGGREGATE', 'games', 'sony', 'GROUPBY', '1', '@brand',
            'REDUCE', 'avg', '1', '@price', 'AS', 'avgPrice'
        ]
        res = self.env.cmd(*cmd)
        first_row = to_dict(res[1])
        self.env.assertEqual(17, int(float(first_row['avgPrice'])))

    def testCountDistinct(self):
        cmd = [
            'FT.AGGREGATE', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'COUNT_DISTINCT', '1', '@title', 'AS', 'count_distinct(title)',
            'REDUCE', 'COUNT', '0'
        ]
        res = self.env.cmd(*cmd)[1:]
        # print res
        row = to_dict(res[0])
        self.env.assertEqual(1484, int(row['count_distinct(title)']))

        cmd = [
            'FT.AGGREGATE', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'COUNT_DISTINCTISH', '1', '@title', 'AS',
            'count_distinctish(title)', 'REDUCE', 'COUNT', '0'
        ]
        res = self.env.cmd(*cmd)[1:]
        # print res
        row = to_dict(res[0])
        self.env.assertEqual(1461, int(row['count_distinctish(title)']))

    def testQuantile(self):
        cmd = [
            'FT.AGGREGATE', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'QUANTILE', '2', '@price', '0.50', 'AS', 'q50', 'REDUCE',
            'QUANTILE', '2', '@price', '0.90', 'AS', 'q90', 'REDUCE',
            'QUANTILE', '2', '@price', '0.95', 'AS', 'q95', 'REDUCE', 'AVG',
            '1', '@price', 'REDUCE', 'COUNT', '0', 'AS', 'rowcount', 'SORTBY',
            '2', '@rowcount', 'DESC', 'MAX', '1'
        ]

        res = self.env.cmd(*cmd)
        row = to_dict(res[1])
        # TODO: Better samples
        self.env.assertAlmostEqual(14.99, float(row['q50']), delta=3)
        self.env.assertAlmostEqual(70, float(row['q90']), delta=50)

        # This tests the 95th percentile, which is error prone because
        # so few samples actually exist. I'm disabling it for now so that
        # there is no breakage in CI
        # self.env.assertAlmostEqual(110, (float(row['q95'])), delta=50)

    def testStdDev(self):
        cmd = [
            'FT.AGGREGATE', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'STDDEV', '1', '@price', 'AS', 'stddev(price)', 'REDUCE', 'AVG',
            '1', '@price', 'AS', 'avgPrice', 'REDUCE', 'QUANTILE', '2',
            '@price', '0.50', 'AS', 'q50Price', 'REDUCE', 'COUNT', '0', 'AS',
            'rowcount', 'SORTBY', '2', '@rowcount', 'DESC', 'LIMIT', '0', '10'
        ]
        res = self.env.cmd(*cmd)
        row = to_dict(res[1])

        self.env.assertTrue(10 <= int(float(row['q50Price'])) <= 20)
        self.env.assertAlmostEqual(53,
                                   int(float(row['stddev(price)'])),
                                   delta=50)
        self.env.assertEqual(29, int(float(row['avgPrice'])))

    def testParseTime(self):
        cmd = [
            'FT.AGGREGATE', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'COUNT', '0', 'AS', 'count', 'APPLY', 'timefmt(1517417144)', 'AS',
            'dt', 'APPLY', 'parse_time("%FT%TZ", @dt)', 'as', 'parsed_dt',
            'LIMIT', '0', '1'
        ]
        res = self.env.cmd(*cmd)

        self.env.assertEqual([
            'brand', '', 'count', '1518', 'dt', '2018-01-31T16:45:44Z',
            'parsed_dt', '1517417144'
        ], res[1])

    def testRandomSample(self):
        cmd = [
            'FT.AGGREGATE', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'COUNT', '0', 'AS', 'num', 'REDUCE', 'RANDOM_SAMPLE', '2',
            '@price', '10', 'SORTBY', '2', '@num', 'DESC', 'MAX', '10'
        ]
        for row in self.env.cmd(*cmd)[1:]:
            self.env.assertIsInstance(row[5], list)
            self.env.assertGreater(len(row[5]), 0)
            self.env.assertGreaterEqual(row[3], len(row[5]))

            self.env.assertLessEqual(len(row[5]), 10)

    def testTimeFunctions(self):
        cmd = [
            'FT.AGGREGATE', 'games', '*', 'APPLY', '1517417144', 'AS', 'dt',
            'APPLY', 'timefmt(@dt)', 'AS', 'timefmt', 'APPLY', 'day(@dt)',
            'AS', 'day', 'APPLY', 'hour(@dt)', 'AS', 'hour', 'APPLY',
            'minute(@dt)', 'AS', 'minute', 'APPLY', 'month(@dt)', 'AS',
            'month', 'APPLY', 'dayofweek(@dt)', 'AS', 'dayofweek', 'APPLY',
            'dayofmonth(@dt)', 'AS', 'dayofmonth', 'APPLY', 'dayofyear(@dt)',
            'AS', 'dayofyear', 'APPLY', 'year(@dt)', 'AS', 'year', 'LIMIT',
            '0', '1'
        ]
        res = self.env.cmd(*cmd)
        self.env.assertListEqual([
            1L,
            [
                'dt', '1517417144', 'timefmt', '2018-01-31T16:45:44Z', 'day',
                '1517356800', 'hour', '1517414400', 'minute', '1517417100',
                'month', '1514764800', 'dayofweek', '3', 'dayofmonth', '31',
                'dayofyear', '30', 'year', '2018'
            ]
        ], res)

    def testStringFormat(self):
        cmd = [
            'FT.AGGREGATE', 'games', '@brand:sony', 'GROUPBY', '2', '@title',
            '@brand', 'REDUCE', 'COUNT', '0', 'REDUCE', 'MAX', '1', '@price',
            'AS', 'price', 'APPLY',
            'format("%s|%s|%s|%s", @title, @brand, "Mark", @price)', 'as',
            'titleBrand', 'LIMIT', '0', '10'
        ]
        res = self.env.cmd(*cmd)
        for row in res[1:]:
            row = to_dict(row)
            expected = '%s|%s|%s|%g' % (row['title'], row['brand'], 'Mark',
                                        float(row['price']))
            self.env.assertEqual(expected, row['titleBrand'])

    def testSum(self):
        cmd = [
            'ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'count', '0', 'AS', 'count', 'REDUCE', 'sum', 1, '@price', 'AS',
            'sum(price)', 'SORTBY', 2, '@sum(price)', 'desc', 'LIMIT', '0', '5'
        ]
        res = self.env.cmd(*cmd)
        self.env.assertEqual([
            292L, ['brand', '', 'count', '1518', 'sum(price)', '44780.69'],
            ['brand', 'mad catz', 'count', '43', 'sum(price)', '3973.48'],
            ['brand', 'razer', 'count', '26', 'sum(price)', '2558.58'],
            ['brand', 'logitech', 'count', '35', 'sum(price)', '2329.21'],
            ['brand', 'steelseries', 'count', '37', 'sum(price)', '1851.12']
        ], res)

    def testFilter(self):
        cmd = [
            'ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'count', '0', 'AS', 'count', 'FILTER', '@count > 5'
        ]

        res = self.env.cmd(*cmd)
        for row in res[1:]:
            row = to_dict(row)
            self.env.assertGreater(int(row['count']), 5)

        cmd = [
            'ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'count', '0', 'AS', 'count', 'FILTER', '@count < 5', 'FILTER',
            '@count > 2 && @brand != ""'
        ]

        res = self.env.cmd(*cmd)
        for row in res[1:]:
            row = to_dict(row)
            self.env.assertLess(int(row['count']), 5)
            self.env.assertGreater(int(row['count']), 2)

    def testToList(self):
        cmd = [
            'ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'count_distinct', '1', '@price', 'as', 'count', 'REDUCE', 'tolist',
            1, '@price', 'as', 'prices', 'SORTBY', 2, '@count', 'desc',
            'LIMIT', '0', '5'
        ]
        res = self.env.cmd(*cmd)

        for row in res[1:]:
            row = to_dict(row)
            self.env.assertEqual(int(row['count']), len(row['prices']))

    def testSortBy(self):
        res = self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', '1',
                           '@brand', 'REDUCE', 'sum', 1, '@price', 'as',
                           'price', 'SORTBY', 2, '@price', 'desc', 'LIMIT',
                           '0', '2')

        self.env.assertListEqual([
            292L, ['brand', '', 'price', '44780.69'],
            ['brand', 'mad catz', 'price', '3973.48']
        ], res)

        res = self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', '1',
                           '@brand', 'REDUCE', 'sum', 1, '@price', 'as',
                           'price', 'SORTBY', 2, '@price', 'asc', 'LIMIT', '0',
                           '2')

        self.env.assertListEqual([
            292L, ['brand', 'myiico', 'price', '0.23'],
            ['brand', 'crystal dynamics', 'price', '0.25']
        ], res)

        # Test MAX with limit higher than it
        res = self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', '1',
                           '@brand', 'REDUCE', 'sum', 1, '@price', 'as',
                           'price', 'SORTBY', 2, '@price', 'asc', 'MAX', 2)

        self.env.assertListEqual([
            292L, ['brand', 'myiico', 'price', '0.23'],
            ['brand', 'crystal dynamics', 'price', '0.25']
        ], res)

        # Test Sorting by multiple properties
        res = self.env.cmd(
            'ft.aggregate',
            'games',
            '*',
            'GROUPBY',
            '1',
            '@brand',
            'REDUCE',
            'sum',
            1,
            '@price',
            'as',
            'price',
            'APPLY',
            '(@price % 10)',
            'AS',
            'price',
            'SORTBY',
            4,
            '@price',
            'asc',
            '@brand',
            'desc',
            'MAX',
            10,
        )
        self.env.assertListEqual([
            292L, ['brand', 'zps', 'price', '0'],
            ['brand', 'zalman', 'price', '0'], [
                'brand', 'yoozoo', 'price', '0'
            ], ['brand', 'white label', 'price', '0'],
            ['brand', 'stinky', 'price', '0'],
            ['brand', 'polaroid', 'price', '0'],
            ['brand', 'plantronics', 'price', '0'],
            ['brand', 'ozone', 'price', '0'], ['brand', 'oooo', 'price', '0'],
            ['brand', 'neon', 'price', '0']
        ], res)

    def testExpressions(self):
        pass

    def testNoGroup(self):
        res = self.env.cmd(
            'ft.aggregate',
            'games',
            '*',
            'LOAD',
            '2',
            '@brand',
            '@price',
            'APPLY',
            'floor(sqrt(@price)) % 10',
            'AS',
            'price',
            'SORTBY',
            4,
            '@price',
            'desc',
            '@brand',
            'desc',
            'MAX',
            5,
        )
        exp = [
            2265L, ['brand', 'xbox', 'price', '9'],
            ['brand', 'turtle beach', 'price', '9'],
            ['brand', 'trust', 'price', '9'],
            ['brand', 'steelseries', 'price', '9'],
            ['brand', 'speedlink', 'price', '9']
        ]
        # exp = [2265L, ['brand', 'Xbox', 'price', '9'], ['brand', 'Turtle Beach', 'price', '9'], [
        #  'brand', 'Trust', 'price', '9'], ['brand', 'SteelSeries', 'price', '9'], ['brand', 'Speedlink', 'price', '9']]
        self.env.assertListEqual(exp[1], res[1])

    def testLoad(self):
        res = self.env.cmd('ft.aggregate', 'games', '*', 'LOAD', '3', '@brand',
                           '@price', '@nonexist', 'SORTBY', 2, '@price',
                           'DESC', 'MAX', 2)
        exp = [
            3L, ['brand', '', 'price', '759.12'],
            ['brand', 'Sony', 'price', '695.8']
        ]
        self.env.assertEqual(exp[1], res[1])

    def testSplit(self):
        res = self.env.cmd(
            'ft.aggregate', 'games', '*', 'APPLY',
            'split("hello world,  foo,,,bar,", ",", " ")', 'AS', 'strs',
            'APPLY', 'split("hello world,  foo,,,bar,", " ", ",")', 'AS',
            'strs2', 'APPLY', 'split("hello world,  foo,,,bar,", "", "")',
            'AS', 'strs3', 'APPLY', 'split("hello world,  foo,,,bar,")', 'AS',
            'strs4', 'APPLY', 'split("hello world,  foo,,,bar,",",")', 'AS',
            'strs5', 'APPLY', 'split("")', 'AS', 'empty', 'LIMIT', '0', '1')
        # print "Got {} results".format(len(res))
        # return
        # pprint.pprint(res)
        self.env.assertListEqual([
            1L,
            [
                'strs', ['hello world', 'foo', 'bar'], 'strs2',
                ['hello', 'world', 'foo,,,bar'], 'strs3',
                ['hello world,  foo,,,bar,'], 'strs4',
                ['hello world', 'foo', 'bar'], 'strs5',
                ['hello world', 'foo', 'bar'], 'empty', []
            ]
        ], res)

    def testFirstValue(self):
        res = self.env.cmd(
            'ft.aggregate', 'games',
            '@brand:(sony|matias|beyerdynamic|(mad catz))', 'GROUPBY', 1,
            '@brand', 'REDUCE', 'FIRST_VALUE', 4, '@title', 'BY', '@price',
            'DESC', 'AS', 'top_item', 'REDUCE', 'FIRST_VALUE', 4, '@price',
            'BY', '@price', 'DESC', 'AS', 'top_price', 'REDUCE', 'FIRST_VALUE',
            4, '@title', 'BY', '@price', 'ASC', 'AS', 'bottom_item', 'REDUCE',
            'FIRST_VALUE', 4, '@price', 'BY', '@price', 'ASC', 'AS',
            'bottom_price', 'SORTBY', 2, '@top_price', 'DESC', 'MAX', 5)
        expected = [
            4L,
            [
                'brand', 'sony', 'top_item',
                'sony psp slim &amp; lite 2000 console', 'top_price', '695.8',
                'bottom_item',
                'sony dlchd20p high speed hdmi cable for playstation 3',
                'bottom_price', '5.88'
            ],
            [
                'brand', 'matias', 'top_item', 'matias halfkeyboard usb',
                'top_price', '559.99', 'bottom_item',
                'matias halfkeyboard usb', 'bottom_price', '559.99'
            ],
            [
                'brand', 'beyerdynamic', 'top_item',
                'beyerdynamic mmx300 pc gaming premium digital headset with microphone',
                'top_price', '359.74', 'bottom_item',
                'beyerdynamic headzone pc gaming digital surround sound system with mmx300 digital headset with microphone',
                'bottom_price', '0'
            ],
            [
                'brand', 'mad catz', 'top_item',
                'mad catz s.t.r.i.k.e.7 gaming keyboard', 'top_price',
                '295.95', 'bottom_item',
                'madcatz mov4545 xbox replacement breakaway cable',
                'bottom_price', '3.49'
            ]
        ]
        self.env.assertListEqual(expected, res)

    def testLoadAfterGroupBy(self):
        with self.env.assertResponseError():
            self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', 1, '@brand',
                         'LOAD', 1, '@brand')
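
These tests rely on a to_dict helper that is not shown in this snippet; judging from how it is applied to flat reply rows such as ['brand', '', 'count', '1518'], it presumably pairs alternating keys and values. A minimal sketch of the assumed behaviour:

# Assumed behaviour of the to_dict helper used throughout TestAggregate:
# pair up alternating key/value entries of a flat reply row.
def to_dict(row):
    return {row[i]: row[i + 1] for i in range(0, len(row), 2)}

to_dict(['brand', '', 'count', '1518'])  # -> {'brand': '', 'count': '1518'}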
Code example #7
class testExpandInto():
    def __init__(self):
        self.env = Env(decodeResponses=True)

    # test expand into single hop no multi-edge
    # (:A)-[:R]->(:B)
    def test01_single_hop_no_multi_edge(self):
        redis_con = self.env.getConnection()
        graph = Graph(GRAPH_ID, redis_con)
        try:
            graph.delete()
        except:
            pass

        # create graph
        query = "CREATE (:A)-[:R {v:1}]->(:B)"
        graph.query(query)

        # make sure (a) is connected to (b) via a 'R' edge
        query = "MATCH (a:A)-[]->(b:B) WITH a,b MATCH (a)-[:R]->(b) RETURN count(a)"
        plan = graph.execution_plan(query)
        result = graph.query(query)
        self.env.assertIn("Expand Into", plan)
        self.env.assertEquals(1, result.result_set[0][0])

        query = "MATCH (a:A)-[]->(b:B) WITH a,b MATCH (a)-[e:R]->(b) RETURN e.v"
        plan = graph.execution_plan(query)
        result = graph.query(query)
        self.env.assertIn("Expand Into", plan)
        self.env.assertEquals(1, result.result_set[0][0])

    # test expand into single hop multi-edge
    # (:A)-[:R]->(:B), (:A)-[:R]->(:B)
    def test02_single_hop_multi_edge(self):
        redis_con = self.env.getConnection()
        graph = Graph(GRAPH_ID, redis_con)
        graph.delete()

        # create graph
        query = "CREATE (a:A)-[:R {v:1}]->(b:B), (a)-[:R {v:2}]->(b)"
        graph.query(query)

        # make sure (a) is connected to (b) via a 'R' edge
        # there are multiple edges of type 'R' connecting the two
        query = "MATCH (a:A)-[]->(b:B) WITH a,b MATCH (a)-[e:R]->(b) RETURN count(e)"
        plan = graph.execution_plan(query)
        result = graph.query(query)
        self.env.assertIn("Expand Into", plan)
        self.env.assertEquals(2, result.result_set[0][0])

        query = "MATCH (a:A)-[]->(b:B) WITH a,b MATCH (a)-[e:R]->(b) RETURN e.v ORDER BY e.v"
        plan = graph.execution_plan(query)
        result = graph.query(query)
        self.env.assertIn("Expand Into", plan)
        self.env.assertEquals(1, result.result_set[0][0])
        self.env.assertEquals(2, result.result_set[1][0])

    # test expand into multiple hops with no multi-edge
    # (:A)-[:R]->()-[:R]->(:B)
    def test03_multi_hop_no_multi_edge(self):
        redis_con = self.env.getConnection()
        graph = Graph(GRAPH_ID, redis_con)
        graph.delete()

        # create graph
        query = "CREATE (:A)-[:R]->()-[:R]->(:B)"
        graph.query(query)

        # make sure (a) is connected to (b) via a 'R' edge
        # expand-into inspects the result of (F*R*ADJ)[a,b]
        query = "MATCH (a:A)-[*]->(b:B) WITH a,b MATCH (a)-[:R]->()-[]->(b) RETURN count(a)"
        plan = graph.execution_plan(query)
        result = graph.query(query)
        self.env.assertIn("Expand Into", plan)
        self.env.assertEquals(1, result.result_set[0][0])

    # test expand into multiple hops with multi-edge
    # (a:A)-[:R]->(i)-[:R]->(b:B)
    # (a:A)-[:R]->(i)-[:R]->(b:B)
    def test04_multi_hop_multi_edge(self):
        redis_con = self.env.getConnection()
        graph = Graph(GRAPH_ID, redis_con)
        graph.delete()

        # create graph
        query = "CREATE (a:A), (b:B), (i), (a)-[:R]->(i)-[:R]->(b), (a)-[:R]->(i)-[:R]->(b)"
        graph.query(query)

        # make sure (a) is connected to (b) via a 'R' edge
        # there are multiple ways to reach (b) from (a)
        query = "MATCH (a:A)-[*]->(b:B) WITH a,b MATCH (a)-[:R]->()-[]->(b) RETURN count(1)"
        plan = graph.execution_plan(query)
        result = graph.query(query)
        self.env.assertIn("Expand Into", plan)
        self.env.assertEquals(4, result.result_set[0][0])
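
A toy numpy sketch (not part of the test suite) of the matrix intuition in the comments above: with two parallel edges on each hop, the (a, b) entry of the product of the two hop matrices is 2 * 2 = 4, matching the count(1) assertion in test04:

import numpy as np

# Hop matrices for a -> i (two :R edges) and i -> b (two edges); nodes ordered [a, i, b].
HOP1 = np.array([[0, 2, 0],
                 [0, 0, 0],
                 [0, 0, 0]])
HOP2 = np.array([[0, 0, 0],
                 [0, 0, 2],
                 [0, 0, 0]])
print((HOP1 @ HOP2)[0, 2])  # 4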
Code example #8
class testQueryValidationFlow(FlowTestsBase):
    def __init__(self):
        self.env = Env(decodeResponses=True)
        global redis_con
        global redis_graph
        redis_con = self.env.getConnection()
        redis_graph = Graph("G", redis_con)
        self.populate_graph()

    def populate_graph(self):
        # Create a single graph.
        global redis_graph
        node = Node(properties={"age": 34})
        redis_graph.add_node(node)
        redis_graph.commit()

    # Expect an error when trying to use a function which does not exist.
    def test01_none_existing_function(self):
        query = """MATCH (n) RETURN noneExistingFunc(n.age) AS cast"""
        try:
            redis_graph.query(query)
            self.env.assertTrue(False)
        except redis.exceptions.ResponseError:
            # Expecting an error.
            pass

    # Make sure function name validation is case-insensitive.
    def test02_case_insensitive_function_name(self):
        try:
            query = """MATCH (n) RETURN mAx(n.age)"""
            redis_graph.query(query)
        except redis.exceptions.ResponseError:
            # function validation should be case insensitive.
            self.env.assertTrue(False)

    def test03_edge_missing_relation_type(self):
        try:
            query = """CREATE (n:Person {age:32})-[]->(:person {age:30})"""
            redis_graph.query(query)
            self.env.assertTrue(False)
        except redis.exceptions.ResponseError:
            # Expecting an error.
            pass

    def test04_escaped_quotes(self):
        query = r"CREATE (:escaped{prop1:'single \' char', prop2: 'double \" char', prop3: 'mixed \' and \" chars'})"
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.nodes_created, 1)
        self.env.assertEquals(actual_result.properties_set, 3)

        query = r"MATCH (a:escaped) RETURN a.prop1, a.prop2, a.prop3"
        actual_result = redis_graph.query(query)
        expected_result = [[
            "single ' char", 'double " char', 'mixed \' and " chars'
        ]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test05_invalid_entity_references(self):
        try:
            query = """MATCH (a) RETURN e"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError:
            # Expecting an error.
            pass

        try:
            query = """MATCH (a) RETURN a ORDER BY e"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError:
            # Expecting an error.
            pass

    def test06_where_references(self):
        try:
            query = """MATCH (a) WHERE fake = true RETURN a"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError:
            # Expecting an error.
            pass

    def test07_with_references(self):
        try:
            query = """MATCH (a) WITH e RETURN e"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError:
            # Expecting an error.
            pass

    def test08_count_distinct_star(self):
        try:
            query = """MATCH (a) RETURN COUNT(DISTINCT *)"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError:
            # Expecting an error.
            pass

    def test09_invalid_apply_all(self):
        try:
            query = """MATCH (a) RETURN SUM(*)"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError:
            # Expecting an error.
            pass

    def test10_missing_params(self):
        try:
            query = """MATCH (a {name:$name}) RETURN a"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError:
            # Expecting an error.
            pass

    def test11_param_error(self):
        try:
            query = """CYPHER name=({name:'a'}) MATCH (a {name:$name}) RETURN a"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError:
            # Expecting an error.
            pass

    def test12_invalid_query_order(self):
        try:
            query = """MERGE (a) MATCH (a)-[]->(b) RETURN b"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError:
            # Expecting an error.
            pass

    def test13_create_bound_variables(self):
        try:
            query = """MATCH (a)-[e]->(b) CREATE (a)-[e]->(b)"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError:
            # Expecting an error.
            pass

    def test14_treat_path_as_entity(self):
        redis_graph.query("CREATE ()-[:R]->()")
        try:
            query = """MATCH x=()-[]->() RETURN x.name"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError:
            # Expecting an error.
            pass

    def test15_dont_crash_on_multiple_errors(self):
        try:
            query = """MATCH (a) where id(a) IN range(0) OR id(a) in range(1)"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError:
            # Expecting an error.
            pass

    # Run a query in which a parsed parameter introduces a type in an unsupported context.
    def test16_param_introduces_unhandled_type(self):
        try:
            query = """CYPHER props={a:1,b:2} CREATE (a:A $props)"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            assert ("Encountered unhandled type" in str(e))
            pass

    # Validate that the module fails properly with incorrect argument counts.
    def test17_query_arity(self):
        # Call GRAPH.QUERY with a missing query argument.
        try:
            res = redis_con.execute_command("GRAPH.QUERY", "G")
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            assert ("wrong number of arguments" in str(e))
            pass

    # Run queries in which compile-time variables are accessed but not defined.
    def test18_undefined_variable_access(self):
        try:
            query = """CREATE (:person{name:bar[1]})"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            assert ("not defined" in str(e))
            pass

        try:
            query = """MATCH (a {val: undeclared}) RETURN a"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            assert ("not defined" in str(e))
            pass

        try:
            query = """UNWIND [fake] AS ref RETURN ref"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            assert ("not defined" in str(e))
            pass

    def test19_invalid_cypher_options(self):
        query = "EXPLAIN MATCH (p:president)-[:born]->(:state {name:'Hawaii'}) RETURN p"
        try:
            redis_graph.query(query)
            assert (False)
        except:
            # Expecting an error.
            pass

        query = "PROFILE MATCH (p:president)-[:born]->(:state {name:'Hawaii'}) RETURN p"
        try:
            redis_graph.query(query)
            assert (False)
        except:
            # Expecting an error.
            pass

        query = "CYPHER val=1 EXPLAIN MATCH (p:president)-[:born]->(:state {name:'Hawaii'}) RETURN p"
        try:
            redis_graph.query(query)
            assert (False)
        except:
            # Expecting an error.
            pass

        query = "CYPHER val=1 PROFILE MATCH (p:president)-[:born]->(:state {name:'Hawaii'}) RETURN p"
        try:
            redis_graph.query(query)
            assert (False)
        except:
            # Expecting an error.
            pass

    # Undirected edges are not allowed in CREATE clauses.
    def test20_undirected_edge_creation(self):
        try:
            query = """CREATE (:Endpoint)-[:R]-(:Endpoint)"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            assert ("Only directed relationships" in str(e))
            pass

    # Applying a filter on a non-existent entity.
    def test20_non_existing_graph_entity(self):
        try:
            query = """MATCH p=() WHERE p.name='value' RETURN p"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            assert ("Type mismatch: expected Node but was Path" in str(e))
            pass

    # Comments should not affect query functionality.
    def test21_ignore_query_comments(self):
        query = """MATCH (n)  // This is a comment
                   /* This is a block comment */
                   WHERE EXISTS(n.age)
                   RETURN n.age /* Also a block comment*/"""
        actual_result = redis_graph.query(query)
        expected_result = [[34]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """/* A block comment*/ MATCH (n)  // This is a comment
                /* This is a block comment */
                WHERE EXISTS(n.age)
                RETURN n.age /* Also a block comment*/"""
        actual_result = redis_graph.query(query)
        expected_result = [[34]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """// This is a comment
                MATCH (n)  // This is a comment
                /* This is a block comment */
                WHERE EXISTS(n.age)
                RETURN n.age /* Also a block comment*/"""
        actual_result = redis_graph.query(query)
        expected_result = [[34]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """MATCH (n)  /* This is a block comment */ WHERE EXISTS(n.age)
                RETURN n.age /* Also a block comment*/"""
        actual_result = redis_graph.query(query)
        expected_result = [[34]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    # Validate procedure call references and definitions.
    def test22_procedure_validations(self):
        try:
            # procedure call referring to a non-existent alias 'n'
            query = """CALL db.idx.fulltext.queryNodes(n, 'B') YIELD node RETURN node"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            assert ("not defined" in str(e))
            pass

        # Referring to the procedure call's original output when the output is aliased.
        try:
            query = """CALL db.idx.fulltext.queryNodes('A', 'B') YIELD node AS n RETURN node"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            assert ("not defined" in str(e))
            pass

        # valid procedure call, no output aliasing
        query = """CALL db.idx.fulltext.queryNodes('A', 'B') YIELD node RETURN node"""
        redis_graph.query(query)

        # valid procedure call, output aliasing
        query = """CALL db.idx.fulltext.queryNodes('A', 'B') YIELD node AS n RETURN n"""
        redis_graph.query(query)

    # Applying a filter for a non-boolean constant should raise a compile-time error.
    def test23_invalid_constant_filter(self):
        try:
            query = """MATCH (a) WHERE 1 RETURN a"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            assert ("Expected boolean predicate" in str(e))
            pass

    # Referencing a variable before defining it should raise a compile-time error.
    def test24_reference_before_definition(self):
        try:
            query = """MATCH ({prop: reference}) MATCH (reference) RETURN *"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            assert ("not defined" in str(e))
            pass

    # Invalid filters in cartesian products should raise errors.
    def test25_cartesian_product_invalid_filter(self):
        try:
            query = """MATCH p1=(), (n), ({prop: p1.path_val}) RETURN *"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            assert ("Type mismatch: expected Node but was Path" in str(e))
            pass

    # Scalar predicates in filters should raise errors.
    def test26_invalid_filter_predicate(self):
        try:
            query = """WITH 1 AS a WHERE '' RETURN a"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            assert ("Expected boolean predicate" in str(e))
            pass

    # Conditional filters with non-boolean scalar predicate children should raise errors.
    def test27_invalid_filter_predicate_child(self):
        try:
            # 'Amor' is an invalid construct for the RHS of 'OR'.
            query = """MATCH (a:Author) WHERE a.name CONTAINS 'Ernest' OR 'Amor' RETURN a"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            assert ("Expected boolean predicate" in str(e))
            pass

    # The NOT operator does not compare left and right side expressions.
    def test28_invalid_filter_binary_not(self):
        try:
            # Query should have been:
            # MATCH (u) where u.v IS NOT NULL RETURN u
            query = """MATCH (u) where u.v NOT NULL RETURN u"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            assert ("Invalid usage of 'NOT' filter" in str(e))
            pass

    def test29_invalid_filter_non_boolean_constant(self):
        try:
            query = """MATCH (a) WHERE a RETURN a"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            assert ("expected Boolean but was Node" in str(e))
            pass

        try:
            query = """MATCH (a) WHERE 1+rand() RETURN a"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            assert ("expected Boolean but was Float" in str(e))
            pass

        try:
            query = """CYPHER p=3 WITH 1 AS a WHERE $p RETURN a"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            assert ("expected Boolean but was Integer" in str(e))
            pass

        # 'val' is a boolean, so this query is valid.
        query = """WITH true AS val WHERE val return val"""
        redis_graph.query(query)

        # Non-existent properties are treated as NULLs, which are boolean in Cypher's 3-valued logic.
        query = """MATCH (a) WHERE a.fakeprop RETURN a"""
        redis_graph.query(query)

    # Encountering traversals as property values or ORDER BY expressions should raise compile-time errors.
    def test30_unexpected_traversals(self):
        queries = [
            """MATCH (a {prop: ()-[]->()}) RETURN a""",
            """MATCH (a) RETURN a ORDER BY (a)-[]->()""",
            """MATCH (a) RETURN (a)-[]->()"""
        ]
        for query in queries:
            try:
                redis_graph.query(query)
                assert (False)
            except redis.exceptions.ResponseError as e:
                # Expecting an error.
                assert ("Encountered path traversal" in str(e))

    def test31_set_invalid_property_type(self):
        # Skip this test if running under Valgrind, as it causes a memory leak.
        if Env().envRunner.debugger is not None:
            Env().skip()

        queries = [
            """MATCH (a) CREATE (:L {v: a})""",
            """MATCH (a), (b) WHERE b.age IS NOT NULL SET b.age = a""",
            """MERGE (a) ON MATCH SET a.age = a"""
        ]
        for q in queries:
            try:
                redis_graph.query(q)
                assert (False)
            except redis.exceptions.ResponseError as e:
                # Expecting an error.
                assert ("Property values can only be of primitive types"
                        in str(e))
                pass

    def test32_return_following_clauses(self):
        # After a RETURN clause we're expecting only the following clauses:
        # SKIP, LIMIT, ORDER BY and UNION. Given that SKIP and LIMIT are
        # actually attributes of the RETURN clause, this leaves us with
        # ORDER BY and UNION.

        invalid_queries = [
            """RETURN 1 CREATE ()""", """RETURN 1 RETURN 2""",
            """MATCH(n) RETURN n DELETE n""",
            """MATCH(n) RETURN n SET n.v = 1""", """RETURN 1 MERGE ()""",
            """RETURN 1 MATCH (n) RETURN n""",
            """RETURN 1 WITH 1 as one RETURN one"""
        ]

        # Invalid queries, expecting errors.
        for q in invalid_queries:
            try:
                redis_graph.query(q)
                assert (False)
            except redis.exceptions.ResponseError as e:
                # Expecting an error.
                assert ("Unexpected clause following RETURN" in str(e))
                pass

    # Parameters cannot reference aliases.
    def test33_alias_reference_in_param(self):
        try:
            query = """CYPHER A=[a] RETURN 5"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            assert ("Attempted to access variable" in str(e))
            pass

    def test34_self_referential_properties(self):
        # Skip this test if running under Valgrind, as it causes a memory leak.
        if Env().envRunner.debugger is not None:
            Env().skip()

        try:
            # The server should emit an error on trying to create a node with a self-referential property.
            query = """CREATE (a:L {v: a.v})"""
            redis_graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting an error.
            self.env.assertIn("undefined property", str(e))

        # MATCH clauses should be able to use self-referential properties as existential filters.
        query = """MATCH (a {age: a.age}) RETURN a.age"""
        actual_result = redis_graph.query(query)
        expected_result = [[34]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    # Test a query that allocates a large buffer.
    def test35_large_query(self):
        retval = "abcdef" * 1_000
        query = "RETURN " + "\"" + retval + "\""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set[0][0], retval)

    def test36_multiple_proc_calls(self):
        query = """MATCH (a)
                   CALL algo.BFS(a, 3, NULL) YIELD nodes as ns1
                   MATCH (b)
                   CALL algo.BFS(b, 3, NULL) YIELD nodes as ns2
                   RETURN ns1"""
        plan = redis_graph.execution_plan(query)
        self.env.assertTrue(plan.count("ProcedureCall") == 2)

    def test37_list_comprehension_missuse(self):
        # 'all' expects a list comprehension; unfortunately this isn't
        # enforced by the parser, so a user can misuse the function and our
        # current arithmetic expression construction logic will build a
        # malformed function call.

        # Make sure we receive an exception for each misused query.
        queries = [
            "WITH 1 AS x RETURN all(x > 2)", "WITH 1 AS x RETURN all([1],2,3)"
        ]

        for q in queries:
            try:
                redis_graph.query(q)
                assert (False)
            except redis.exceptions.ResponseError as e:
                pass
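
Most of the tests above repeat the same try/except/assert(False) pattern; a small hypothetical helper (not part of the original suite) could condense it while keeping the optional message check:

import redis

# Hypothetical convenience wrapper for the repeated "expect a ResponseError" pattern.
def expect_error(graph, query, fragment=None):
    try:
        graph.query(query)
        assert False, "query was expected to fail: %s" % query
    except redis.exceptions.ResponseError as e:
        if fragment is not None:
            assert fragment in str(e)

# e.g. expect_error(redis_graph, "RETURN 1 RETURN 2", "Unexpected clause following RETURN")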
Code example #9
class testRelationPattern(FlowTestsBase):
    def __init__(self):
        self.env = Env(decodeResponses=True)
        global redis_con
        global redis_graph
        redis_con = self.env.getConnection()
        redis_graph = Graph(GRAPH_ID, redis_con)
        self.populate_graph()

    def populate_graph(self):
        # Construct a graph with the form:
        # (v1)-[:e]->(v2)-[:e]->(v3)
        node_props = ['v1', 'v2', 'v3']

        nodes = []
        for idx, v in enumerate(node_props):
            node = Node(label="L", properties={"val": v})
            nodes.append(node)
            redis_graph.add_node(node)

        edge = Edge(nodes[0], "e", nodes[1])
        redis_graph.add_edge(edge)

        edge = Edge(nodes[1], "e", nodes[2])
        redis_graph.add_edge(edge)

        redis_graph.commit()

    # Test patterns that traverse 1 edge.
    def test01_one_hop_traversals(self):
        # Conditional traversal with label
        query = """MATCH (a)-[:e]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        result_a = redis_graph.query(query)

        # Conditional traversal without label
        query = """MATCH (a)-[]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        result_b = redis_graph.query(query)

        # Fixed-length 1-hop traversal with label
        query = """MATCH (a)-[:e*1]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        result_c = redis_graph.query(query)

        # Fixed-length 1-hop traversal without label
        query = """MATCH (a)-[*1]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        result_d = redis_graph.query(query)

        self.env.assertEquals(result_b.result_set, result_a.result_set)
        self.env.assertEquals(result_c.result_set, result_a.result_set)
        self.env.assertEquals(result_d.result_set, result_a.result_set)

    # Test patterns that traverse 2 edges.
    def test02_two_hop_traversals(self):
        # Conditional two-hop traversal without referenced intermediate node
        query = """MATCH (a)-[:e]->()-[:e]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v3']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Fixed-length two-hop traversal (same expected result)
        query = """MATCH (a)-[:e*2]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Variable-length traversal with a minimum bound of 2 (same expected result)
        query = """MATCH (a)-[*2..]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Conditional two-hop traversal with referenced intermediate node
        query = """MATCH (a)-[:e]->(b)-[:e]->(c) RETURN a.val, b.val, c.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2', 'v3']]
        self.env.assertEquals(actual_result.result_set, expected_result)

    # Test variable-length patterns
    def test03_var_len_traversals(self):
        # Variable-length traversal with label
        query = """MATCH (a)-[:e*]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2'],
                           ['v1', 'v3'],
                           ['v2', 'v3']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Variable-length traversal without label (same expected result)
        query = """MATCH (a)-[*]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Variable-length traversal with bounds 1..2 (same expected result)
        query = """MATCH (a)-[:e*1..2]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Variable-length traversal with bounds 0..1
        # This will return every node paired with itself, as well as all
        # single-hop edges.
        query = """MATCH (a)-[:e*0..1]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v1'],
                           ['v1', 'v2'],
                           ['v2', 'v2'],
                           ['v2', 'v3'],
                           ['v3', 'v3']]
        self.env.assertEquals(actual_result.result_set, expected_result)

    # Test variable-length patterns with alternately labeled source
    # and destination nodes, which can cause different execution sequences.
    def test04_variable_length_labeled_nodes(self):
        # Source and edge labeled variable-length traversal
        query = """MATCH (a:L)-[:e*]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2'],
                           ['v1', 'v3'],
                           ['v2', 'v3']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Destination and edge labeled variable-length traversal (same expected result)
        query = """MATCH (a)-[:e*]->(b:L) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Source labeled variable-length traversal (same expected result)
        query = """MATCH (a:L)-[*]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Destination labeled variable-length traversal (same expected result)
        query = """MATCH (a)-[*]->(b:L) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

    # Test traversals over explicit relationship types
    def test05_relation_types(self):
        # Add two nodes and two edges of a new type.
        # The new form of the graph will be:
        # (v1)-[:e]->(v2)-[:e]->(v3)-[:q]->(v4)-[:q]->(v5)
        query = """MATCH (n {val: 'v3'}) CREATE (n)-[:q]->(:L {val: 'v4'})-[:q]->(:L {val: 'v5'})"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.nodes_created, 2)
        self.env.assertEquals(actual_result.relationships_created, 2)

        # Verify the graph structure
        query = """MATCH (a)-[e]->(b) RETURN a.val, b.val, TYPE(e) ORDER BY TYPE(e), a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2', 'e'],
                           ['v2', 'v3', 'e'],
                           ['v3', 'v4', 'q'],
                           ['v4', 'v5', 'q']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Verify conditional traversals with explicit relation types
        query = """MATCH (a)-[:e]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2'],
                           ['v2', 'v3']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """MATCH (a)-[:q]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v3', 'v4'],
                           ['v4', 'v5']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Verify conditional traversals with multiple explicit relation types
        query = """MATCH (a)-[e:e|:q]->(b) RETURN a.val, b.val, TYPE(e) ORDER BY TYPE(e), a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2', 'e'],
                           ['v2', 'v3', 'e'],
                           ['v3', 'v4', 'q'],
                           ['v4', 'v5', 'q']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Verify variable-length traversals with explicit relation types
        query = """MATCH (a)-[:e*]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2'],
                           ['v1', 'v3'],
                           ['v2', 'v3']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """MATCH (a)-[:q*]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v3', 'v4'],
                           ['v3', 'v5'],
                           ['v4', 'v5']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Verify variable-length traversals with multiple explicit relation types
        query = """MATCH (a)-[:e|:q*]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2'],
                           ['v1', 'v3'],
                           ['v1', 'v4'],
                           ['v1', 'v5'],
                           ['v2', 'v3'],
                           ['v2', 'v4'],
                           ['v2', 'v5'],
                           ['v3', 'v4'],
                           ['v3', 'v5'],
                           ['v4', 'v5']]
        self.env.assertEquals(actual_result.result_set, expected_result)

    # Test traversals over transposed edge matrices.
    def test06_transposed_traversals(self):
        # The intermediate node 'b' will be used to form the scan operation because it is filtered.
        # As such, one of the traversals must be transposed.
        query = """MATCH (a)-[e]->(b {val:'v3'})-[]->(c:L) RETURN COUNT(e)"""
        plan = redis_graph.execution_plan(query)

        # Verify that the execution plan contains two traversals following opposing edge directions.
        self.env.assertIn("<-", plan)
        self.env.assertIn("->", plan)

        # Verify results.
        actual_result = redis_graph.query(query)
        expected_result = [[1]]
        self.env.assertEquals(actual_result.result_set, expected_result)

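    # Verify a fixed-length multi-hop traversal in which one side of the
    # pattern must be transposed to reach the shared destination node.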
    def test07_transposed_multi_hop(self):
        redis_con = self.env.getConnection()
        g = Graph("tran_multi_hop", redis_con)

        # (a)-[R]->(b)-[R]->(c)<-[R]-(d)<-[R]-(e)
        a = Node(properties={"val": 'a'})
        b = Node(properties={"val": 'b'})
        c = Node(properties={"val": 'c'})
        d = Node(properties={"val": 'd'})
        e = Node(properties={"val": 'e'})
        
        g.add_node(a)
        g.add_node(b)
        g.add_node(c)
        g.add_node(d)
        g.add_node(e)

        ab = Edge(a, "R", b)
        bc = Edge(b, "R", c)
        ed = Edge(e, "R", d)
        dc = Edge(d, "R", c)

        g.add_edge(ab)
        g.add_edge(bc)
        g.add_edge(ed)
        g.add_edge(dc)

        g.flush()

        q = """MATCH (a)-[*2]->(b)<-[*2]-(c) RETURN a.val, b.val, c.val ORDER BY a.val, b.val, c.val"""
        actual_result = g.query(q)
        expected_result = [['a', 'c', 'a'], ['a', 'c', 'e'], ['e', 'c', 'a'], ['e', 'c', 'e']]
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test08_transposed_varlen_traversal(self):
        # Verify that variable-length traversals with nested transpose operations perform correctly.
        query = """MATCH (a {val: 'v1'})-[*]-(b {val: 'v2'})-[:e]->(:L {val: 'v3'}) RETURN a.val ORDER BY a.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1']]
        self.env.assertEquals(actual_result.result_set, expected_result)

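    # Verify that edge values resolve in the correct order regardless of which
    # labeled node seeds the scan when part of the pattern is transposed.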
    def test09_transposed_elem_order(self):
        redis_con = self.env.getConnection()
        g = Graph("transpose_patterns", redis_con)

        # Create a new graph of the form:
        # (A)<-[1]-(B)-[2]->(C)
        g.query("CREATE (a:A)<-[:E {val:'ba'}]-(b:B)-[:E {val:'bc'}]->(c:C)")

        queries = ["MATCH (a:A)<-[e1]-(b:B)-[e2]->(c:C) RETURN e1.val, e2.val",
                   "MATCH (a:A) WITH a MATCH (a)<-[e1]-(b:B)-[e2]->(c:C) RETURN e1.val, e2.val",
                   "MATCH (b:B) WITH b MATCH (a:A)<-[e1]-(b)-[e2]->(c:C) RETURN e1.val, e2.val",
                   "MATCH (c:C) WITH c MATCH (a:A)<-[e1]-(b:B)-[e2]->(c) RETURN e1.val, e2.val",
                   ]
        expected_result = [['ba', 'bc']]
        for query in queries:
            actual_result = g.query(query)
            self.env.assertEquals(actual_result.result_set, expected_result)

    def test10_triple_edge_type(self):
        # Construct a simple graph:
        # (A)-[X]->(B)
        # (A)-[Y]->(C)
        # (A)-[Z]->(D)
        g = Graph("triple_edge_type", redis_con)
        q = "CREATE(a:A), (b:B), (c:C), (d:D), (a)-[:X]->(b), (a)-[:Y]->(c), (a)-[:Z]->(d)"
        g.query(q)

        labels = ['X', 'Y', 'Z']
        expected_result = [[['B']], [['C']], [['D']]]

        q = "MATCH (a)-[:{L0}|:{L1}|:{L2}]->(b) RETURN labels(b) AS label ORDER BY label"
        import itertools
        for perm in itertools.permutations(labels):
            res = g.query(q.format(L0=perm[0], L1=perm[1], L2=perm[2]))
            self.env.assertEquals(res.result_set, expected_result)
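The traversal patterns above can be reproduced outside the test harness. A minimal sketch, assuming the redisgraph-py client and a local Redis server with the RedisGraph module loaded (the graph name 'traversal_demo' is illustrative):

import redis
from redisgraph import Graph

r = redis.Redis(host='localhost', port=6379, decode_responses=True)
g = Graph('traversal_demo', r)

# Build the same (v1)-[:e]->(v2)-[:e]->(v3) chain the tests above traverse.
g.query("CREATE (:L {val:'v1'})-[:e]->(:L {val:'v2'})-[:e]->(:L {val:'v3'})")

# A variable-length traversal over :e edges returns every reachable pair.
res = g.query("MATCH (a)-[:e*]->(b) RETURN a.val, b.val ORDER BY a.val, b.val")
print(res.result_set)  # expected: [['v1', 'v2'], ['v1', 'v3'], ['v2', 'v3']]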
Code example #10
0
class testGraphBulkInsertFlow(FlowTestsBase):
    def __init__(self):
        self.env = Env(decodeResponses=True)
        global redis_graph
        global redis_con
        redis_con = self.env.getConnection()
        redis_graph = Graph("graph", redis_con)

    # Run bulk loader script and validate terminal output
    def test01_run_script(self):
        graphname = "graph"
        runner = CliRunner()

        csv_path = os.path.dirname(os.path.abspath(
            __file__)) + '/../../demo/social/resources/bulk_formatted/'
        res = runner.invoke(bulk_insert, [
            '--port', port, '--nodes', csv_path + 'Person.csv', '--nodes',
            csv_path + 'Country.csv', '--relations', csv_path + 'KNOWS.csv',
            '--relations', csv_path + 'VISITED.csv', graphname
        ])

        # The script should report 27 node creations and 56 edge creations
        self.env.assertEquals(res.exit_code, 0)
        self.env.assertIn('27 nodes created', res.output)
        self.env.assertIn('56 relations created', res.output)

    # Validate that the expected nodes and properties have been constructed
    def test02_validate_nodes(self):
        global redis_graph
        # Query the newly-created graph
        query_result = redis_graph.query(
            'MATCH (p:Person) RETURN p.name, p.age, p.gender, p.status, ID(p) ORDER BY p.name'
        )
        # Verify that the Person label exists, has the correct attributes, and is properly populated
        expected_result = [['Ailon Velger', 32, 'male', 'married', 2],
                           ['Alon Fital', 32, 'male', 'married', 1],
                           ['Boaz Arad', 31, 'male', 'married', 4],
                           ['Gal Derriere', 26, 'male', 'single', 11],
                           ['Jane Chernomorin', 31, 'female', 'married', 8],
                           ['Lucy Yanfital', 30, 'female', 'married', 7],
                           ['Mor Yesharim', 31, 'female', 'married', 12],
                           ['Noam Nativ', 34, 'male', 'single', 13],
                           ['Omri Traub', 33, 'male', 'single', 5],
                           ['Ori Laslo', 32, 'male', 'married', 3],
                           ['Roi Lipman', 32, 'male', 'married', 0],
                           ['Shelly Laslo Rooz', 31, 'female', 'married', 9],
                           ['Tal Doron', 32, 'male', 'single', 6],
                           ['Valerie Abigail Arad', 31, 'female', 'married', 10]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # Verify that the Country label exists, has the correct attributes, and is properly populated
        query_result = redis_graph.query(
            'MATCH (c:Country) RETURN c.name, ID(c) ORDER BY c.name')
        expected_result = [['Andora', 21], ['Canada', 18], ['China', 19],
                           ['Germany', 24], ['Greece', 17], ['Italy', 25],
                           ['Japan', 16], ['Kazakhstan', 22],
                           ['Netherlands', 20], ['Prague', 15], ['Russia', 23],
                           ['Thailand', 26], ['USA', 14]]
        self.env.assertEquals(query_result.result_set, expected_result)

    # Validate that the expected relations and properties have been constructed
    def test03_validate_relations(self):
        # Query the newly-created graph
        query_result = redis_graph.query(
            'MATCH (a)-[e:KNOWS]->(b) RETURN a.name, e.relation, b.name ORDER BY e.relation, a.name, b.name'
        )

        expected_result = [['Ailon Velger', 'friend', 'Noam Nativ'],
                           ['Alon Fital', 'friend', 'Gal Derriere'],
                           ['Alon Fital', 'friend', 'Mor Yesharim'],
                           ['Boaz Arad', 'friend', 'Valerie Abigail Arad'],
                           ['Roi Lipman', 'friend', 'Ailon Velger'],
                           ['Roi Lipman', 'friend', 'Alon Fital'],
                           ['Roi Lipman', 'friend', 'Boaz Arad'],
                           ['Roi Lipman', 'friend', 'Omri Traub'],
                           ['Roi Lipman', 'friend', 'Ori Laslo'],
                           ['Roi Lipman', 'friend', 'Tal Doron'],
                           ['Ailon Velger', 'married', 'Jane Chernomorin'],
                           ['Alon Fital', 'married', 'Lucy Yanfital'],
                           ['Ori Laslo', 'married', 'Shelly Laslo Rooz']]
        self.env.assertEquals(query_result.result_set, expected_result)

        query_result = redis_graph.query(
            'MATCH (a)-[e:VISITED]->(b) RETURN a.name, e.purpose, b.name ORDER BY e.purpose, a.name, b.name'
        )

        expected_result = [['Alon Fital', 'business', 'Prague'],
                           ['Alon Fital', 'business', 'USA'],
                           ['Boaz Arad', 'business', 'Netherlands'],
                           ['Boaz Arad', 'business', 'USA'],
                           ['Gal Derriere', 'business', 'Netherlands'],
                           ['Jane Chernomorin', 'business', 'USA'],
                           ['Lucy Yanfital', 'business', 'USA'],
                           ['Mor Yesharim', 'business', 'Germany'],
                           ['Ori Laslo', 'business', 'China'],
                           ['Ori Laslo', 'business', 'USA'],
                           ['Roi Lipman', 'business', 'Prague'],
                           ['Roi Lipman', 'business', 'USA'],
                           ['Tal Doron', 'business', 'Japan'],
                           ['Tal Doron', 'business', 'USA'],
                           ['Alon Fital', 'pleasure', 'Greece'],
                           ['Alon Fital', 'pleasure', 'Prague'],
                           ['Alon Fital', 'pleasure', 'USA'],
                           ['Boaz Arad', 'pleasure', 'Netherlands'],
                           ['Boaz Arad', 'pleasure', 'USA'],
                           ['Jane Chernomorin', 'pleasure', 'Greece'],
                           ['Jane Chernomorin', 'pleasure', 'Netherlands'],
                           ['Jane Chernomorin', 'pleasure', 'USA'],
                           ['Lucy Yanfital', 'pleasure', 'Kazakhstan'],
                           ['Lucy Yanfital', 'pleasure', 'Prague'],
                           ['Lucy Yanfital', 'pleasure', 'USA'],
                           ['Mor Yesharim', 'pleasure', 'Greece'],
                           ['Mor Yesharim', 'pleasure', 'Italy'],
                           ['Noam Nativ', 'pleasure', 'Germany'],
                           ['Noam Nativ', 'pleasure', 'Netherlands'],
                           ['Noam Nativ', 'pleasure', 'Thailand'],
                           ['Omri Traub', 'pleasure', 'Andora'],
                           ['Omri Traub', 'pleasure', 'Greece'],
                           ['Omri Traub', 'pleasure', 'USA'],
                           ['Ori Laslo', 'pleasure', 'Canada'],
                           ['Roi Lipman', 'pleasure', 'Japan'],
                           ['Roi Lipman', 'pleasure', 'Prague'],
                           ['Shelly Laslo Rooz', 'pleasure', 'Canada'],
                           ['Shelly Laslo Rooz', 'pleasure', 'China'],
                           ['Shelly Laslo Rooz', 'pleasure', 'USA'],
                           ['Tal Doron', 'pleasure', 'Andora'],
                           ['Tal Doron', 'pleasure', 'USA'],
                           ['Valerie Abigail Arad', 'pleasure', 'Netherlands'],
                           ['Valerie Abigail Arad', 'pleasure', 'Russia']]
        self.env.assertEquals(query_result.result_set, expected_result)

    def test04_private_identifiers(self):
        graphname = "tmpgraph1"
        # Write temporary files
        with open('/tmp/nodes.tmp', mode='w') as csv_file:
            out = csv.writer(csv_file)
            out.writerow(["_identifier", "nodename"])
            out.writerow([0, "a"])
            out.writerow([5, "b"])
            out.writerow([3, "c"])
        with open('/tmp/relations.tmp', mode='w') as csv_file:
            out = csv.writer(csv_file)
            out.writerow(["src", "dest"])
            out.writerow([0, 3])
            out.writerow([5, 3])

        runner = CliRunner()
        res = runner.invoke(bulk_insert, [
            '--port', port, '--nodes', '/tmp/nodes.tmp', '--relations',
            '/tmp/relations.tmp', graphname
        ])

        # The script should report 3 node creations and 2 edge creations
        self.env.assertEquals(res.exit_code, 0)
        self.env.assertIn('3 nodes created', res.output)
        self.env.assertIn('2 relations created', res.output)

        # Delete temporary files
        os.remove('/tmp/nodes.tmp')
        os.remove('/tmp/relations.tmp')

        tmp_graph = Graph(graphname, redis_con)
        # The field "_identifier" should not be a property in the graph
        query_result = tmp_graph.query('MATCH (a) RETURN a')

        for propname in query_result.header:
            self.env.assertNotIn('_identifier', propname)

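    # Reusing a node identifier should abort the load when relations reference
    # the nodes, but a nodes-only load should still succeed.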
    def test05_reused_identifier(self):
        graphname = "tmpgraph2"
        # Write temporary files
        with open('/tmp/nodes.tmp', mode='w') as csv_file:
            out = csv.writer(csv_file)
            out.writerow(["_identifier", "nodename"])
            out.writerow([0, "a"])
            out.writerow([5, "b"])
            out.writerow([0, "c"])  # reused identifier
        with open('/tmp/relations.tmp', mode='w') as csv_file:
            out = csv.writer(csv_file)
            out.writerow(["src", "dest"])
            out.writerow([0, 3])

        runner = CliRunner()
        res = runner.invoke(bulk_insert, [
            '--port', port, '--nodes', '/tmp/nodes.tmp', '--relations',
            '/tmp/relations.tmp', graphname
        ])

        # The script should fail because a node identifier is reused
        self.env.assertNotEqual(res.exit_code, 0)
        self.env.assertIn('used multiple times', res.output)

        # Run the script again without creating relations
        runner = CliRunner()
        res = runner.invoke(
            bulk_insert,
            ['--port', port, '--nodes', '/tmp/nodes.tmp', graphname])

        # The script should succeed and create 3 nodes
        self.env.assertEquals(res.exit_code, 0)
        self.env.assertIn('3 nodes created', res.output)

        # Delete temporary files
        os.remove('/tmp/nodes.tmp')
        os.remove('/tmp/relations.tmp')

    def test06_batched_build(self):
        # Create the demo graph with a token count limit of 1, forcing the loader to issue multiple queries per input file
        graphname = "batched_graph"
        runner = CliRunner()

        csv_path = os.path.dirname(os.path.abspath(
            __file__)) + '/../../demo/social/resources/bulk_formatted/'
        res = runner.invoke(bulk_insert, [
            '--port', port, '--nodes', csv_path + 'Person.csv', '--nodes',
            csv_path + 'Country.csv', '--relations', csv_path + 'KNOWS.csv',
            '--relations', csv_path + 'VISITED.csv', '--max-token-count', 1,
            graphname
        ])

        self.env.assertEquals(res.exit_code, 0)
        # The script should report statistics multiple times
        self.env.assertGreater(res.output.count('nodes created'), 1)

        new_graph = Graph(graphname, redis_con)

        # The newly-created graph should be identical to the graph created without batching
        original_result = redis_graph.query(
            'MATCH (p:Person) RETURN p, ID(p) ORDER BY p.name')
        new_result = new_graph.query(
            'MATCH (p:Person) RETURN p, ID(p) ORDER BY p.name')
        self.env.assertEquals(original_result.result_set,
                              new_result.result_set)

        original_result = redis_graph.query(
            'MATCH (a)-[e:KNOWS]->(b) RETURN a.name, e, b.name ORDER BY e.relation, a.name'
        )
        new_result = new_graph.query(
            'MATCH (a)-[e:KNOWS]->(b) RETURN a.name, e, b.name ORDER BY e.relation, a.name'
        )
        self.env.assertEquals(original_result.result_set,
                              new_result.result_set)

    def test07_script_failures(self):
        graphname = "tmpgraph3"
        # Write temporary files
        with open('/tmp/nodes.tmp', mode='w') as csv_file:
            out = csv.writer(csv_file)
            out.writerow(["id", "nodename"])
            out.writerow([0])  # Wrong number of properties

        runner = CliRunner()
        res = runner.invoke(
            bulk_insert,
            ['--port', port, '--nodes', '/tmp/nodes.tmp', graphname])

        # The script should fail because a row has the wrong number of fields
        self.env.assertNotEqual(res.exit_code, 0)
        self.env.assertIn('Expected 2 columns', str(res.exception))

        # Write temporary files
        with open('/tmp/nodes.tmp', mode='w') as csv_file:
            out = csv.writer(csv_file)
            out.writerow(["id", "nodename"])
            out.writerow([0, "a"])

        with open('/tmp/relations.tmp', mode='w') as csv_file:
            out = csv.writer(csv_file)
            out.writerow(["src"])  # Incomplete relation description
            out.writerow([0])

        runner = CliRunner()
        res = runner.invoke(bulk_insert, [
            '--port', port, '--nodes', '/tmp/nodes.tmp', '--relations',
            '/tmp/relations.tmp', graphname
        ])

        # The script should fail because a row has the wrong number of fields
        self.env.assertNotEqual(res.exit_code, 0)
        self.env.assertIn('should have at least 2 elements',
                          str(res.exception))

        with open('/tmp/relations.tmp', mode='w') as csv_file:
            out = csv.writer(csv_file)
            out.writerow(["src", "dest"])
            out.writerow([0, "fakeidentifier"])

        runner = CliRunner()
        res = runner.invoke(bulk_insert, [
            '--port', port, '--nodes', '/tmp/nodes.tmp', '--relations',
            '/tmp/relations.tmp', graphname
        ])

        # The script should fail because an invalid node identifier was used
        self.env.assertNotEqual(res.exit_code, 0)
        self.env.assertIn('fakeidentifier', str(res.exception))
        os.remove('/tmp/nodes.tmp')
        os.remove('/tmp/relations.tmp')

        # Test passing invalid arguments directly to the GRAPH.BULK endpoint
        try:
            redis_con.execute_command("GRAPH.BULK", "a", "a", "a")
            self.env.assertTrue(False)
        except redis.exceptions.ResponseError as e:
            self.env.assertIn("Invalid graph operation on empty key", str(e))

    # Verify that numeric, boolean, and null types are properly handled
    def test08_property_types(self):
        graphname = "tmpgraph4"
        # Write temporary files
        with open('/tmp/nodes.tmp', mode='w') as csv_file:
            out = csv.writer(csv_file)
            out.writerow(["numeric", "mixed", "bool"])
            out.writerow([0, '', True])
            out.writerow([5, "notnull", False])
            out.writerow([7, '', False])  # empty 'mixed' field should load as NULL
        with open('/tmp/relations.tmp', mode='w') as csv_file:
            out = csv.writer(csv_file)
            out.writerow(["src", "dest", "prop"])
            out.writerow([0, 5, True])
            out.writerow([5, 7, 3.5])
            out.writerow([7, 0, ''])

        runner = CliRunner()
        res = runner.invoke(bulk_insert, [
            '--port', port, '--nodes', '/tmp/nodes.tmp', '--relations',
            '/tmp/relations.tmp', graphname
        ])

        self.env.assertEquals(res.exit_code, 0)
        self.env.assertIn('3 nodes created', res.output)
        self.env.assertIn('3 relations created', res.output)

        graph = Graph(graphname, redis_con)
        query_result = graph.query(
            'MATCH (a)-[e]->() RETURN a.numeric, a.mixed, a.bool, e.prop ORDER BY a.numeric, e.prop'
        )
        expected_result = [[0, None, True, True], [5, 'notnull', False, 3.5],
                           [7, None, False, None]]

        # The graph should have the correct types for all properties
        self.env.assertEquals(query_result.result_set, expected_result)

    # Verify that the bulk loader does not block the server
    def test09_large_bulk_insert(self):
        graphname = "tmpgraph5"
        prop_str = "Property value to be repeated 100,000 times, generating a multi-megabyte CSV"

        # Write temporary files
        filename = '/tmp/nodes.tmp'
        with open(filename, mode='w') as csv_file:
            out = csv.writer(csv_file)
            out.writerow(["long_property_string"])
            for i in range(100_000):
                out.writerow([prop_str])

        # Instantiate a thread to run the bulk loader
        thread = threading.Thread(target=run_bulk_loader,
                                  args=(graphname, filename))
        thread.start()

        # Ping server while bulk-loader is running
        ping_count = 0
        while thread.is_alive():
            t0 = time.time()
            redis_con.ping()
            t1 = time.time() - t0
            # Verify that pinging the server takes less than 2 seconds during bulk insertion
            self.env.assertLess(t1, 2)
            ping_count += 1

        thread.join()
        # Verify that the server was pinged more than once while the loader ran
        self.env.assertGreater(ping_count, 1)
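The CSV conventions these tests rely on: a node file's header row names the properties, a column whose name starts with an underscore (such as "_identifier" in test04 above) is used only to link rows and is not stored as a property, and a relation file begins with source and destination identifier columns. A minimal sketch of a tiny load, assuming the same bulk_insert command and port variable the tests above use (file paths and graph name are illustrative):

import csv
from click.testing import CliRunner

with open('/tmp/demo_nodes.csv', mode='w') as csv_file:
    out = csv.writer(csv_file)
    out.writerow(["_identifier", "name"])  # leading underscore: linkage only, not a stored property
    out.writerow([0, "alice"])
    out.writerow([1, "bob"])
with open('/tmp/demo_edges.csv', mode='w') as csv_file:
    out = csv.writer(csv_file)
    out.writerow(["src", "dest"])
    out.writerow([0, 1])

runner = CliRunner()
res = runner.invoke(bulk_insert, [
    '--port', port, '--nodes', '/tmp/demo_nodes.csv', '--relations',
    '/tmp/demo_edges.csv', 'demo_graph'
])
assert res.exit_code == 0
assert '2 nodes created' in res.output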
Code example #11
0
class testIndexUpdatesFlow(FlowTestsBase):
    def __init__(self):
        self.env = Env()
        global redis_graph
        redis_con = self.env.getConnection()
        redis_graph = Graph(GRAPH_ID, redis_con)
        self.populate_graph()
        self.build_indices()

    def new_node(self):
        return Node(label=labels[node_ctr % 2],
                    properties={'unique': node_ctr,
                                'group': random.choice(groups),
                                'doubleval': round(random.uniform(-1, 1), 2),
                                'intval': random.randint(1, 10000),
                                'stringval': ''.join(random.choice(string.lowercase) for x in range(6))})

    def populate_graph(self):
        global node_ctr
        for i in range(1000):
            node = self.new_node()
            redis_graph.add_node(node)
            node_ctr += 1
        redis_graph.commit()

    def build_indices(self):
        for field in fields:
            redis_graph.redis_con.execute_command(
                "GRAPH.QUERY", GRAPH_ID,
                "CREATE INDEX ON :label_a(%s)" % (field))
            redis_graph.redis_con.execute_command(
                "GRAPH.QUERY", GRAPH_ID,
                "CREATE INDEX ON :label_b(%s)" % (field))

    # Validate that all properties are indexed
    def validate_indexed(self):
        for field in fields:
            resp = redis_graph.execution_plan(
                """MATCH (a:label_a) WHERE a.%s > 0 RETURN a""" % (field))
            self.env.assertIn('Index Scan', resp)

    # So long as 'unique' is not modified, label_a.unique will always be even and label_b.unique will always be odd
    def validate_unique(self):
        result = redis_graph.query("MATCH (a:label_a) RETURN a.unique")
        # Remove the header
        result.result_set.pop(0)
        for val in result.result_set:
            self.env.assertEquals(int(float(val[0])) % 2, 0)

        result = redis_graph.query("MATCH (b:label_b) RETURN b.unique")
        # Remove the header
        result.result_set.pop(0)
        for val in result.result_set:
            self.env.assertEquals(int(float(val[0])) % 2, 1)

    # The index scan ought to return identical results to a label scan over the same range of values.
    def validate_doubleval(self):
        for label in labels:
            resp = redis_graph.execution_plan(
                """MATCH (a:%s) WHERE a.doubleval < 100 RETURN a.doubleval ORDER BY a.doubleval"""
                % (label))
            self.env.assertIn('Index Scan', resp)
            indexed_result = redis_graph.query(
                """MATCH (a:%s) WHERE a.doubleval < 100 RETURN a.doubleval ORDER BY a.doubleval"""
                % (label))
            scan_result = redis_graph.query(
                """MATCH (a:%s) RETURN a.doubleval ORDER BY a.doubleval""" %
                (label))

            self.env.assertEqual(len(indexed_result.result_set),
                                 len(scan_result.result_set))
            # Collect any elements between the two result sets that fail a string comparison
            # so that we may compare them as doubles (specifically, -0 and 0 should be considered equal)
            differences = [[i[0], j[0]] for i, j in zip(
                indexed_result.result_set, scan_result.result_set) if i != j]
            for pair in differences:
                self.env.assertEqual(float(pair[0]), float(pair[1]))

    # The intval property can be validated similarly to doubleval, but here the result sets should be identical
    def validate_intval(self):
        for label in labels:
            resp = redis_graph.execution_plan(
                """MATCH (a:%s) WHERE a.intval > 0 RETURN a.intval ORDER BY a.intval"""
                % (label))
            self.env.assertIn('Index Scan', resp)
            indexed_result = redis_graph.query(
                """MATCH (a:%s) WHERE a.intval > 0 RETURN a.intval ORDER BY a.intval"""
                % (label))
            scan_result = redis_graph.query(
                """MATCH (a:%s) RETURN a.intval ORDER BY a.intval""" % (label))

            self.env.assertEqual(indexed_result.result_set,
                                 scan_result.result_set)

    # Validate a series of premises to ensure that the graph has not been modified unexpectedly
    def validate_state(self):
        self.validate_unique()
        self.validate_indexed()
        self.validate_doubleval()
        self.validate_intval()

    # Modify a property, triggering updates to all nodes in two indices
    def test01_full_property_update(self):
        result = redis_graph.query(
            "MATCH (a) SET a.doubleval = a.doubleval + %f" %
            (round(random.uniform(-1, 1), 2)))
        self.env.assertEquals(result.properties_set, 1000)
        # Verify that index scans still function and return correctly
        self.validate_state()

    # Modify a property, triggering updates to a subset of nodes in two indices
    def test02_partial_property_update(self):
        redis_graph.query(
            "MATCH (a) WHERE a.doubleval > 0 SET a.doubleval = a.doubleval + %f"
            % (round(random.uniform(-1, 1), 2)))
        # Verify that index scans still function and return correctly
        self.validate_state()

    #  Add 100 randomized nodes and validate indices
    def test03_node_creation(self):
        # Reset nodes in the Graph object so that we won't double-commit the originals
        redis_graph.nodes = {}
        global node_ctr
        for i in range(100):
            node = self.new_node()
            redis_graph.add_node(node)
            node_ctr += 1
        redis_graph.commit()
        self.validate_state()

    # Delete every other node in first 100 and validate indices
    def test04_node_deletion(self):
        # Reset nodes in the Graph object so that we won't double-commit the originals
        redis_graph.nodes = {}
        global node_ctr
        # Delete nodes one at a time
        for i in range(0, 100, 2):
            result = redis_graph.query("MATCH (a) WHERE ID(a) = %d DELETE a" %
                                       (i))
            self.env.assertEquals(result.nodes_deleted, 1)
            node_ctr -= 1
        self.validate_state()

        # Delete all nodes matching a filter
        result = redis_graph.query(
            "MATCH (a:label_a) WHERE a.group = 'Group A' DELETE a")
        self.env.assertGreater(result.nodes_deleted, 0)
        self.validate_state()
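For reference, the index machinery these validators exercise reduces to two steps: create the index through GRAPH.QUERY, then confirm that the plan for a filtered match contains an Index Scan. A minimal sketch, assuming redisgraph-py (graph name illustrative):

import redis
from redisgraph import Graph

r = redis.Redis(decode_responses=True)
g = Graph('index_demo', r)
g.query("CREATE (:label_a {intval: 1}), (:label_a {intval: 7})")

# Create the index the same way build_indices() above does.
r.execute_command("GRAPH.QUERY", 'index_demo', "CREATE INDEX ON :label_a(intval)")

# A filtered match over the indexed property should now use an Index Scan.
plan = g.execution_plan("MATCH (a:label_a) WHERE a.intval > 0 RETURN a.intval")
assert 'Index Scan' in plan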
Code example #12
0
class testGraphPersistency(FlowTestsBase):
    def __init__(self):
        self.env = Env()
        global redis_graph
        global dense_graph
        global redis_con
        redis_con = self.env.getConnection()
        redis_graph = Graph(GRAPH_NAME, redis_con)
        dense_graph = Graph(DENSE_GRAPH_NAME, redis_con)
        self.populate_graph()
        self.populate_dense_graph()

    def populate_graph(self):
        global redis_graph

        if not redis_con.exists(GRAPH_NAME):
            personNodes = {}
            countryNodes = {}
            # Create entities

            for p in people:
                person = Node(label="person", properties={"name": p})
                redis_graph.add_node(person)
                personNodes[p] = person

            for p in countries:
                country = Node(label="country", properties={"name": p})
                redis_graph.add_node(country)
                countryNodes[p] = country

            for v in visits:
                person = v[0]
                country = v[1]
                edge = Edge(personNodes[person],
                            'visit',
                            countryNodes[country],
                            properties={'purpose': 'pleasure'})
                redis_graph.add_edge(edge)

            redis_graph.commit()

            # Delete some nodes to introduce deleted items within our datablock
            query = """MATCH (n:person) WHERE n.name = 'Roi' or n.name = 'Ailon' DELETE n"""
            redis_graph.query(query)

            query = """MATCH (n:country) WHERE n.name = 'USA' DELETE n"""
            redis_graph.query(query)

            # Create indices.
            actual_result = redis_con.execute_command(
                "GRAPH.QUERY", GRAPH_NAME, "CREATE INDEX ON :person(name)")
            actual_result = redis_con.execute_command(
                "GRAPH.QUERY", GRAPH_NAME, "CREATE INDEX ON :country(name)")

    def populate_dense_graph(self):
        global dense_graph

        if not redis_con.exists(DENSE_GRAPH_NAME):
            nodes = []
            for i in range(10):
                node = Node(label="n", properties={"val": i})
                dense_graph.add_node(node)
                nodes.append(node)

            for n_idx, n in enumerate(nodes):
                for m_idx, m in enumerate(nodes[:n_idx]):
                    dense_graph.add_edge(Edge(n, "connected", m))

            dense_graph.commit()

    # Verify that nodes, edges and indices are restored correctly after an RDB save & reload.
    def test01_save_load_rdb(self):
        for i in range(2):
            if i == 1:
                # Save RDB & Load from RDB
                self.env.dumpAndReload()

            # Verify
            # Expecting 5 person entities.
            query = """MATCH (p:person) RETURN COUNT(p)"""
            actual_result = redis_graph.query(query)
            nodeCount = actual_result.result_set[0][0]
            self.env.assertEquals(nodeCount, 5)

            query = """MATCH (p:person) WHERE p.name='Alon' RETURN COUNT(p)"""
            actual_result = redis_graph.query(query)
            nodeCount = actual_result.result_set[0][0]
            self.env.assertEquals(nodeCount, 1)

            # Expecting 3 country entities.
            query = """MATCH (c:country) RETURN COUNT(c)"""
            actual_result = redis_graph.query(query)
            nodeCount = actual_result.result_set[0][0]
            self.env.assertEquals(nodeCount, 3)

            query = """MATCH (c:country) WHERE c.name = 'Israel' RETURN COUNT(c)"""
            actual_result = redis_graph.query(query)
            nodeCount = actual_result.result_set[0][0]
            self.env.assertEquals(nodeCount, 1)

            # Expecting 2 visit edges.
            query = """MATCH (n:person)-[e:visit]->(c:country) WHERE e.purpose='pleasure' RETURN COUNT(e)"""
            actual_result = redis_graph.query(query)
            edgeCount = actual_result.result_set[0][0]
            self.env.assertEquals(edgeCount, 2)

            # Verify that indices exist.
            plan = redis_graph.execution_plan(
                "MATCH (n:person) WHERE n.name = 'Roi' RETURN n")
            self.env.assertIn("Index Scan", plan)

            plan = redis_graph.execution_plan(
                "MATCH (n:country) WHERE n.name = 'Israel' RETURN n")
            self.env.assertIn("Index Scan", plan)

    # Verify that edges are not modified after entity deletion
    def test02_deleted_entity_migration(self):
        query = """MATCH (p) WHERE ID(p) = 0 OR ID(p) = 3 OR ID(p) = 7 OR ID(p) = 9 DELETE p"""
        actual_result = dense_graph.query(query)
        self.env.assertEquals(actual_result.nodes_deleted, 4)
        query = """MATCH (p)-[]->(q) RETURN p.val, q.val ORDER BY p.val, q.val"""
        first_result = dense_graph.query(query)

        # Save RDB & Load from RDB
        redis_con.execute_command("DEBUG", "RELOAD")

        second_result = dense_graph.query(query)
        self.env.assertEquals(first_result.result_set,
                              second_result.result_set)

    # Strings, numerics, booleans, arrays and NULL properties should be properly serialized and reloaded
    def test03_restore_properties(self):
        graphname = "simple_props"
        graph = Graph(graphname, redis_con)
        query = """CREATE (:p {strval: 'str', numval: 5.5, nullval: NULL, boolval: true, array: [1,2,3]})"""
        actual_result = graph.query(query)
        # Verify that node was created correctly
        self.env.assertEquals(actual_result.nodes_created, 1)
        self.env.assertEquals(actual_result.properties_set, 5)

        # Save RDB & Load from RDB
        redis_con.execute_command("DEBUG", "RELOAD")

        query = """MATCH (p) RETURN p.boolval, p.nullval, p.numval, p.strval, p.array"""
        actual_result = graph.query(query)

        # Verify that the properties are loaded correctly.
        # Note that the order of results is not guaranteed (currently managed by the Schema),
        # so this may need to be updated in the future.
        expected_result = [[True, None, 5.5, 'str', [1, 2, 3]]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    # Verify multiple edges of the same relation between nodes A and B
    # are saved and restored correctly.
    def test04_repeated_edges(self):
        graphname = "repeated_edges"
        g = Graph(graphname, redis_con)
        src = Node(label='p', properties={'name': 'src'})
        dest = Node(label='p', properties={'name': 'dest'})
        edge1 = Edge(src, 'e', dest, properties={'val': 1})
        edge2 = Edge(src, 'e', dest, properties={'val': 2})
        g.add_node(src)
        g.add_node(dest)
        g.add_edge(edge1)
        g.add_edge(edge2)
        g.commit()

        # Verify the new edges
        q = """MATCH (a)-[e]->(b) RETURN e.val, a.name, b.name ORDER BY e.val"""
        actual_result = g.query(q)

        expected_result = [[edge1.properties['val'], src.properties['name'], dest.properties['name']],
                           [edge2.properties['val'], src.properties['name'], dest.properties['name']]]

        self.env.assertEquals(actual_result.result_set, expected_result)

        # Save RDB & Load from RDB
        redis_con.execute_command("DEBUG", "RELOAD")

        # Verify that both edges were properly saved and loaded
        actual_result = g.query(q)
        self.env.assertEquals(actual_result.result_set, expected_result)
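Every persistence check in this example follows the same pattern: query, force an RDB dump and reload with DEBUG RELOAD, and verify that re-running the query returns identical results. A minimal sketch, assuming redisgraph-py (graph name illustrative):

import redis
from redisgraph import Graph

r = redis.Redis(decode_responses=True)
g = Graph('persist_demo', r)
g.query("CREATE (:p {strval: 'str', numval: 5.5, boolval: true, array: [1,2,3]})")

q = "MATCH (p) RETURN p.strval, p.numval, p.boolval, p.array"
before = g.query(q).result_set

# Save an RDB and reload it, then verify the properties survive intact.
r.execute_command("DEBUG", "RELOAD")
after = g.query(q).result_set
assert before == after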
Code example #13
0
class testShortestPath(FlowTestsBase):
    def __init__(self):
        self.env = Env(decodeResponses=True)
        global redis_graph
        redis_con = self.env.getConnection()
        redis_graph = Graph(GRAPH_ID, redis_con)
        self.populate_graph()

    def populate_graph(self):
        # Construct a graph with the form:
        # (v1)-[:E]->(v2)-[:E]->(v3)-[:E]->(v4), (v1)-[:E]->(v5)-[:E2]->(v4)

        global nodes
        for v in range(1, 6):
            node = Node(label="L", properties={"v": v})
            nodes.append(node)
            redis_graph.add_node(node)

        edge = Edge(nodes[0], "E", nodes[1])
        redis_graph.add_edge(edge)

        edge = Edge(nodes[1], "E", nodes[2])
        redis_graph.add_edge(edge)

        edge = Edge(nodes[2], "E", nodes[3])
        redis_graph.add_edge(edge)

        edge = Edge(nodes[0], "E", nodes[4])
        redis_graph.add_edge(edge)

        edge = Edge(nodes[4], "E2", nodes[3])
        redis_graph.add_edge(edge)

        redis_graph.commit()

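    # shortestPath calls that violate the supported form should raise errors.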
    def test01_invalid_shortest_paths(self):
        query = """MATCH (a {v: 1}), (b {v: 4}), p = shortestPath((a)-[*]->(b)) RETURN p"""
        try:
            redis_graph.query(query)
            self.env.assertTrue(False)
        except redis.exceptions.ResponseError as e:
            self.env.assertIn(
                "RedisGraph currently only supports shortestPath in WITH or RETURN clauses",
                str(e))

        query = """MATCH (a {v: 1}), (b {v: 4}) RETURN shortestPath((a)-[*2..]->(b))"""
        try:
            redis_graph.query(query)
            self.env.assertTrue(False)
        except redis.exceptions.ResponseError as e:
            self.env.assertIn(
                "shortestPath does not support a minimal length different from 0 or 1",
                str(e))

        query = """MATCH (a {v: 1}), (b {v: 4}) RETURN shortestPath((a)-[]->()-[*]->(b))"""
        try:
            redis_graph.query(query)
            self.env.assertTrue(False)
        except redis.exceptions.ResponseError as e:
            self.env.assertIn(
                "shortestPath requires a path containing a single relationship",
                str(e))

        query = """MATCH (a {v: 1}), (b {v: 4}) RETURN shortestPath((a)-[* {weight: 4}]->(b))"""
        try:
            redis_graph.query(query)
            self.env.assertTrue(False)
        except redis.exceptions.ResponseError as e:
            self.env.assertIn("filters on relationships in shortestPath",
                              str(e))

        # Try iterating over an invalid relationship type
        query = """MATCH (a {v: 1}), (b {v: 4}) RETURN shortestPath((a)-[:FAKE*]->(b))"""
        actual_result = redis_graph.query(query)
        # No results should be found
        expected_result = [[None]]
        self.env.assertEqual(actual_result.result_set, expected_result)

    def test02_simple_shortest_path(self):
        query = """MATCH (a {v: 1}), (b {v: 4}) WITH shortestPath((a)-[*]->(b)) AS p UNWIND nodes(p) AS n RETURN n.v"""
        actual_result = redis_graph.query(query)
        # The shorter 2-hop traversal should be found
        expected_result = [[1], [5], [4]]
        self.env.assertEqual(actual_result.result_set, expected_result)
        # Verify that a right-to-left traversal produces the same results
        query = """MATCH (a {v: 1}), (b {v: 4}) WITH shortestPath((b)<-[*]-(a)) AS p UNWIND nodes(p) AS n RETURN n.v"""
        self.env.assertEqual(actual_result.result_set, expected_result)

    def test03_shortest_path_multiple_results(self):
        # Traverse from all source nodes to the destination node
        query = """MATCH (a), (b {v: 4}) WITH a, shortestPath((a)-[*]->(b)) AS p RETURN a, nodes(p) ORDER BY a"""
        actual_result = redis_graph.query(query)
        expected_result = [[nodes[0], [nodes[0], nodes[4], nodes[3]]],
                           [nodes[1], [nodes[1], nodes[2], nodes[3]]],
                           [nodes[2], [nodes[2], nodes[3]]], [nodes[3], None],
                           [nodes[4], [nodes[4], nodes[3]]]]
        self.env.assertEqual(actual_result.result_set, expected_result)

    def test04_max_hops(self):
        # Traverse from all source nodes to the destination node if there is a single-hop path
        query = """MATCH (a), (b {v: 4}) WITH a, shortestPath((a)-[*..1]->(b)) AS p RETURN a, nodes(p) ORDER BY a"""
        actual_result = redis_graph.query(query)
        expected_result = [[nodes[0], None], [nodes[1], None],
                           [nodes[2], [nodes[2], nodes[3]]], [nodes[3], None],
                           [nodes[4], [nodes[4], nodes[3]]]]
        self.env.assertEqual(actual_result.result_set, expected_result)

    def test05_min_hops(self):
        # Traverse from all source nodes to the destination node with a minimum hop value of 0.
        # This will produce the same results as the above query with the exception of
        # the src == dest case, in which case that node is returned.
        query = """MATCH (a), (b {v: 4}) WITH a, shortestPath((a)-[*0..]->(b)) AS p RETURN a, nodes(p) ORDER BY a"""
        actual_result = redis_graph.query(query)
        expected_result = [[nodes[0], [nodes[0], nodes[4], nodes[3]]],
                           [nodes[1], [nodes[1], nodes[2], nodes[3]]],
                           [nodes[2], [nodes[2], nodes[3]]],
                           [nodes[3], [nodes[3]]],
                           [nodes[4], [nodes[4], nodes[3]]]]
        self.env.assertEqual(actual_result.result_set, expected_result)

    def test06_restricted_reltypes(self):
        # Traverse both relationship types
        query = """MATCH (a {v: 1}), (b {v: 4}) WITH shortestPath((a)-[:E|:E2*]->(b)) AS p UNWIND nodes(p) AS n RETURN n.v"""
        actual_result = redis_graph.query(query)
        # The shorter 2-hop traversal should be found
        expected_result = [[1], [5], [4]]
        self.env.assertEqual(actual_result.result_set, expected_result)

        # Only traverse edges of type E
        query = """MATCH (a {v: 1}), (b {v: 4}) WITH shortestPath((a)-[:E*]->(b)) AS p UNWIND nodes(p) AS n RETURN n.v"""
        actual_result = redis_graph.query(query)
        # The longer traversal will be found
        expected_result = [[1], [2], [3], [4]]
        self.env.assertEqual(actual_result.result_set, expected_result)
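As test01 above establishes, shortestPath is only accepted inside a WITH or RETURN clause, with a minimal hop bound of 0 or 1 and a single relationship pattern. A minimal sketch of the supported form, assuming redisgraph-py (graph name illustrative):

import redis
from redisgraph import Graph

r = redis.Redis(decode_responses=True)
g = Graph('shortest_path_demo', r)

# (v1)-[:E]->(v2)-[:E]->(v3)-[:E]->(v4) plus a shortcut (v1)-[:E]->(v5)-[:E2]->(v4)
g.query("CREATE (a:L {v:1})-[:E]->(:L {v:2})-[:E]->(:L {v:3})-[:E]->(d:L {v:4}), "
        "(a)-[:E]->(:L {v:5})-[:E2]->(d)")

query = """MATCH (a {v: 1}), (b {v: 4})
           WITH shortestPath((a)-[*]->(b)) AS p
           UNWIND nodes(p) AS n RETURN n.v"""
print(g.query(query).result_set)  # expected: [[1], [5], [4]] -- the two-hop route wins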
Code example #14
0
class testComprehensionFunctions(FlowTestsBase):
    def __init__(self):
        self.env = Env(decodeResponses=True)
        global redis_graph
        graph_id = "list_comprehension"
        redis_con = self.env.getConnection()
        redis_graph = Graph(graph_id, redis_con)
        self.populate_graph()

    def populate_graph(self):
        global redis_graph

        # Construct a graph with the form:
        # (v1)-[e1]->(v2)-[e2]->(v3)
        node_props = ['v1', 'v2', 'v3']

        nodes = []
        for idx, v in enumerate(node_props):
            node = Node(label="L", properties={"val": v})
            nodes.append(node)
            redis_graph.add_node(node)

        edge = Edge(nodes[0],
                    "E",
                    nodes[1],
                    properties={"edge_val": ['v1', 'v2']})
        redis_graph.add_edge(edge)

        edge = Edge(nodes[1],
                    "E",
                    nodes[2],
                    properties={"edge_val": ['v2', 'v3']})
        redis_graph.add_edge(edge)

        redis_graph.commit()

    # Test list comprehension queries with scalar inputs and a single result row
    def test01_list_comprehension_single_return(self):
        expected_result = [[[2, 6]]]

        # Test logically identical queries that generate the same input array with different methods.
        query = """WITH [1,2,3] AS arr RETURN [elem IN arr WHERE elem % 2 = 1 | elem * 2]"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """RETURN [elem IN [1,2,3] WHERE elem % 2 = 1 | elem * 2]"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """RETURN [elem IN range(1,3) WHERE elem % 2 = 1 | elem * 2]"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test02_list_comprehension_no_filter_no_map(self):
        expected_result = [[[1, 2, 3]]]
        query = """WITH [1,2,3] AS arr RETURN [elem IN arr]"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)
        query = """RETURN [elem IN [1,2,3]]"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test03_list_comprehension_map_no_filter(self):
        query = """WITH [1,2,3] AS arr RETURN [elem IN arr | elem * 2]"""
        actual_result = redis_graph.query(query)
        expected_result = [[[2, 4, 6]]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test04_list_comprehension_filter_no_map(self):
        query = """WITH [1,2,3] AS arr RETURN [elem IN arr WHERE elem % 2 = 1]"""
        actual_result = redis_graph.query(query)
        expected_result = [[[1, 3]]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test05_list_comprehension_on_allocated_values(self):
        query = """WITH [toUpper('str1'), toUpper('str2'), toUpper('str3')] AS arr RETURN [elem IN arr]"""
        actual_result = redis_graph.query(query)
        expected_result = [[['STR1', 'STR2', 'STR3']]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """WITH [toUpper('str1'), toUpper('str2'), toUpper('str3')] AS arr RETURN [elem IN arr WHERE toLower(elem) = 'str2']"""
        actual_result = redis_graph.query(query)
        expected_result = [[['STR2']]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """WITH [toUpper('str1'), toUpper('str2'), toUpper('str3')] AS arr RETURN [elem IN arr WHERE toLower(elem) = 'str2' | elem + 'low']"""
        actual_result = redis_graph.query(query)
        expected_result = [[['STR2low']]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test06_list_comprehension_on_graph_entities(self):
        query = """MATCH p=()-[*]->() WITH nodes(p) AS nodes RETURN [elem IN nodes]"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(len(actual_result.result_set), 3)

        query = """MATCH p=()-[*]->() WITH nodes(p) AS nodes WITH [elem IN nodes | elem.val] AS vals RETURN vals ORDER BY vals"""
        actual_result = redis_graph.query(query)
        expected_result = [[['v1', 'v2']], [['v1', 'v2', 'v3']], [['v2', 'v3']]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """MATCH p=()-[*]->() WITH nodes(p) AS nodes RETURN [elem IN nodes WHERE elem.val = 'v2' | elem.val]"""
        actual_result = redis_graph.query(query)
        expected_result = [[['v2']], [['v2']], [['v2']]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """MATCH p=()-[*]->() WITH nodes(p) AS nodes RETURN [elem IN nodes WHERE elem.val = 'v2' | elem.val + 'a']"""
        actual_result = redis_graph.query(query)
        expected_result = [[['v2a']], [['v2a']], [['v2a']]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test07_list_comprehension_in_where_predicate(self):
        # List comprehension with predicate in WHERE predicate on MATCH clause - evaluates to true
        query = """MATCH (n) WHERE n.val IN [x in ['v1', 'v3']] RETURN n.val ORDER BY n.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1'], ['v3']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # List comprehension with predicate in WHERE predicate - evaluates to true
        query = """WITH 1 AS a WHERE a IN [x in [1, 2]] RETURN a"""
        actual_result = redis_graph.query(query)
        expected_result = [[1]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # List comprehension with predicate in WHERE predicate - evaluates to false
        query = """WITH 1 AS a WHERE a IN [x in [2,3]] RETURN a"""
        actual_result = redis_graph.query(query)
        expected_result = []
        self.env.assertEquals(actual_result.result_set, expected_result)

        # List comprehension with predicate and eval in WHERE predicate - evaluates to false
        query = """WITH 1 AS a WHERE [i in [2,3] WHERE i > 5] RETURN a"""
        actual_result = redis_graph.query(query)
        expected_result = []
        self.env.assertEquals(actual_result.result_set, expected_result)

        # List comprehension without predicate or eval in WHERE predicate - evaluates to true
        query = """WITH 1 AS a WHERE [i in [2,3]] RETURN a"""
        actual_result = redis_graph.query(query)
        expected_result = [[1]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test08_list_comprehension_on_property_array(self):
        query = """MATCH (n)-[e]->() WITH n, e ORDER BY n.val RETURN [elem IN e.edge_val WHERE elem = n.val]"""
        actual_result = redis_graph.query(query)
        expected_result = [[['v1']], [['v2']]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test09_nested_list_comprehension(self):
        query = """RETURN [elem IN [nested_val IN range(0, 6) WHERE nested_val % 2 = 0] WHERE elem * 2 >= 4 | elem * 2]"""
        actual_result = redis_graph.query(query)
        expected_result = [[[4, 8, 12]]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test10_any_all_comprehension_acceptance(self):
        # Reject ANY and ALL comprehensions that don't include a WHERE predicate.
        try:
            redis_graph.query("RETURN any(x IN [1,2])")
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting a type error.
            self.env.assertIn("requires a WHERE predicate", str(e))

        try:
            redis_graph.query("RETURN all(x IN [1,2])")
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting a type error.
            self.env.assertIn("requires a WHERE predicate", str(e))

    def test11_any_all_truth_table(self):
        # Test inputs and predicates where ANY and ALL are both false.
        query = """RETURN any(x IN [0,1] WHERE x = 2)"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, [[False]])

        query = """RETURN all(x IN [0,1] WHERE x = 2)"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, [[False]])

        # Test inputs and predicates where ANY is true and ALL is false.
        query = """RETURN any(x IN [0,1] WHERE x = 1)"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, [[True]])

        query = """RETURN all(x IN [0,1] WHERE x = 1)"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, [[False]])

        # Test inputs and predicates where ANY and ALL are both true.
        query = """RETURN any(x IN [0,1] WHERE x = 0 OR x = 1)"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, [[True]])

        query = """RETURN all(x IN [0,1] WHERE x = 0 OR x = 1)"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, [[True]])

        # Test inputs and predicates where ANY and ALL are both NULL.
        query = """RETURN any(x IN NULL WHERE x = 1)"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, [[None]])

        query = """RETURN all(x IN NULL WHERE x = 1)"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, [[None]])

    def test12_any_all_on_property_arrays(self):
        # The first array evaluates to ['v1', 'v2'] and the second evaluates to ['v2', 'v3']
        query = """MATCH ()-[e]->() WITH e ORDER BY e.edge_val RETURN ANY(elem IN e.edge_val WHERE elem = 'v2' OR elem = 'v3')"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, [[True], [True]])

        query = """MATCH ()-[e]->() WITH e ORDER BY e.edge_val RETURN ALL(elem IN e.edge_val WHERE elem = 'v2' OR elem = 'v3')"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, [[False], [True]])

    def test13_any_all_path_filtering(self):
        # Use ANY and ALL to introspect on named variable-length paths.
        # All paths should be returned using both ANY and ALL filters.
        expected_result = [['v1'], ['v1'], ['v2']]
        query = """MATCH p=()-[*]->() WHERE any(node IN nodes(p) WHERE node.val STARTS WITH 'v') WITH head(nodes(p)) AS n RETURN n.val ORDER BY n.val"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """MATCH p=()-[*]->() WHERE all(node IN nodes(p) WHERE node.val STARTS WITH 'v') WITH head(nodes(p)) AS n RETURN n.val ORDER BY n.val"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Run a query in which 2 paths pass an ANY filter and 1 path passes an ALL filter.
        query = """MATCH p=()-[*0..1]->() WHERE any(node IN nodes(p) WHERE node.val = 'v1') RETURN length(p) ORDER BY length(p)"""
        actual_result = redis_graph.query(query)
        expected_result = [[0], [1]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """MATCH p=()-[*0..1]->() WHERE all(node IN nodes(p) WHERE node.val = 'v1') RETURN length(p) ORDER BY length(p)"""
        actual_result = redis_graph.query(query)
        expected_result = [[0]]
        self.env.assertEquals(actual_result.result_set, expected_result)
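
The truth-table checks in test11 mirror Cypher's three-valued logic for ANY and ALL: a NULL input list yields NULL rather than a boolean. Below is a minimal Python sketch of the semantics exercised by these tests; any_pred and all_pred are illustrative helpers, not part of the test suite.

def any_pred(values, pred):
    # Cypher-style ANY: a NULL (None) input list yields NULL, otherwise a boolean.
    if values is None:
        return None
    return any(pred(v) for v in values)

def all_pred(values, pred):
    # Cypher-style ALL: a NULL (None) input list yields NULL, otherwise a boolean.
    if values is None:
        return None
    return all(pred(v) for v in values)

assert any_pred([0, 1], lambda x: x == 2) is False
assert all_pred([0, 1], lambda x: x == 0 or x == 1) is True
assert any_pred(None, lambda x: x == 1) is None
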
Code example #15
class TestAggregate():
    def __init__(self):
        self.env = Env()
        add_values(self.env)

    def testGroupBy(self):
        cmd = ['ft.aggregate', 'games', '*',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'count', '0', 'AS', 'count',
               'SORTBY', 2, '@count', 'desc',
               'LIMIT', '0', '5'
               ]

        res = self.env.cmd(*cmd)
        self.env.assertIsNotNone(res)
        self.env.assertEqual([292L, ['brand', '', 'count', '1518'], ['brand', 'mad catz', 'count', '43'],
                                    ['brand', 'generic', 'count', '40'], ['brand', 'steelseries', 'count', '37'],
                                    ['brand', 'logitech', 'count', '35']], res)

    def testMinMax(self):
        cmd = ['ft.aggregate', 'games', 'sony',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'count', '0',
               'REDUCE', 'min', '1', '@price', 'as', 'minPrice',
               'SORTBY', '2', '@minPrice', 'DESC']
        res = self.env.cmd(*cmd)
        self.env.assertIsNotNone(res)
        row = to_dict(res[1])
        self.env.assertEqual(88, int(float(row['minPrice'])))

        cmd = ['ft.aggregate', 'games', 'sony',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'count', '0',
               'REDUCE', 'max', '1', '@price', 'as', 'maxPrice',
               'SORTBY', '2', '@maxPrice', 'DESC']
        res = self.env.cmd(*cmd)
        row = to_dict(res[1])
        self.env.assertEqual(695, int(float(row['maxPrice'])))

    def testAvg(self):
        cmd = ['ft.aggregate', 'games', 'sony',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'avg', '1', '@price', 'AS', 'avg_price',
               'REDUCE', 'count', '0',
               'SORTBY', '2', '@avg_price', 'DESC']
        res = self.env.cmd(*cmd)
        self.env.assertIsNotNone(res)
        self.env.assertEqual(26, res[0])
        # Ensure the formatting actually exists

        first_row = to_dict(res[1])
        self.env.assertEqual(109, int(float(first_row['avg_price'])))

        for row in res[1:]:
            row = to_dict(row)
            self.env.assertIn('avg_price', row)

        # Test aliasing
        cmd = ['FT.AGGREGATE', 'games', 'sony', 'GROUPBY', '1', '@brand',
               'REDUCE', 'avg', '1', '@price', 'AS', 'avgPrice']
        res = self.env.cmd(*cmd)
        first_row = to_dict(res[1])
        self.env.assertEqual(17, int(float(first_row['avgPrice'])))

    def testCountDistinct(self):
        cmd = ['FT.AGGREGATE', 'games', '*',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'COUNT_DISTINCT', '1', '@title', 'AS', 'count_distinct(title)',
               'REDUCE', 'COUNT', '0'
               ]
        res = self.env.cmd(*cmd)[1:]
        # print res
        row = to_dict(res[0])
        self.env.assertEqual(1484, int(row['count_distinct(title)']))

        cmd = ['FT.AGGREGATE', 'games', '*',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'COUNT_DISTINCTISH', '1', '@title', 'AS', 'count_distinctish(title)',
               'REDUCE', 'COUNT', '0'
               ]
        res = self.env.cmd(*cmd)[1:]
        # print res
        row = to_dict(res[0])
        self.env.assertEqual(1461, int(row['count_distinctish(title)']))

    def testQuantile(self):
        cmd = ['FT.AGGREGATE', 'games', '*',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'QUANTILE', '2', '@price', '0.50', 'AS', 'q50',
               'REDUCE', 'QUANTILE', '2', '@price', '0.90', 'AS', 'q90',
               'REDUCE', 'QUANTILE', '2', '@price', '0.95', 'AS', 'q95',
               'REDUCE', 'AVG', '1', '@price',
               'REDUCE', 'COUNT', '0', 'AS', 'rowcount',
               'SORTBY', '2', '@rowcount', 'DESC', 'MAX', '1']

        res = self.env.cmd(*cmd)
        row = to_dict(res[1])
        # TODO: Better samples
        self.env.assertAlmostEqual(14.99, float(row['q50']), delta=3)
        self.env.assertAlmostEqual(70, float(row['q90']), delta=50)

        # This tests the 95th percentile, which is error prone because
        # so few samples actually exist. I'm disabling it for now so that
        # there is no breakage in CI
        # self.env.assertAlmostEqual(110, (float(row['q95'])), delta=50)

    def testStdDev(self):
        cmd = ['FT.AGGREGATE', 'games', '*',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'STDDEV', '1', '@price', 'AS', 'stddev(price)',
               'REDUCE', 'AVG', '1', '@price', 'AS', 'avgPrice',
               'REDUCE', 'QUANTILE', '2', '@price', '0.50', 'AS', 'q50Price',
               'REDUCE', 'COUNT', '0', 'AS', 'rowcount',
               'SORTBY', '2', '@rowcount', 'DESC',
               'LIMIT', '0', '10']
        res = self.env.cmd(*cmd)
        row = to_dict(res[1])

        self.env.assertTrue(10 <= int(
            float(row['q50Price'])) <= 20)
        self.env.assertAlmostEqual(53, int(float(row['stddev(price)'])), delta=50)
        self.env.assertEqual(29, int(float(row['avgPrice'])))

    def testParseTime(self):
        cmd = ['FT.AGGREGATE', 'games', '*',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'COUNT', '0', 'AS', 'count',
               'APPLY', 'timefmt(1517417144)', 'AS', 'dt',
               'APPLY', 'parse_time("%FT%TZ", @dt)', 'as', 'parsed_dt',
               'LIMIT', '0', '1']
        res = self.env.cmd(*cmd)
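        # Epoch 1517417144 is 2018-01-31T16:45:44Z, so parse_time("%FT%TZ", @dt)
        # should round-trip the formatted string back to the original epoch value.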

        self.env.assertEqual(['brand', '', 'count', '1518', 'dt',
                              '2018-01-31T16:45:44Z', 'parsed_dt', '1517417144'], res[1])

    def testRandomSample(self):
        cmd = ['FT.AGGREGATE', 'games', '*', 'GROUPBY', '1', '@brand',
               'REDUCE', 'COUNT', '0', 'AS', 'num',
               'REDUCE', 'RANDOM_SAMPLE', '2', '@price', '10',
               'SORTBY', '2', '@num', 'DESC', 'MAX', '10']
        for row in self.env.cmd(*cmd)[1:]:
            self.env.assertIsInstance(row[5], list)
            self.env.assertGreater(len(row[5]), 0)
            self.env.assertGreaterEqual(row[3], len(row[5]))

            self.env.assertLessEqual(len(row[5]), 10)

    def testTimeFunctions(self):
        cmd = ['FT.AGGREGATE', 'games', '*',

               'APPLY', '1517417144', 'AS', 'dt',
               'APPLY', 'timefmt(@dt)', 'AS', 'timefmt',
               'APPLY', 'day(@dt)', 'AS', 'day',
               'APPLY', 'hour(@dt)', 'AS', 'hour',
               'APPLY', 'minute(@dt)', 'AS', 'minute',
               'APPLY', 'month(@dt)', 'AS', 'month',
               'APPLY', 'dayofweek(@dt)', 'AS', 'dayofweek',
               'APPLY', 'dayofmonth(@dt)', 'AS', 'dayofmonth',
               'APPLY', 'dayofyear(@dt)', 'AS', 'dayofyear',
               'APPLY', 'year(@dt)', 'AS', 'year',

               'LIMIT', '0', '1']
        res = self.env.cmd(*cmd)
        self.env.assertListEqual([1L, ['dt', '1517417144', 'timefmt', '2018-01-31T16:45:44Z', 'day', '1517356800', 'hour', '1517414400',
                                       'minute', '1517417100', 'month', '1514764800', 'dayofweek', '3', 'dayofmonth', '31', 'dayofyear', '30', 'year', '2018']], res)

    def testStringFormat(self):
        cmd = ['FT.AGGREGATE', 'games', '@brand:sony',
               'GROUPBY', '2', '@title', '@brand',
               'REDUCE', 'COUNT', '0',
               'REDUCE', 'MAX', '1', '@price', 'AS', 'price',
               'APPLY', 'format("%s|%s|%s|%s", @title, @brand, "Mark", @price)', 'as', 'titleBrand',
               'LIMIT', '0', '10']
        res = self.env.cmd(*cmd)
        for row in res[1:]:
            row = to_dict(row)
            expected = '%s|%s|%s|%g' % (
                row['title'], row['brand'], 'Mark', float(row['price']))
            self.env.assertEqual(expected, row['titleBrand'])

    def testSum(self):
        cmd = ['ft.aggregate', 'games', '*',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'count', '0', 'AS', 'count',
               'REDUCE', 'sum', 1, '@price', 'AS', 'sum(price)',
               'SORTBY', 2, '@sum(price)', 'desc',
               'LIMIT', '0', '5'
               ]
        res = self.env.cmd(*cmd)
        self.env.assertEqual([292L, ['brand', '', 'count', '1518', 'sum(price)', '44780.69'],
                             ['brand', 'mad catz', 'count',
                                 '43', 'sum(price)', '3973.48'],
                             ['brand', 'razer', 'count', '26',
                                 'sum(price)', '2558.58'],
                             ['brand', 'logitech', 'count',
                                 '35', 'sum(price)', '2329.21'],
                             ['brand', 'steelseries', 'count', '37', 'sum(price)', '1851.12']], res)

    def testFilter(self):
        cmd = ['ft.aggregate', 'games', '*',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'count', '0', 'AS', 'count',
               'FILTER', '@count > 5'
               ]

        res = self.env.cmd(*cmd)
        for row in res[1:]:
            row = to_dict(row)
            self.env.assertGreater(int(row['count']), 5)

        cmd = ['ft.aggregate', 'games', '*',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'count', '0', 'AS', 'count',
               'FILTER', '@count < 5',
               'FILTER', '@count > 2 && @brand != ""'
               ]

        res = self.env.cmd(*cmd)
        for row in res[1:]:
            row = to_dict(row)
            self.env.assertLess(int(row['count']), 5)
            self.env.assertGreater(int(row['count']), 2)

    def testToList(self):
        cmd = ['ft.aggregate', 'games', '*',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'count_distinct', '1', '@price', 'as', 'count',
               'REDUCE', 'tolist', 1, '@price', 'as', 'prices',
               'SORTBY', 2, '@count', 'desc',
               'LIMIT', '0', '5'
               ]
        res = self.env.cmd(*cmd)

        for row in res[1:]:
            row = to_dict(row)
            self.env.assertEqual(int(row['count']), len(row['prices']))

    def testSortBy(self):
        res = self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand',
                           'REDUCE', 'sum', 1, '@price', 'as', 'price',
                           'SORTBY', 2, '@price', 'desc',
                           'LIMIT', '0', '2')

        self.env.assertListEqual([292L, ['brand', '', 'price', '44780.69'], [
                                 'brand', 'mad catz', 'price', '3973.48']], res)

        res = self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand',
                           'REDUCE', 'sum', 1, '@price', 'as', 'price',
                           'SORTBY', 2, '@price', 'asc',
                           'LIMIT', '0', '2')

        self.env.assertListEqual([292L, ['brand', 'myiico', 'price', '0.23'], [
                                 'brand', 'crystal dynamics', 'price', '0.25']], res)

        # Test MAX with limit higher than it
        res = self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand',
                           'REDUCE', 'sum', 1, '@price', 'as', 'price',
                           'SORTBY', 2, '@price', 'asc', 'MAX', 2)
        
        self.env.assertListEqual([292L, ['brand', 'myiico', 'price', '0.23'], [
                                 'brand', 'crystal dynamics', 'price', '0.25']], res)

        # Test Sorting by multiple properties
        res = self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand',
                           'REDUCE', 'sum', 1, '@price', 'as', 'price',
                           'APPLY', '(@price % 10)', 'AS', 'price',
                           'SORTBY', 4, '@price', 'asc', '@brand', 'desc', 'MAX', 10,
                           )
        self.env.assertListEqual([292L, ['brand', 'zps', 'price', '0'], ['brand', 'zalman', 'price', '0'], ['brand', 'yoozoo', 'price', '0'], ['brand', 'white label', 'price', '0'], ['brand', 'stinky', 'price', '0'], [
                                 'brand', 'polaroid', 'price', '0'], ['brand', 'plantronics', 'price', '0'], ['brand', 'ozone', 'price', '0'], ['brand', 'oooo', 'price', '0'], ['brand', 'neon', 'price', '0']], res)

    def testExpressions(self):
        pass

    def testNoGroup(self):
        res = self.env.cmd('ft.aggregate', 'games', '*', 'LOAD', '2', '@brand', '@price',
                           'APPLY', 'floor(sqrt(@price)) % 10', 'AS', 'price',
                           'SORTBY', 4, '@price', 'desc', '@brand', 'desc', 'MAX', 5,
                           )
        exp = [2265L,
               ['brand', 'xbox', 'price', '9'],
               ['brand', 'turtle beach', 'price', '9'],
               ['brand', 'trust', 'price', '9'],
               ['brand', 'steelseries', 'price', '9'],
               ['brand', 'speedlink', 'price', '9']]
        # exp = [2265L, ['brand', 'Xbox', 'price', '9'], ['brand', 'Turtle Beach', 'price', '9'], [
                            #  'brand', 'Trust', 'price', '9'], ['brand', 'SteelSeries', 'price', '9'], ['brand', 'Speedlink', 'price', '9']]
        self.env.assertListEqual(exp[1], res[1])

    def testLoad(self):
        res = self.env.cmd('ft.aggregate', 'games', '*',
                           'LOAD', '3', '@brand', '@price', '@nonexist',
                           'SORTBY', 2, '@price', 'DESC', 'MAX', 2)
        exp = [3L, ['brand', '', 'price', '759.12'], ['brand', 'Sony', 'price', '695.8']]
        self.env.assertEqual(exp[1], res[1])

    def testSplit(self):
        res = self.env.cmd('ft.aggregate', 'games', '*', 'APPLY', 'split("hello world,  foo,,,bar,", ",", " ")', 'AS', 'strs',
                           'APPLY', 'split("hello world,  foo,,,bar,", " ", ",")', 'AS', 'strs2',
                           'APPLY', 'split("hello world,  foo,,,bar,", "", "")', 'AS', 'strs3',
                           'APPLY', 'split("hello world,  foo,,,bar,")', 'AS', 'strs4',
                           'APPLY', 'split("hello world,  foo,,,bar,",",")', 'AS', 'strs5',
                           'APPLY', 'split("")', 'AS', 'empty',
                           'LIMIT', '0', '1'
                           )
        # print "Got {} results".format(len(res))
        # return
        # pprint.pprint(res)
        self.env.assertListEqual([1L, ['strs', ['hello world', 'foo', 'bar'],
                                       'strs2', ['hello', 'world', 'foo,,,bar'],
                                       'strs3', ['hello world,  foo,,,bar,'],
                                       'strs4', ['hello world', 'foo', 'bar'],
                                       'strs5', ['hello world', 'foo', 'bar'],
                                       'empty', []]], res)

    def testFirstValue(self):
        res = self.env.cmd('ft.aggregate', 'games', '@brand:(sony|matias|beyerdynamic|(mad catz))',
                           'GROUPBY', 1, '@brand',
                           'REDUCE', 'FIRST_VALUE', 4, '@title', 'BY', '@price', 'DESC', 'AS', 'top_item',
                           'REDUCE', 'FIRST_VALUE', 4, '@price', 'BY', '@price', 'DESC', 'AS', 'top_price',
                           'REDUCE', 'FIRST_VALUE', 4, '@title', 'BY', '@price', 'ASC', 'AS', 'bottom_item',
                           'REDUCE', 'FIRST_VALUE', 4, '@price', 'BY', '@price', 'ASC', 'AS', 'bottom_price',
                           'SORTBY', 2, '@top_price', 'DESC', 'MAX', 5
                           )
        expected = [4L, ['brand', 'sony', 'top_item', 'sony psp slim &amp; lite 2000 console', 'top_price', '695.8', 'bottom_item', 'sony dlchd20p high speed hdmi cable for playstation 3', 'bottom_price', '5.88'],
                                 ['brand', 'matias', 'top_item', 'matias halfkeyboard usb', 'top_price',
                                     '559.99', 'bottom_item', 'matias halfkeyboard usb', 'bottom_price', '559.99'],
                                 ['brand', 'beyerdynamic', 'top_item', 'beyerdynamic mmx300 pc gaming premium digital headset with microphone', 'top_price', '359.74',
                                     'bottom_item', 'beyerdynamic headzone pc gaming digital surround sound system with mmx300 digital headset with microphone', 'bottom_price', '0'],
                                 ['brand', 'mad catz', 'top_item', 'mad catz s.t.r.i.k.e.7 gaming keyboard', 'top_price', '295.95', 'bottom_item', 'madcatz mov4545 xbox replacement breakaway cable', 'bottom_price', '3.49']]
        self.env.assertListEqual(expected, res)

    def testLoadAfterGroupBy(self):
        with self.env.assertResponseError():
            self.env.cmd('ft.aggregate', 'games', '*',
                         'GROUPBY', 1, '@brand',
                         'LOAD', 1, '@brand')
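
TestAggregate relies on two helpers defined elsewhere in the RediSearch test suite: add_values, which loads the 'games' dataset used by every query here, and to_dict, which turns FT.AGGREGATE's flat [field, value, field, value, ...] rows into dictionaries. A minimal sketch of to_dict under that assumption:

def to_dict(row):
    # ['brand', 'sony', 'count', '12'] -> {'brand': 'sony', 'count': '12'}
    return dict(zip(row[::2], row[1::2]))
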
Code example #16
File: test_params.py  Project: yuyfbj/RedisGraph
class testParams(FlowTestsBase):
    def __init__(self):
        self.env = Env()
        global redis_graph
        redis_con = self.env.getConnection()
        redis_graph = Graph(GRAPH_ID, redis_con)

    def setUp(self):
        self.env.flush()
    
    def test_simple_params(self):
        params = [1, 2.3, -1, -2.3, "str", True, False, None, [0, 1, 2]]
        query = "RETURN $param"
        for param in params:    
            expected_results = [[param]]
            query_info = QueryInfo(query = query, description="Tests simple params", expected_result = expected_results)
            self._assert_resultset_equals_expected(redis_graph.query(query, {'param': param}), query_info)

    def test_expression_on_param(self):
        params = {'param': 1}
        query = "RETURN $param + 1"
        expected_results = [[2]]
            
        query_info = QueryInfo(query = query, description="Tests expression on param", expected_result = expected_results)
        self._assert_resultset_equals_expected(redis_graph.query(query, params), query_info)

    def test_node_retrival(self):
        p0 = Node(node_id=0, label="Person", properties={'name': 'a'})
        p1 = Node(node_id=1, label="Person", properties={'name': 'b'})
        p2 = Node(node_id=2, label="NoPerson", properties={'name': 'a'})
        redis_graph.add_node(p0)
        redis_graph.add_node(p1)
        redis_graph.add_node(p2)
        redis_graph.flush()

        params = {'name': 'a'}
        query = "MATCH (n :Person {name:$name}) RETURN n"
        expected_results = [[p0]]
            
        query_info = QueryInfo(query = query, description="Tests expression on param", expected_result = expected_results)
        self._assert_resultset_equals_expected(redis_graph.query(query, params), query_info)

    def test_parameterized_skip_limit(self):
        params = {'skip': 1, 'limit': 1}
        query = "UNWIND [1,2,3] AS X RETURN X SKIP $skip LIMIT $limit"
        expected_results = [[2]]
            
        query_info = QueryInfo(query = query, description="Tests skip limit as params", expected_result = expected_results)
        self._assert_resultset_equals_expected(redis_graph.query(query, params), query_info)

        # Set one parameter to non-integer value
        params = {'skip': '1', 'limit': 1}
        try:
            redis_graph.query(query, params)
            assert(False)
        except redis.exceptions.ResponseError as e:
            pass

    def test_missing_parameter(self):
        # Make sure missing parameters are reported back as an error.
        query = "RETURN $missing"
        try:
            redis_graph.query(query)
            assert(False)
        except:
            # Expecting an error.
            pass

        query = "MATCH (a) WHERE a.v = $missing RETURN a"
        try:
            redis_graph.query(query)
            assert(False)
        except:
            # Expecting an error.
            pass

        query = "MATCH (a) SET a.v = $missing RETURN a"
        try:
            redis_graph.query(query)
            assert(False)
        except:
            # Expecting an error.
            pass

    def test_id_scan(self):
        redis_graph.query("CREATE ({val:1})")
        expected_results=[[1]]
        params = {'id' : 0}
        query = "MATCH (n) WHERE id(n)=$id return n.val"
        query_info = QueryInfo(query = query, description="Test id scan with params", expected_result = expected_results)
        self._assert_resultset_equals_expected(redis_graph.query(query, params), query_info)
        query = redis_graph.build_params_header(params) + query
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('NodeByIdSeek', plan)
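
testParams depends on QueryInfo and _assert_resultset_equals_expected from the RedisGraph flow-test utilities, neither of which is shown in this example. A plausible minimal sketch, assuming QueryInfo is a plain container and the assertion compares result sets directly:

class QueryInfo(object):
    def __init__(self, query=None, description=None, expected_result=None, max_run_time_ms=0):
        self.query = query
        self.description = description
        self.expected_result = expected_result
        self.max_run_time_ms = max_run_time_ms

# Assumed shape of the assertion helper used above (a FlowTestsBase method in the suite):
#     def _assert_resultset_equals_expected(self, actual_result, query_info):
#         self.env.assertEqual(actual_result.result_set, query_info.expected_result)
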
Code example #17
class testGraphCreationFlow(FlowTestsBase):
    def __init__(self):
        self.env = Env()
        global redis_graph
        redis_con = self.env.getConnection()
        redis_graph = Graph(GRAPH_ID, redis_con)

    def test01_create_return(self):
        query = """CREATE (a:person {name:'A'}), (b:person {name:'B'})"""
        result = redis_graph.query(query)
        self.env.assertEquals(result.nodes_created, 2)

        query = """MATCH (src:person) CREATE (src)-[e:knows]->(dest {name:'C'}) RETURN src,e,dest ORDER BY ID(src) DESC LIMIT 1"""
        result = redis_graph.query(query)
        self.env.assertEquals(result.nodes_created, 2)
        self.env.assertEquals(result.relationships_created, 2)
        self.env.assertEquals(len(result.result_set), 1)
        self.env.assertEquals(result.result_set[0][0].properties['name'], 'B')

    def test02_create_from_prop(self):
        query = """MATCH (p:person)-[e:knows]->() CREATE (c:clone {doublename: p.name + toLower(p.name), source_of: TYPE(e)}) RETURN c.doublename, c.source_of ORDER BY c.doublename"""
        result = redis_graph.query(query)
        expected_result = [['Aa', 'knows'], ['Bb', 'knows']]

        self.env.assertEquals(result.labels_added, 1)
        self.env.assertEquals(result.nodes_created, 2)
        self.env.assertEquals(result.properties_set, 4)
        self.env.assertEquals(result.result_set, expected_result)

    def test03_create_from_projection(self):
        query = """UNWIND [10,20,30] AS x CREATE (p:person {age:x}) RETURN p.age ORDER BY p.age"""
        result = redis_graph.query(query)
        expected_result = [[10], [20], [30]]
        self.env.assertEquals(result.nodes_created, 3)
        self.env.assertEquals(result.properties_set, 3)
        self.env.assertEquals(result.result_set, expected_result)

    def test04_create_with_null_properties(self):
        query = """CREATE (a:L {v1: NULL, v2: 'prop'}) RETURN a"""
        result = redis_graph.query(query)
        node = Node(label="L", properties={"v2": "prop"})
        expected_result = [[node]]

        self.env.assertEquals(result.labels_added, 1)
        self.env.assertEquals(result.nodes_created, 1)
        self.env.assertEquals(result.properties_set, 1)
        self.env.assertEquals(result.result_set, expected_result)

        # Create 2 new nodes, one with no properties and one with a property 'v'
        query = """CREATE (:M), (:M {v: 1})"""
        redis_graph.query(query)

        # Verify that a MATCH...CREATE accesses the property correctly.
        query = """MATCH (m:M) WITH m ORDER BY m.v DESC CREATE ({v: m.v})"""
        result = redis_graph.query(query)
        self.env.assertEquals(result.nodes_created, 2)
        self.env.assertEquals(result.properties_set, 1)

    def test05_create_with_property_reference(self):
        # Skip this test if running under Valgrind, as it causes a memory leak.
        if self.env.envRunner.debugger is not None:
            self.env.skip()

        # Queries that reference properties before they have been created should emit an error.
        try:
            query = """CREATE (a {val: 2}), (b {val: a.val})"""
            redis_graph.query(query)
            self.env.assertTrue(False)
        except redis.exceptions.ResponseError as e:
            self.env.assertIn("undefined property", e.message)
Code example #18
class test_v7_encode_decode(FlowTestsBase):
    def __init__(self):
        self.env = Env(moduleArgs='VKEY_MAX_ENTITY_COUNT 10')
        global redis_con
        redis_con = self.env.getConnection()

    def test01_nodes_over_multiple_keys(self):
        graph_name = "nodes_over_multiple_keys"
        redis_graph = Graph(graph_name, redis_con)
        # Create 3 nodes meta keys
        redis_graph.query("UNWIND range(0,20) as i CREATE ({val:i})")
        # Return all the nodes, before and after saving & loading the RDB, and check equality
        query = "MATCH (n) return n"
        expected = redis_graph.query(query)
        # Save RDB & Load from RDB
        redis_con.execute_command("DEBUG", "RELOAD")
        actual = redis_graph.query(query)
        self.env.assertEquals(expected.result_set, actual.result_set)

    def test02_no_compaction_on_nodes_delete(self):
        graph_name = "no_compaction_on_nodes_delete"
        redis_graph = Graph(graph_name, redis_con)
        # Create 3 nodes meta keys
        redis_graph.query("UNWIND range(0,20) as i CREATE ()")
        # Return all the nodes, before and after saving & loading the RDB, and check equality
        query = "MATCH (n) WITH n ORDER by id(n) return COLLECT(id(n))"
        expected_full_graph_nodes_id = redis_graph.query(query)
        # Delete 3 nodes.
        redis_graph.query("MATCH (n) WHERE id(n) IN [7,14,20] DELETE n")
        expected_nodes_id_after_delete = redis_graph.query(query)
        # Save RDB & Load from RDB
        redis_con.execute_command("DEBUG", "RELOAD")
        actual = redis_graph.query(query)
        # Validate no compaction, all IDs are the same
        self.env.assertEquals(expected_nodes_id_after_delete.result_set,
                              actual.result_set)
        # Validate reuse of node ids - create 3 nodes.
        redis_graph.query("UNWIND range (0,2) as i CREATE ()")
        actual = redis_graph.query(query)
        self.env.assertEquals(expected_full_graph_nodes_id.result_set,
                              actual.result_set)

    def test03_edges_over_multiple_keys(self):
        graph_name = "edges_over_multiple_keys"
        redis_graph = Graph(graph_name, redis_con)
        # Create 3 edges meta keys
        redis_graph.query("UNWIND range(0,20) as i CREATE ()-[:R {val:i}]->()")
        # Return all the edges, before and after saving & loading the RDB, and check equality
        query = "MATCH ()-[e]->() return e"
        expected = redis_graph.query(query)
        # Save RDB & Load from RDB
        redis_con.execute_command("DEBUG", "RELOAD")
        actual = redis_graph.query(query)
        self.env.assertEquals(expected.result_set, actual.result_set)

    def test04_no_compaction_on_edges_delete(self):
        graph_name = "no_compaction_on_edges_delete"
        redis_graph = Graph(graph_name, redis_con)
        # Create 3 edges meta keys
        redis_graph.query("UNWIND range(0,20) as i CREATE ()-[:R]->()")
        # Return all the edges, before and after saving & loading the RDB, and check equality
        query = "MATCH ()-[e]->() WITH e ORDER by id(e) return COLLECT(id(e))"
        expected_full_graph_nodes_id = redis_graph.query(query)
        # Delete 3 edges.
        redis_graph.query("MATCH ()-[e]->() WHERE id(e) IN [7,14,20] DELETE e")
        expected_nodes_id_after_delete = redis_graph.query(query)
        # Save RDB & Load from RDB
        redis_con.execute_command("DEBUG", "RELOAD")
        actual = redis_graph.query(query)
        # Validate no compaction, all IDs are the same
        self.env.assertEquals(expected_nodes_id_after_delete.result_set,
                              actual.result_set)
        # Validate reuse of edges ids - create 3 edges.
        redis_graph.query("UNWIND range (0,2) as i CREATE ()-[:R]->()")
        actual = redis_graph.query(query)
        self.env.assertEquals(expected_full_graph_nodes_id.result_set,
                              actual.result_set)

    def test05_multiple_edges_over_multiple_keys(self):
        graph_name = "multiple_edges_over_multiple_keys"
        redis_graph = Graph(graph_name, redis_con)
        # Create 3 edges meta keys
        redis_graph.query(
            "CREATE (n1 {val:1}), (n2 {val:2}) WITH n1, n2 UNWIND range(0,20) as i CREATE (n1)-[:R {val:i}]->(n2)"
        )
        # Return all the edges, before and after saving & loading the RDB, and check equality
        query = "MATCH ()-[e]->() return e"
        expected = redis_graph.query(query)
        # Save RDB & Load from RDB
        redis_con.execute_command("DEBUG", "RELOAD")
        actual = redis_graph.query(query)
        self.env.assertEquals(expected.result_set, actual.result_set)

    def test06_no_compaction_on_multiple_edges_delete(self):
        graph_name = "no_compaction_on_multiple_edges_delete"
        redis_graph = Graph(graph_name, redis_con)
        # Create 3 edges meta keys
        redis_graph.query(
            "CREATE (n1 {val:1}), (n2 {val:2}) WITH n1, n2 UNWIND range(0,20) as i CREATE (n1)-[:R]->(n2)"
        )
        # Return all the edges, before and after saving & loading the RDB, and check equality
        query = "MATCH ()-[e]->() WITH e ORDER by id(e) return COLLECT(id(e))"
        expected_full_graph_nodes_id = redis_graph.query(query)
        # Delete 3 edges.
        redis_graph.query("MATCH ()-[e]->() WHERE id(e) IN [7,14,20] DELETE e")
        expected_nodes_id_after_delete = redis_graph.query(query)
        # Save RDB & Load from RDB
        redis_con.execute_command("DEBUG", "RELOAD")
        actual = redis_graph.query(query)
        # Validate no compaction, all IDs are the same
        self.env.assertEquals(expected_nodes_id_after_delete.result_set,
                              actual.result_set)
        # Validate reuse of edges ids - create 3 edges.
        redis_graph.query(
            "MATCH (n1 {val:1}), (n2 {val:2}) WITH n1, n2 UNWIND range (0,2) as i CREATE ()-[:R]->()"
        )
        actual = redis_graph.query(query)
        self.env.assertEquals(expected_full_graph_nodes_id.result_set,
                              actual.result_set)

    def test07_index_after_encode_decode_in_v7(self):
        graph_name = "index_after_encode_decode_in_v7"
        redis_graph = Graph(graph_name, redis_con)
        redis_graph.query("CREATE INDEX ON :N(val)")
        # Verify indices exists.
        plan = redis_graph.execution_plan("MATCH (n:N {val:1}) RETURN n")
        self.env.assertIn("Index Scan", plan)
        # Save RDB & Load from RDB
        redis_con.execute_command("DEBUG", "RELOAD")
        # Verify indices exists after loading RDB.
        plan = redis_graph.execution_plan("MATCH (n:N {val:1}) RETURN n")
        self.env.assertIn("Index Scan", plan)
Code example #19
class testMap(FlowTestsBase):
    def __init__(self):
        global redis_graph
        self.env = Env(decodeResponses=True)
        redis_con = self.env.getConnection()
        redis_graph = Graph(GRAPH_ID, redis_con)
        self.populate_graph()

    def populate_graph(self):
        # Construct a graph with the form:
        # (v1)-[:E]->(v2)-[:E]->(v3)
        q = """CREATE (:L {val:1})-[:E]->(:L {val:2})-[:E]->(:L {val:3})"""
        redis_graph.query(q)

    # Validate basic map lookup operations
    def test01_basic_map_accesses(self):
        # Return a full map
        query = """WITH {val: 5} AS map RETURN map"""
        query_result = redis_graph.query(query)
        expected_result = [[{'val': 5}]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # Return a map value with dot notation
        query = """WITH {val: 5} AS map RETURN map.val"""
        query_result = redis_graph.query(query)
        expected_result = [[5]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # Return a map value with bracket notation
        query = """WITH {val: 5} AS map RETURN map['val']"""
        query_result = redis_graph.query(query)
        expected_result = [[5]]
        self.env.assertEquals(query_result.result_set, expected_result)

    # Validate map projection behavior
    def test02_map_projections(self):
        query = """MATCH (a) RETURN a {.val} ORDER BY a.val"""
        query_result = redis_graph.query(query)
        expected_result = [[{'val': 1}],
                           [{'val': 2}],
                           [{'val': 3}]]
        self.env.assertEquals(query_result.result_set, expected_result)

        query = """WITH 'lit' AS literal MATCH (a) RETURN a {.val, literal} ORDER BY a.val"""
        query_result = redis_graph.query(query)
        expected_result = [[{'val': 1, 'literal': 'lit'}],
                           [{'val': 2, 'literal': 'lit'}],
                           [{'val': 3, 'literal': 'lit'}]]
        self.env.assertEquals(query_result.result_set, expected_result)

    # Validate behaviors of nested maps
    def test03_nested_maps(self):
        # Return a map with nesting
        query = """WITH {val: 5, nested: {nested_val: 'nested_str'}} AS map RETURN map"""
        query_result = redis_graph.query(query)
        expected_result = [[{'val': 5, 'nested': {'nested_val': 'nested_str'}}]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # Return just the nested value
        query = """WITH {val: 5, nested: {nested_val: 'nested_str'}} AS map RETURN map.nested"""
        query_result = redis_graph.query(query)
        expected_result = [[{'nested_val': 'nested_str'}]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # Perform operations on map values
        query = """WITH {val: 5, nested: {nested_val: 'nested_str'}} AS map RETURN map.val + '_' + map.nested.nested_val"""
        query_result = redis_graph.query(query)
        expected_result = [['5_nested_str']]
        self.env.assertEquals(query_result.result_set, expected_result)

        # Deeply nested map
        query = """RETURN {a: {b: {c: {d: {e: {f: {g: {h: {i: {j: {}}}}}}}}}}}"""
        query_result = redis_graph.query(query)
        expected_result = [[{'a': {'b': {'c': {'d': {'e': {'f': {'g': {'h': {'i': {'j': {}}}}}}}}}}}]]
        self.env.assertEquals(query_result.result_set, expected_result)

    # Validate map sorting logic (first by keys, then by values)
    def test04_map_sorting(self):
        query = """UNWIND[{b: 1}, {a: 2}] AS map RETURN map ORDER BY map"""
        query_result = redis_graph.query(query)
        expected_result = [[{'a': 2}],
                           [{'b': 1}]]
        self.env.assertEquals(query_result.result_set, expected_result)

        query = """UNWIND[{a: 2}, {a: 1}] AS map RETURN map ORDER BY map"""
        query_result = redis_graph.query(query)
        expected_result = [[{'a': 1}],
                           [{'a': 2}]]
        self.env.assertEquals(query_result.result_set, expected_result)

        query = """UNWIND[{a: 2}, {x: 1, k: 2}] AS map RETURN map ORDER BY map"""
        query_result = redis_graph.query(query)
        expected_result = [[{'a': 2}],
                           [{'x': 1, 'k': 2}]]
        self.env.assertEquals(query_result.result_set, expected_result)

    # Validate map comparison logic (first by keys, then by values)
    def test05_map_comparison(self):
        query = """WITH {b: 2} AS map_1, {a: 1} AS map_2 RETURN map_1 > map_2, map_1 < map_2, map_1 = map_2, map_1 <> map_2"""
        query_result = redis_graph.query(query)
        expected_result = [[True, False, False, True]]
        self.env.assertEquals(query_result.result_set, expected_result)

        query = """WITH {a: 2} AS map_1, {a: 1} AS map_2 RETURN map_1 > map_2, map_1 < map_2, map_1 = map_2, map_1 <> map_2"""
        query_result = redis_graph.query(query)
        expected_result = [[True, False, False, True]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # Map equality is not predicated on key order.
        query = """WITH {a: 1, b: 2} AS map_1, {b: 2, a: 1} AS map_2 RETURN map_1 > map_2, map_1 < map_2, map_1 = map_2, map_1 <> map_2"""
        query_result = redis_graph.query(query)
        expected_result = [[False, False, True, False]]
        self.env.assertEquals(query_result.result_set, expected_result)

    # Validate that maps are handled correctly by the DISTINCT operator.
    def test05_map_distinct(self):
        # Map uniqueness is not predicated on key order.
        query = """UNWIND[{b: 2, a: 1}, {b: 2, a: 1}, {a: 1, b: 2}] AS map RETURN DISTINCT map"""
        query_result = redis_graph.query(query)
        expected_result = [[{'a': 1, 'b': 2}]]
        self.env.assertEquals(query_result.result_set, expected_result)

    # Validate that trying to access a map with a non-string key errors gracefully.
    def test06_map_invalid_key_lookup(self):
        try:
            query = """WITH {val: 5} AS map RETURN map[0]"""
            redis_graph.query(query)
            self.env.assertTrue(False)
        except redis.exceptions.ResponseError as e:
            self.env.assertIn("Type mismatch", str(e))
Code example #20
class testTraversalConstruction():
    def __init__(self):
        global graph

        self.env = Env(decodeResponses=True)
        redis_con = self.env.getConnection()
        graph = Graph("TraversalConstruction", redis_con)

    # Test differing starting points for the same search pattern
    def test_starting_point(self):
        # Neither the source nor the destination are labeled
        # perform an AllNodeScan from the source node.
        query = """MATCH (a)-[]->(b) RETURN a, b"""
        plan = graph.execution_plan(query)
        self.env.assertIn("All Node Scan | (a)", plan)

        # Destination is labeled, perform a LabelScan from the destination node.
        query = """MATCH (a)-[]->(b:B) RETURN a, b"""
        plan = graph.execution_plan(query)
        self.env.assertIn("Node By Label Scan | (b:B)", plan)

        # Destination is filtered, perform an AllNodeScan from the destination node.
        query = """MATCH (a)-[]->(b) WHERE b.v = 2 RETURN a, b"""
        plan = graph.execution_plan(query)
        self.env.assertIn("All Node Scan | (b)", plan)

        # Destination is labeled but source is filtered, perform an AllNodeScan from the source node.
        query = """MATCH (a)-[]->(b:B) WHERE a.v = 1 OR a.v = 3 RETURN a, b"""
        plan = graph.execution_plan(query)
        self.env.assertIn("All Node Scan | (a)", plan)

        # Both are labeled and source is filtered, perform a LabelScan from the source node.
        query = """MATCH (a:A)-[]->(b:B) WHERE a.v = 3 RETURN a, b"""
        plan = graph.execution_plan(query)
        self.env.assertIn("Node By Label Scan | (a:A)", plan)

        # Both are labeled and dest is filtered, perform a LabelScan from the dest node.
        query = """MATCH (a:A)-[]->(b:B) WHERE b.v = 2 RETURN a, b"""
        plan = graph.execution_plan(query)
        self.env.assertIn("Node By Label Scan | (b:B)", plan)

    # make sure traversal begins with labeled entity
    def test_start_with_label(self):
        queries = [
            "MATCH (A:L)-->(B)-->(C) RETURN 1",
            # "MATCH (A)-->(B:L)-->(C) RETURN 1", # improve on this case
            "MATCH (A)-->(B)-->(C:L) RETURN 1"
        ]

        for q in queries:
            plan = graph.execution_plan(q)
            ops = plan.split(os.linesep)
            ops.reverse()
            self.env.assertTrue("Node By Label Scan" in ops[0])

    # make sure traversal begins with filtered entity
    def test_start_with_filter(self):
        # MATCH (A)-->(B)-->(C) WHERE A.val = 1 RETURN *
        # MATCH (A)-->(B)-->(C) WHERE B.val = 1 RETURN *
        # MATCH (A)-->(B)-->(C) WHERE C.val = 1 RETURN *
        entities = ['A', 'B', 'C']
        for e in entities:
            q = """MATCH (A)-->(B)-->(C) WHERE {}.val = 1 RETURN *""".format(e)
            plan = graph.execution_plan(q)
            ops = plan.split(os.linesep)
            ops.reverse()

            self.env.assertTrue("All Node Scan | ({})".format(e) in ops[0])
            self.env.assertTrue("Filter" in ops[1])

    # make sure traversal begins with bound entity
    def test_start_with_bound(self):
        # MATCH (X) WITH X as A MATCH (A)-->(B)-->(C) RETURN *
        # MATCH (X) WITH X as B MATCH (A)-->(B)-->(C) RETURN *
        # MATCH (X) WITH X as C MATCH (A)-->(B)-->(C) RETURN *
        entities = ['A', 'B', 'C']
        for e in entities:
            q = "MATCH (X) WITH X as {} MATCH (A)-->(B)-->(C) RETURN *".format(
                e)
            plan = graph.execution_plan(q)
            ops = plan.split(os.linesep)
            ops.reverse()
            self.env.assertTrue(
                "Conditional Traverse | ({}".format(e) in ops[2])

    # make sure traversal begins with bound entity and follows with filter
    def test_start_with_bound_follows_with_filter(self):
        queries = [
            "MATCH (X) WITH X AS B MATCH (A {v:1})-->(B)-->(C) RETURN *",
            "MATCH (X) WITH X AS B MATCH (A)-->(B)-->(C {v:1}) RETURN *"
        ]
        for q in queries:
            plan = graph.execution_plan(q)
            ops = plan.split(os.linesep)
            ops.reverse()
            self.env.assertTrue("Filter" in ops[3])

    def test_filter_as_early_as_possible(self):
        q = """MATCH (A:L {v: 1})-->(B)-->(C), (B)-->(D:L {v: 1}) RETURN 1"""
        plan = graph.execution_plan(q)
        ops = plan.split(os.linesep)
        ops.reverse()
        self.env.assertTrue("Node By Label Scan"
                            in ops[0])  # scan either A or D
        self.env.assertTrue("Filter" in ops[1])  # filter either A or D
        self.env.assertTrue("Conditional Traverse"
                            in ops[2])  # traverse from A to D or from D to A
        self.env.assertTrue("Conditional Traverse"
                            in ops[3])  # traverse from A to D or from D to A
        self.env.assertTrue("Filter" in ops[4])  # filter either A or D

    def test_long_pattern(self):
        q = """match (a)--(b)--(c)--(d)--(e)--(f)--(g)--(h)--(i)--(j)--(k)--(l) return *"""
        plan = graph.execution_plan(q)
        ops = plan.split(os.linesep)
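        # A 12-node chain is expected to compile into one scan plus 11 traversals,
        # with projection and results on top, for 14 plan operations in total.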
        self.env.assertEqual(len(ops), 14)

    def test_start_with_index_filter(self):
        # TODO: enable this test, once we'll score higher filters that
        # have the potential turn into index scan
        return

        q = """CREATE INDEX ON :L(v)"""
        graph.query(q)

        q = """MATCH (a:L {v:1})-[]-(b:L {x:1}) RETURN a, b"""
        plan = graph.execution_plan(q)
        ops = plan.split(os.linesep)
        ops.reverse()
        self.env.assertTrue("Index Scan" in ops[0])  # start with index scan

        q = """MATCH (a:L {x:1})-[]-(b:L {v:1}) RETURN a, b"""
        plan = graph.execution_plan(q)
        ops = plan.split(os.linesep)
        ops.reverse()
        self.env.assertTrue("Index Scan" in ops[0])  # start with index scan
Code example #21
class testIndexScanFlow(FlowTestsBase):
    def __init__(self):
        self.env = Env()

    def setUp(self):
        global redis_graph
        redis_con = self.env.getConnection()
        redis_graph = Graph(social_utils.graph_name, redis_con)
        social_utils.populate_graph(redis_con, redis_graph)
        self.build_indices()

    def tearDown(self):
        self.env.cmd('flushall')

    def build_indices(self):
        global redis_graph
        redis_graph.redis_con.execute_command("GRAPH.QUERY", "social", "CREATE INDEX ON :person(age)")
        redis_graph.redis_con.execute_command("GRAPH.QUERY", "social", "CREATE INDEX ON :country(name)")

    # Validate that Cartesian products using index and label scans succeed
    def test01_cartesian_product_mixed_scans(self):
        query = "MATCH (p:person), (c:country) WHERE p.age > 0 RETURN p.age, c.name ORDER BY p.age, c.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        self.env.assertIn('Label Scan', plan)
        indexed_result = redis_graph.query(query)

        query = "MATCH (p:person), (c:country) RETURN p.age, c.name ORDER BY p.age, c.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertNotIn('Index Scan', plan)
        self.env.assertIn('Label Scan', plan)
        unindexed_result = redis_graph.query(query)

        self.env.assertEquals(indexed_result.result_set, unindexed_result.result_set)

    # Validate that Cartesian products using just index scans succeed
    def test02_cartesian_product_index_scans_only(self):
        query = "MATCH (p:person), (c:country) WHERE p.age > 0 AND c.name > '' RETURN p.age, c.name ORDER BY p.age, c.name"
        plan = redis_graph.execution_plan(query)
        # The two streams should both use index scans
        self.env.assertEquals(plan.count('Index Scan'), 2)
        self.env.assertNotIn('Label Scan', plan)
        indexed_result = redis_graph.query(query)

        query = "MATCH (p:person), (c:country) RETURN p.age, c.name ORDER BY p.age, c.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertNotIn('Index Scan', plan)
        self.env.assertIn('Label Scan', plan)
        unindexed_result = redis_graph.query(query)

        self.env.assertEquals(indexed_result.result_set, unindexed_result.result_set)

    # Validate that the appropriate bounds are respected when a Cartesian product uses the same index in two streams
    def test03_cartesian_product_reused_index(self):
        redis_graph.redis_con.execute_command("GRAPH.QUERY", "social", "CREATE INDEX ON :person(name)")
        query = "MATCH (a:person {name: 'Omri Traub'}), (b:person) WHERE b.age <= 30 RETURN a.name, b.name ORDER BY a.name, b.name"
        plan = redis_graph.execution_plan(query)
        # The two streams should both use index scans
        self.env.assertEquals(plan.count('Index Scan'), 2)
        self.env.assertNotIn('Label Scan', plan)


        expected_result = [['Omri Traub', 'Gal Derriere'],
                           ['Omri Traub', 'Lucy Yanfital']]
        result = redis_graph.query(query)

        self.env.assertEquals(result.result_set, expected_result)

    # Validate index utilization when filtering on a numeric field with the `IN` keyword.
    def test04_test_in_operator_numerics(self):
        # Validate the transformation of IN to multiple OR expressions.
        query = "MATCH (p:person) WHERE p.age IN [1,2,3] RETURN p"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)

        # Validate that nested arrays are not scanned in index.
        query = "MATCH (p:person) WHERE p.age IN [[1,2],3] RETURN p"
        plan = redis_graph.execution_plan(query)
        self.env.assertNotIn('Index Scan', plan)
        self.env.assertIn('Label Scan', plan)

        # Validate the transformation of IN to multiple OR, over a range.
        query = "MATCH (p:person) WHERE p.age IN range(0,30) RETURN p.name ORDER BY p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)

        expected_result = [['Gal Derriere'], ['Lucy Yanfital']]
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Validate the transformation of IN into an empty index iterator.
        query = "MATCH (p:person) WHERE p.age IN [] RETURN p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)

        expected_result = []
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Validate the transformation of IN OR IN to empty index iterators.
        query = "MATCH (p:person) WHERE p.age IN [] OR p.age IN [] RETURN p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)

        expected_result = []
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Validate the transformation of multiple IN filters.
        query = "MATCH (p:person) WHERE p.age IN [26, 27, 30] OR p.age IN [33, 34, 35] RETURN p.name ORDER BY p.age"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)

        expected_result = [['Gal Derriere'], ['Lucy Yanfital'], ['Omri Traub'], ['Noam Nativ']]
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Validate the transformation of multiple IN filters.
        query = "MATCH (p:person) WHERE p.age IN [26, 27, 30] OR p.age IN [33, 34, 35] OR p.age IN [] RETURN p.name ORDER BY p.age"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)

        expected_result = [['Gal Derriere'], ['Lucy Yanfital'], ['Omri Traub'], ['Noam Nativ']]
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

    # Validate index utilization when filtering on string fields with the `IN` keyword.
    def test05_test_in_operator_string_props(self):
        # Build an index on the name property.
        redis_graph.redis_con.execute_command("GRAPH.QUERY", "social", "CREATE INDEX ON :person(name)")
        # Validate the transformation of IN to multiple OR expressions over string properties.
        query = "MATCH (p:person) WHERE p.name IN ['Gal Derriere', 'Lucy Yanfital'] RETURN p.name ORDER BY p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        self.env.assertNotIn('Label Scan', plan)

        expected_result = [['Gal Derriere'], ['Lucy Yanfital']]
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Combine numeric and string filters specified by IN.
        query = "MATCH (p:person) WHERE p.name IN ['Gal Derriere', 'Lucy Yanfital'] AND p.age in [30] RETURN p.name ORDER BY p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        self.env.assertNotIn('Label Scan', plan)

        expected_result = [['Lucy Yanfital']]
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Validate empty IN lists when filtering on multiple indexed properties
        query = "MATCH (p:person) WHERE p.name IN [] OR p.age IN [] RETURN p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)

        expected_result = []
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Combine IN filters with other relational filters.
        query = "MATCH (p:person) WHERE p.name IN ['Gal Derriere', 'Lucy Yanfital'] AND p.name < 'H' RETURN p.name ORDER BY p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        self.env.assertNotIn('Label Scan', plan)

        expected_result = [['Gal Derriere']]
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        query = "MATCH (p:person) WHERE p.name IN ['Gal Derriere', 'Lucy Yanfital'] OR p.age = 33 RETURN p.name ORDER BY p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        self.env.assertNotIn('Label Scan', plan)

        expected_result = [['Gal Derriere'], ['Lucy Yanfital'], ['Omri Traub']]
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

    # ',' is the default separator for tag indices
    # we've updated our separator to '\0' this test verifies issue 696:
    # https://github.com/RedisGraph/RedisGraph/issues/696
    def test06_tag_separator(self):
        redis_con = self.env.getConnection()
        redis_graph = Graph("G", redis_con)

        # Create a single node with a long string property, introduce a comma as part of the string.
        query = """CREATE (:Node{value:"A ValuePartition is a pattern that describes a restricted set of classes from which a property can be associated. The parent class is used in restrictions, and the covering axiom means that only members of the subclasses may be used as values."})"""
        redis_graph.query(query)

        # Index property.
        query = """CREATE INDEX ON :Node(value)"""
        redis_graph.query(query)

        # Make sure node is returned by index scan.
        query = """MATCH (a:Node{value:"A ValuePartition is a pattern that describes a restricted set of classes from which a property can be associated. The parent class is used in restrictions, and the covering axiom means that only members of the subclasses may be used as values."}) RETURN a"""
        plan = redis_graph.execution_plan(query)
        result_set = redis_graph.query(query).result_set
        self.env.assertIn('Index Scan', plan)
        self.env.assertEqual(len(result_set), 1)

    def test07_index_scan_and_id(self):
        redis_con = self.env.getConnection()
        redis_graph = Graph("G", redis_con)
        nodes=[]
        for i in range(10):
            node = Node(node_id=i, label='person', properties={'age':i})
            nodes.append(node)
            redis_graph.add_node(node)
            redis_graph.flush()
        
        query = """CREATE INDEX ON :person(age)"""
        query_result = redis_graph.query(query)
        self.env.assertEqual(1, query_result.indices_created)

        query = """MATCH (n:person) WHERE id(n)>=7 AND n.age<9 RETURN n ORDER BY n.age"""
        plan = redis_graph.execution_plan(query)
        query_result = redis_graph.query(query)
        self.env.assertIn('Index Scan', plan)
        self.env.assertIn('Filter', plan)

        self.env.assertEqual(2, len(query_result.result_set))
        expected_result = [[nodes[7]], [nodes[8]]]
        self.env.assertEquals(expected_result, query_result.result_set)

    # Validate placement of index scans and filter ops when not all filters can be replaced.
    def test08_index_scan_multiple_filters(self):
        query = "MATCH (p:person) WHERE p.age = 30 AND NOT EXISTS(p.fakeprop) RETURN p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        self.env.assertNotIn('Label Scan', plan)
        self.env.assertIn('Filter', plan)

        query_result = redis_graph.query(query)
        expected_result = ["Lucy Yanfital"]
        self.env.assertEquals(query_result.result_set[0], expected_result)
Code example #22
class testGraphCreationFlow(FlowTestsBase):
    def __init__(self):
        self.env = Env(decodeResponses=True)
        global redis_graph
        redis_con = self.env.getConnection()
        redis_graph = Graph(GRAPH_ID, redis_con)

    def test01_create_return(self):
        query = """CREATE (a:person {name:'A'}), (b:person {name:'B'})"""
        result = redis_graph.query(query)
        self.env.assertEquals(result.nodes_created, 2)

        query = """MATCH (src:person) CREATE (src)-[e:knows]->(dest {name:'C'}) RETURN src,e,dest ORDER BY ID(src) DESC LIMIT 1"""
        result = redis_graph.query(query)
        self.env.assertEquals(result.nodes_created, 2)
        self.env.assertEquals(result.relationships_created, 2)
        self.env.assertEquals(len(result.result_set), 1)
        self.env.assertEquals(result.result_set[0][0].properties['name'], 'B')

    def test02_create_from_prop(self):
        query = """MATCH (p:person)-[e:knows]->() CREATE (c:clone {doublename: p.name + toLower(p.name), source_of: TYPE(e)}) RETURN c.doublename, c.source_of ORDER BY c.doublename"""
        result = redis_graph.query(query)
        expected_result = [['Aa', 'knows'], ['Bb', 'knows']]

        self.env.assertEquals(result.labels_added, 1)
        self.env.assertEquals(result.nodes_created, 2)
        self.env.assertEquals(result.properties_set, 4)
        self.env.assertEquals(result.result_set, expected_result)

    def test03_create_from_projection(self):
        query = """UNWIND [10,20,30] AS x CREATE (p:person {age:x}) RETURN p.age ORDER BY p.age"""
        result = redis_graph.query(query)
        expected_result = [[10], [20], [30]]
        self.env.assertEquals(result.nodes_created, 3)
        self.env.assertEquals(result.properties_set, 3)
        self.env.assertEquals(result.result_set, expected_result)

        query = """UNWIND ['Vancouver', 'Portland', 'Calgary'] AS city CREATE (p:person {birthplace: city}) RETURN p.birthplace ORDER BY p.birthplace"""
        result = redis_graph.query(query)
        expected_result = [['Calgary'], ['Portland'], ['Vancouver']]
        self.env.assertEquals(result.nodes_created, 3)
        self.env.assertEquals(result.properties_set, 3)
        self.env.assertEquals(result.result_set, expected_result)

    def test04_create_with_null_properties(self):
        query = """CREATE (a:L {v1: NULL, v2: 'prop'}) RETURN a"""
        result = redis_graph.query(query)
        node = Node(label="L", properties={"v2": "prop"})
        expected_result = [[node]]

        self.env.assertEquals(result.labels_added, 1)
        self.env.assertEquals(result.nodes_created, 1)
        self.env.assertEquals(result.properties_set, 1)
        self.env.assertEquals(result.result_set, expected_result)

        # Create 2 new nodes, one with no properties and one with a property 'v'
        query = """CREATE (:M), (:M {v: 1})"""
        redis_graph.query(query)

        # Verify that a MATCH...CREATE accesses the property correctly.
        query = """MATCH (m:M) WITH m ORDER BY m.v DESC CREATE ({v: m.v})"""
        result = redis_graph.query(query)
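        # The node created from the bare (:M) has no 'v' value; since NULL
        # properties are not stored, only one property is expected to be set
        # across the two created nodes.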
        self.env.assertEquals(result.nodes_created, 2)
        self.env.assertEquals(result.properties_set, 1)

    def test05_create_with_property_reference(self):
        # Skip this test if running under Valgrind, as it causes a memory leak.
        if self.env.envRunner.debugger is not None:
            self.env.skip()

        # Queries that reference properties before they have been created should emit an error.
        try:
            query = """CREATE (a {val: 2}), (b {val: a.val})"""
            redis_graph.query(query)
            self.env.assertTrue(False)
        except redis.exceptions.ResponseError as e:
            self.env.assertIn("undefined property", str(e))

    def test06_create_project_volatile_value(self):
        # The path e is volatile; verify that it can be projected after entity creation.
        query = """MATCH ()-[e*]->() CREATE (:L) WITH e RETURN 5"""
        result = redis_graph.query(query)
        expected_result = [[5], [5]]

        self.env.assertEquals(result.nodes_created, 2)
        self.env.assertEquals(result.result_set, expected_result)

        query = """UNWIND [1, 2] AS val WITH collect(val) AS arr CREATE (:L) RETURN arr"""
        result = redis_graph.query(query)
        expected_result = [[[1, 2]]]

        self.env.assertEquals(result.nodes_created, 1)
        self.env.assertEquals(result.result_set, expected_result)

    # Fail when a property is a complex type nested within an array type
    def test07_create_invalid_complex_type_in_array(self):
        # Test combinations of invalid types with nested and top-level arrays
        # Invalid types are NULL, maps, nodes, edges, and paths
        queries = ["CREATE (a), (b) SET a.v = [b]",
                   "CREATE (a {v: ['str', [1, NULL]]})",
                   "CREATE (a {v: [[{k: 'v'}]]})",
                   "CREATE (a:L {v: [e]})-[e:R]->(:L)"]
        for query in queries:
            try:
                redis_graph.query(query)
                self.env.assertTrue(False)
            except redis.exceptions.ResponseError as e:
                self.env.assertContains("Property values can only be of primitive types or arrays of primitive types", str(e))
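
    # As a counterpart sketch: arrays that contain only primitive types are valid
    # property values, so a query along these lines is expected to succeed:
    #   redis_graph.query("CREATE (:L {v: [1, 2.5, 'str', true]})")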
Code example #23
class testGraphPersistency():
    def __init__(self):
        self.env = Env(decodeResponses=True)
        global redis_con
        redis_con = self.env.getConnection()

    def populate_graph(self, graph_name):
        redis_graph = Graph(graph_name, redis_con)
        # quick return if graph already exists
        if redis_con.exists(graph_name):
            return redis_graph

        people = ["Roi", "Alon", "Ailon", "Boaz", "Tal", "Omri", "Ori"]
        visits = [("Roi", "USA"), ("Alon", "Israel"), ("Ailon", "Japan"),
                  ("Boaz", "United Kingdom")]
        countries = ["Israel", "USA", "Japan", "United Kingdom"]
        personNodes = {}
        countryNodes = {}

        # create nodes
        for p in people:
            person = Node(label="person",
                          properties={
                              "name": p,
                              "height": random.randint(160, 200)
                          })
            redis_graph.add_node(person)
            personNodes[p] = person

        for c in countries:
            country = Node(label="country",
                           properties={
                               "name": c,
                               "population": random.randint(100, 400)
                           })
            redis_graph.add_node(country)
            countryNodes[c] = country

        # create edges
        for v in visits:
            person = v[0]
            country = v[1]
            edge = Edge(personNodes[person],
                        'visit',
                        countryNodes[country],
                        properties={'purpose': 'pleasure'})
            redis_graph.add_edge(edge)

        redis_graph.commit()

        # delete nodes, to introduce deleted entries within our datablock
        query = """MATCH (n:person) WHERE n.name = 'Roi' or n.name = 'Ailon' DELETE n"""
        redis_graph.query(query)

        query = """MATCH (n:country) WHERE n.name = 'USA' DELETE n"""
        redis_graph.query(query)

        # create indices
        redis_con.execute_command(
            "GRAPH.QUERY", graph_name,
            "CREATE INDEX FOR (p:Person) ON (p.name, p.height)")
        redis_con.execute_command(
            "GRAPH.QUERY", graph_name,
            "CREATE INDEX FOR (c:country) ON (c.name, c.population)")
        actual_result = redis_con.execute_command(
            "GRAPH.QUERY", graph_name,
            "CREATE INDEX FOR ()-[r:visit]-() ON (r.purpose)")
        actual_result = redis_con.execute_command(
            "GRAPH.QUERY", graph_name,
            "CALL db.idx.fulltext.createNodeIndex({label: 'person', stopwords: ['A', 'B'], language: 'english'}, 'text')"
        )

        return redis_graph

    def populate_dense_graph(self, graph_name):
        dense_graph = Graph(graph_name, redis_con)

        # return early if graph exists
        if redis_con.exists(graph_name):
            return dense_graph

        nodes = []
        for i in range(10):
            node = Node(label="n", properties={"val": i})
            dense_graph.add_node(node)
            nodes.append(node)

        for n_idx, n in enumerate(nodes):
            for m_idx, m in enumerate(nodes[:n_idx]):
                dense_graph.add_edge(Edge(n, "connected", m))

        dense_graph.flush()
        return dense_graph

    def test01_save_load(self):
        graph_names = ["G", "{tag}_G"]
        for graph_name in graph_names:
            graph = self.populate_graph(graph_name)
            for i in range(2):
                if i == 1:
                    # Save RDB & Load from RDB
                    self.env.dumpAndReload()

                # Verify
                # Expecting 5 person entities.
                query = """MATCH (p:person) RETURN COUNT(p)"""
                actual_result = graph.query(query)
                nodeCount = actual_result.result_set[0][0]
                self.env.assertEquals(nodeCount, 5)

                query = """MATCH (p:person) WHERE p.name='Alon' RETURN COUNT(p)"""
                actual_result = graph.query(query)
                nodeCount = actual_result.result_set[0][0]
                self.env.assertEquals(nodeCount, 1)

                # Expecting 3 country entities.
                query = """MATCH (c:country) RETURN COUNT(c)"""
                actual_result = graph.query(query)
                nodeCount = actual_result.result_set[0][0]
                self.env.assertEquals(nodeCount, 3)

                query = """MATCH (c:country) WHERE c.name = 'Israel' RETURN COUNT(c)"""
                actual_result = graph.query(query)
                nodeCount = actual_result.result_set[0][0]
                self.env.assertEquals(nodeCount, 1)

                # Expecting 2 visit edges.
                query = """MATCH (n:person)-[e:visit]->(c:country) WHERE e.purpose='pleasure' RETURN COUNT(e)"""
                actual_result = graph.query(query)
                edgeCount = actual_result.result_set[0][0]
                self.env.assertEquals(edgeCount, 2)

                # Verify indices exist
                indices = graph.query("""CALL db.indexes()""").result_set
                expected_indices = [[
                    'exact-match', 'country', ['name', 'population'],
                    'english', [], 'NODE'
                ],
                                    [
                                        'exact-match', 'person',
                                        ['name', 'height'], 'english', [],
                                        'NODE'
                                    ],
                                    [
                                        'full-text', 'person', ['text'],
                                        'english', ['a', 'b'], 'NODE'
                                    ],
                                    [
                                        'exact-match', 'visit',
                                        ['_src_id', '_dest_id', 'purpose'],
                                        'english', [], 'RELATIONSHIP'
                                    ]]

                self.env.assertEquals(len(indices), len(expected_indices))
                for index in indices:
                    self.env.assertIn(index, expected_indices)

    # Verify that edges are not modified after entity deletion
    def test02_deleted_entity_migration(self):
        graph_names = ("H", "{tag}_H")
        for graph_name in graph_names:
            graph = self.populate_dense_graph(graph_name)

            query = """MATCH (p) WHERE ID(p) = 0 OR ID(p) = 3 OR ID(p) = 7 OR ID(p) = 9 DELETE p"""
            actual_result = graph.query(query)
            self.env.assertEquals(actual_result.nodes_deleted, 4)

            query = """MATCH (p)-[]->(q) RETURN p.val, q.val ORDER BY p.val, q.val"""
            first_result = graph.query(query)

            # Save RDB & Load from RDB
            self.env.dumpAndReload()

            second_result = graph.query(query)
            self.env.assertEquals(first_result.result_set,
                                  second_result.result_set)

    # String, numeric, boolean, array, and point properties should be properly serialized and reloaded
    def test03_restore_properties(self):
        graph_names = ("simple_props", "{tag}_simple_props")
        for graph_name in graph_names:
            graph = Graph(graph_name, redis_con)

            query = """CREATE (:p {strval: 'str', numval: 5.5, boolval: true, array: [1,2,3], pointval: point({latitude: 5.5, longitude: 6})})"""
            result = graph.query(query)

            # Verify that node was created correctly
            self.env.assertEquals(result.nodes_created, 1)
            self.env.assertEquals(result.properties_set, 5)

            # Save RDB & Load from RDB
            self.env.dumpAndReload()

            query = """MATCH (p) RETURN p.boolval, p.numval, p.strval, p.array, p.pointval"""
            actual_result = graph.query(query)

            # Verify that the properties are loaded correctly.
            expected_result = [[
                True, 5.5, 'str', [1, 2, 3], {
                    "latitude": 5.5,
                    "longitude": 6.0
                }
            ]]
            self.env.assertEquals(actual_result.result_set, expected_result)

    # Verify multiple edges of the same relation between nodes A and B
    # are saved and restored correctly.
    def test04_repeated_edges(self):
        graph_names = ["repeated_edges", "{tag}_repeated_edges"]
        for graph_name in graph_names:
            graph = Graph(graph_name, redis_con)
            src = Node(label='p', properties={'name': 'src'})
            dest = Node(label='p', properties={'name': 'dest'})
            edge1 = Edge(src, 'e', dest, properties={'val': 1})
            edge2 = Edge(src, 'e', dest, properties={'val': 2})

            graph.add_node(src)
            graph.add_node(dest)
            graph.add_edge(edge1)
            graph.add_edge(edge2)
            graph.flush()

            # Verify the new edge
            q = """MATCH (a)-[e]->(b) RETURN e.val, a.name, b.name ORDER BY e.val"""
            actual_result = graph.query(q)

            expected_result = [[
                edge1.properties['val'], src.properties['name'],
                dest.properties['name']
            ],
                               [
                                   edge2.properties['val'],
                                   src.properties['name'],
                                   dest.properties['name']
                               ]]

            self.env.assertEquals(actual_result.result_set, expected_result)

            # Save RDB & Load from RDB
            self.env.dumpAndReload()

            # Verify that the latest edge was properly saved and loaded
            actual_result = graph.query(q)
            self.env.assertEquals(actual_result.result_set, expected_result)

    # Verify that graphs larger than the
    # default capacity are persisted correctly.
    def test05_load_large_graph(self):
        graph_name = "LARGE_GRAPH"
        graph = Graph(graph_name, redis_con)
        q = """UNWIND range(1, 50000) AS v CREATE (:L)-[:R {v: v}]->(:L)"""
        actual_result = graph.query(q)
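        # range(1, 50000) is inclusive, so each of the 50,000 UNWIND iterations
        # creates two nodes and one edge, giving the totals asserted below.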
        self.env.assertEquals(actual_result.nodes_created, 100_000)
        self.env.assertEquals(actual_result.relationships_created, 50_000)

        # Save RDB & Load from RDB
        self.env.dumpAndReload()

        expected_result = [[50000]]

        queries = [
            """MATCH (:L)-[r {v: 50000}]->(:L) RETURN r.v""",
            """MATCH (:L)-[r:R {v: 50000}]->(:L) RETURN r.v""",
            """MATCH ()-[r:R {v: 50000}]->() RETURN r.v"""
        ]

        for q in queries:
            actual_result = graph.query(q)
            self.env.assertEquals(actual_result.result_set, expected_result)

    # Verify that graphs created using the GRAPH.BULK endpoint are persisted correctly
    def test06_bulk_insert(self):
        graphname = "bulk_inserted_graph"
        runner = CliRunner()

        csv_path = os.path.dirname(os.path.abspath(
            __file__)) + '/../../demo/social/resources/bulk_formatted/'
        res = runner.invoke(bulk_insert, [
            '--nodes', csv_path + 'Person.csv', '--nodes',
            csv_path + 'Country.csv', '--relations', csv_path + 'KNOWS.csv',
            '--relations', csv_path + 'VISITED.csv', graphname
        ])
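        # The bulk loader derives labels and relationship types from the CSV file
        # names, so this load is expected to create Person and Country nodes joined
        # by KNOWS and VISITED relationships.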

        # The script should report 27 node creations and 56 edge creations
        self.env.assertEquals(res.exit_code, 0)
        self.env.assertIn('27 nodes created', res.output)
        self.env.assertIn('56 relations created', res.output)

        # Restart the server
        self.env.dumpAndReload()

        graph = Graph(graphname, redis_con)

        query_result = graph.query("""MATCH (p:Person)
                                      RETURN p.name, p.age, p.gender, p.status, ID(p)
                                      ORDER BY p.name""")

        # Verify that the Person label exists, has the correct attributes
        # and is properly populated
        expected_result = [['Ailon Velger', 32, 'male', 'married', 2],
                           ['Alon Fital', 32, 'male', 'married', 1],
                           ['Boaz Arad', 31, 'male', 'married', 4],
                           ['Gal Derriere', 26, 'male', 'single', 11],
                           ['Jane Chernomorin', 31, 'female', 'married', 8],
                           ['Lucy Yanfital', 30, 'female', 'married', 7],
                           ['Mor Yesharim', 31, 'female', 'married', 12],
                           ['Noam Nativ', 34, 'male', 'single', 13],
                           ['Omri Traub', 33, 'male', 'single', 5],
                           ['Ori Laslo', 32, 'male', 'married', 3],
                           ['Roi Lipman', 32, 'male', 'married', 0],
                           ['Shelly Laslo Rooz', 31, 'female', 'married', 9],
                           ['Tal Doron', 32, 'male', 'single', 6],
                           [
                               'Valerie Abigail Arad', 31, 'female', 'married',
                               10
                           ]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # Verify that the Country label exists, has the correct attributes, and is properly populated
        query_result = graph.query(
            'MATCH (c:Country) RETURN c.name, ID(c) ORDER BY c.name')
        expected_result = [['Andora', 21], ['Canada', 18], ['China', 19],
                           ['Germany', 24], ['Greece', 17], ['Italy', 25],
                           ['Japan', 16], ['Kazakhstan', 22],
                           ['Netherlands', 20], ['Prague', 15], ['Russia', 23],
                           ['Thailand', 26], ['USA', 14]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # Validate that the expected relations and properties have been constructed
        query_result = graph.query(
            'MATCH (a)-[e:KNOWS]->(b) RETURN a.name, e.relation, b.name ORDER BY e.relation, a.name, b.name'
        )

        expected_result = [['Ailon Velger', 'friend', 'Noam Nativ'],
                           ['Alon Fital', 'friend', 'Gal Derriere'],
                           ['Alon Fital', 'friend', 'Mor Yesharim'],
                           ['Boaz Arad', 'friend', 'Valerie Abigail Arad'],
                           ['Roi Lipman', 'friend', 'Ailon Velger'],
                           ['Roi Lipman', 'friend', 'Alon Fital'],
                           ['Roi Lipman', 'friend', 'Boaz Arad'],
                           ['Roi Lipman', 'friend', 'Omri Traub'],
                           ['Roi Lipman', 'friend', 'Ori Laslo'],
                           ['Roi Lipman', 'friend', 'Tal Doron'],
                           ['Ailon Velger', 'married', 'Jane Chernomorin'],
                           ['Alon Fital', 'married', 'Lucy Yanfital'],
                           ['Ori Laslo', 'married', 'Shelly Laslo Rooz']]
        self.env.assertEquals(query_result.result_set, expected_result)

        query_result = graph.query(
            'MATCH (a)-[e:VISITED]->(b) RETURN a.name, e.purpose, b.name ORDER BY e.purpose, a.name, b.name'
        )

        expected_result = [['Alon Fital', 'business', 'Prague'],
                           ['Alon Fital', 'business', 'USA'],
                           ['Boaz Arad', 'business', 'Netherlands'],
                           ['Boaz Arad', 'business', 'USA'],
                           ['Gal Derriere', 'business', 'Netherlands'],
                           ['Jane Chernomorin', 'business', 'USA'],
                           ['Lucy Yanfital', 'business', 'USA'],
                           ['Mor Yesharim', 'business', 'Germany'],
                           ['Ori Laslo', 'business', 'China'],
                           ['Ori Laslo', 'business', 'USA'],
                           ['Roi Lipman', 'business', 'Prague'],
                           ['Roi Lipman', 'business', 'USA'],
                           ['Tal Doron', 'business', 'Japan'],
                           ['Tal Doron', 'business', 'USA'],
                           ['Alon Fital', 'pleasure', 'Greece'],
                           ['Alon Fital', 'pleasure', 'Prague'],
                           ['Alon Fital', 'pleasure', 'USA'],
                           ['Boaz Arad', 'pleasure', 'Netherlands'],
                           ['Boaz Arad', 'pleasure', 'USA'],
                           ['Jane Chernomorin', 'pleasure', 'Greece'],
                           ['Jane Chernomorin', 'pleasure', 'Netherlands'],
                           ['Jane Chernomorin', 'pleasure', 'USA'],
                           ['Lucy Yanfital', 'pleasure', 'Kazakhstan'],
                           ['Lucy Yanfital', 'pleasure', 'Prague'],
                           ['Lucy Yanfital', 'pleasure', 'USA'],
                           ['Mor Yesharim', 'pleasure', 'Greece'],
                           ['Mor Yesharim', 'pleasure', 'Italy'],
                           ['Noam Nativ', 'pleasure', 'Germany'],
                           ['Noam Nativ', 'pleasure', 'Netherlands'],
                           ['Noam Nativ', 'pleasure', 'Thailand'],
                           ['Omri Traub', 'pleasure', 'Andora'],
                           ['Omri Traub', 'pleasure', 'Greece'],
                           ['Omri Traub', 'pleasure', 'USA'],
                           ['Ori Laslo', 'pleasure', 'Canada'],
                           ['Roi Lipman', 'pleasure', 'Japan'],
                           ['Roi Lipman', 'pleasure', 'Prague'],
                           ['Shelly Laslo Rooz', 'pleasure', 'Canada'],
                           ['Shelly Laslo Rooz', 'pleasure', 'China'],
                           ['Shelly Laslo Rooz', 'pleasure', 'USA'],
                           ['Tal Doron', 'pleasure', 'Andora'],
                           ['Tal Doron', 'pleasure', 'USA'],
                           ['Valerie Abigail Arad', 'pleasure', 'Netherlands'],
                           ['Valerie Abigail Arad', 'pleasure', 'Russia']]
        self.env.assertEquals(query_result.result_set, expected_result)

    # Verify that nodes with multiple labels are saved and restored correctly.
    def test07_persist_multiple_labels(self):
        graph_id = "multiple_labels"
        g = Graph(graph_id, redis_con)
        q = "CREATE (a:L0:L1:L2)"
        actual_result = g.query(q)
        self.env.assertEquals(actual_result.nodes_created, 1)
        self.env.assertEquals(actual_result.labels_added, 3)

        # Verify the new node
        q = "MATCH (a) RETURN LABELS(a)"
        actual_result = g.query(q)
        expected_result = [[["L0", "L1", "L2"]]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Save RDB & Load from RDB
        self.env.dumpAndReload()

        # Verify that the graph was properly saved and loaded
        actual_result = g.query(q)
        self.env.assertEquals(actual_result.result_set, expected_result)

        queries = [
            "MATCH (a:L0) RETURN count(a)", "MATCH (a:L1) RETURN count(a)",
            "MATCH (a:L2) RETURN count(a)", "MATCH (a:L0:L0) RETURN count(a)",
            "MATCH (a:L0:L1) RETURN count(a)",
            "MATCH (a:L0:L2) RETURN count(a)",
            "MATCH (a:L1:L0) RETURN count(a)",
            "MATCH (a:L1:L1) RETURN count(a)",
            "MATCH (a:L1:L2) RETURN count(a)",
            "MATCH (a:L2:L0) RETURN count(a)",
            "MATCH (a:L2:L1) RETURN count(a)",
            "MATCH (a:L2:L2) RETURN count(a)",
            "MATCH (a:L0:L1:L2) RETURN count(a)"
        ]

        for q in queries:
            actual_result = g.query(q)
            self.env.assertEquals(actual_result.result_set[0], [1])
Code example #24
class testOptimizationsPlan(FlowTestsBase):
    def __init__(self):
        self.env = Env()
        global graph
        global redis_con
        redis_con = self.env.getConnection()
        graph = Graph("g", redis_con)
        self.populate_graph()

    def populate_graph(self):
        global graph
        nodes = {}
        # Create entities
        for idx, p in enumerate(people):
            node = Node(label="person", properties={"name": p, "val": idx})
            graph.add_node(node)
            nodes[p] = node

        # Fully connected graph
        for src in nodes:
            for dest in nodes:
                if src != dest:
                    edge = Edge(nodes[src], "know", nodes[dest])
                    graph.add_edge(edge)

        for src in nodes:
            for dest in nodes:
                if src != dest:
                    edge = Edge(nodes[src], "works_with", nodes[dest])
                    graph.add_edge(edge)

        graph.commit()
        query = """MATCH (a)-[:know]->(b) CREATE (a)-[:know]->(b)"""
        graph.query(query)
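
        # Assuming the imported `people` list holds the four names used below
        # (Roi, Alon, Ailon, Boaz), the graph now contains 4 person nodes,
        # 12 :works_with edges and 24 :know edges (the query above duplicates
        # the original 12), i.e. 36 edges in total, which the counts asserted
        # by the tests below rely on.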

    def test01_typeless_edge_count(self):
        query = """MATCH ()-[r]->() RETURN COUNT(r)"""
        resultset = graph.query(query).result_set
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Project", executionPlan)
        self.env.assertIn("Results", executionPlan)
        self.env.assertNotIn("All Node Scan", executionPlan)
        self.env.assertNotIn("Conditional Traverse", executionPlan)
        self.env.assertNotIn("Aggregate", executionPlan)
        expected = [[36]]
        self.env.assertEqual(resultset, expected)

    def test02_typed_edge_count(self):
        query = """MATCH ()-[r:know]->() RETURN COUNT(r)"""
        resultset = graph.query(query).result_set
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Project", executionPlan)
        self.env.assertIn("Results", executionPlan)
        self.env.assertNotIn("All Node Scan", executionPlan)
        self.env.assertNotIn("Conditional Traverse", executionPlan)
        self.env.assertNotIn("Aggregate", executionPlan)
        expected = [[24]]
        self.env.assertEqual(resultset, expected)

    def test03_unknown_typed_edge_count(self):
        query = """MATCH ()-[r:unknown]->() RETURN COUNT(r)"""
        resultset = graph.query(query).result_set
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Project", executionPlan)
        self.env.assertIn("Results", executionPlan)
        self.env.assertNotIn("All Node Scan", executionPlan)
        self.env.assertNotIn("Conditional Traverse", executionPlan)
        self.env.assertNotIn("Aggregate", executionPlan)
        expected = [[0]]
        self.env.assertEqual(resultset, expected)

    def test04_typeless_edge_count_with_alias(self):
        query = """MATCH ()-[r]->() RETURN COUNT(r) as c"""
        resultset = graph.query(query).result_set
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Project", executionPlan)
        self.env.assertIn("Results", executionPlan)
        self.env.assertNotIn("All Node Scan", executionPlan)
        self.env.assertNotIn("Conditional Traverse", executionPlan)
        self.env.assertNotIn("Aggregate", executionPlan)
        expected = [[36]]
        self.env.assertEqual(resultset, expected)

    def test05_typed_edge_count_with_alias(self):
        query = """MATCH ()-[r:know]->() RETURN COUNT(r) as c"""
        resultset = graph.query(query).result_set
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Project", executionPlan)
        self.env.assertIn("Results", executionPlan)
        self.env.assertNotIn("All Node Scan", executionPlan)
        self.env.assertNotIn("Conditional Traverse", executionPlan)
        self.env.assertNotIn("Aggregate", executionPlan)
        expected = [[24]]
        self.env.assertEqual(resultset, expected)

    def test06_multiple_typed_edge_count_with_alias(self):
        query = """MATCH ()-[r:know | :works_with]->() RETURN COUNT(r) as c"""
        resultset = graph.query(query).result_set
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Project", executionPlan)
        self.env.assertIn("Results", executionPlan)
        self.env.assertNotIn("All Node Scan", executionPlan)
        self.env.assertNotIn("Conditional Traverse", executionPlan)
        self.env.assertNotIn("Aggregate", executionPlan)
        expected = [[36]]
        self.env.assertEqual(resultset, expected)

    def test07_count_unreferenced_edge(self):
        query = """MATCH ()-[:know]->(b) RETURN COUNT(b)"""
        # The count in this query cannot be reduced, as the traversal op doesn't
        # store data about non-referenced edges: it emits one record per distinct
        # (src, dest) adjacency, so the duplicated :know edges are not counted twice.
        resultset = graph.query(query).result_set
        executionPlan = graph.execution_plan(query)
        # Verify that the optimization was not applied.
        self.env.assertNotIn("Project", executionPlan)
        self.env.assertIn("Aggregate", executionPlan)
        self.env.assertIn("All Node Scan", executionPlan)
        self.env.assertIn("Conditional Traverse", executionPlan)
        expected = [[12]]
        self.env.assertEqual(resultset, expected)

    def test08_non_labeled_node_count(self):
        query = """MATCH (n) RETURN COUNT(n)"""
        resultset = graph.query(query).result_set
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Project", executionPlan)
        self.env.assertIn("Results", executionPlan)
        self.env.assertNotIn("All Node Scan", executionPlan)
        self.env.assertNotIn("Node By Label Scan", executionPlan)
        self.env.assertNotIn("Aggregate", executionPlan)
        expected = [[4]]
        self.env.assertEqual(resultset, expected)

    def test09_non_labeled_node_count_with_alias(self):
        query = """MATCH (n) RETURN COUNT(n) as c"""
        resultset = graph.query(query).result_set
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Project", executionPlan)
        self.env.assertIn("Results", executionPlan)
        self.env.assertNotIn("All Node Scan", executionPlan)
        self.env.assertNotIn("Node By Label Scan", executionPlan)
        self.env.assertNotIn("Aggregate", executionPlan)
        expected = [[4]]
        self.env.assertEqual(resultset, expected)

    def test10_labeled_node_count(self):
        query = """MATCH (n:person) RETURN COUNT(n)"""
        resultset = graph.query(query).result_set
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Project", executionPlan)
        self.env.assertIn("Results", executionPlan)
        self.env.assertNotIn("All Node Scan", executionPlan)
        self.env.assertNotIn("Node By Label Scan", executionPlan)
        self.env.assertNotIn("Aggregate", executionPlan)
        expected = [[4]]
        self.env.assertEqual(resultset, expected)

    def test11_value_hash_join(self):
        # Issue a query that joins two streams on a node property.
        query = """MATCH (p1:person)-[:know]->({name: 'Roi'}), (p2)-[]->(:person {name: 'Alon'}) WHERE p1.name = p2.name RETURN p2.name ORDER BY p2.name"""
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Value Hash Join", executionPlan)
        self.env.assertNotIn("Cartesian Product", executionPlan)

        resultset = graph.query(query).result_set
        expected = [['Ailon'], ['Boaz']]
        self.env.assertEqual(resultset, expected)

        # Issue a query that joins two streams on a function call.
        query = """MATCH (p1:person)-[:know]->({name: 'Roi'}) MATCH (p2)-[]->(:person {name: 'Alon'}) WHERE ID(p1) = ID(p2) RETURN p2.name ORDER BY p2.name"""
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Value Hash Join", executionPlan)
        self.env.assertNotIn("Cartesian Product", executionPlan)

        resultset = graph.query(query).result_set
        self.env.assertEqual(resultset, expected) # same results expected

        # Validate identical results in a query that doesn't leverage this optimization.
        # TODO this query could in the future be optimized with a "Node Hash Join"
        query = """MATCH (p1:person)-[:know]->({name: 'Roi'}) MATCH (p2)-[]->(:person {name: 'Alon'}) WHERE p1 = p2 RETURN p2.name ORDER BY p2.name"""
        executionPlan = graph.execution_plan(query)
        self.env.assertNotIn("Value Hash Join", executionPlan)
        self.env.assertIn("Cartesian Product", executionPlan)

        resultset = graph.query(query).result_set
        self.env.assertEqual(resultset, expected) # same results expected

    def test12_multiple_stream_value_hash_join(self):
        # Issue a query that joins three streams.
        query = """MATCH (p1:person)-[:know]->({name: 'Roi'}), (p2)-[]->(:person {name: 'Alon'}), (p3) WHERE p1.name = p2.name AND ID(p2) = ID(p3) RETURN p2.name ORDER BY p2.name"""
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Value Hash Join", executionPlan)
        self.env.assertNotIn("Cartesian Product", executionPlan)

        resultset = graph.query(query).result_set
        expected = [['Ailon'], ['Boaz']]
        self.env.assertEqual(resultset, expected)

        # Issue a query that joins four streams that all resolve the same entity.
        query = """MATCH (p1 {name: 'Ailon'}), (p2), (p3), (p4) WHERE ID(p1) = ID(p2) AND ID(p2) = ID(p3) AND p3.name = p4.name RETURN p4.name"""
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Value Hash Join", executionPlan)
        self.env.assertNotIn("Cartesian Product", executionPlan)

        expected = [['Ailon']]
        resultset = graph.query(query).result_set
        self.env.assertEqual(resultset, expected)

        # Issue a query that joins four streams that all resolve the same entity, with multiple repeating filters (issue #869).
        query = """MATCH (p1 {name: 'Ailon'}), (p2), (p3), (p4) WHERE ID(p1) = ID(p2) AND ID(p2) = ID(p3) AND ID(p3)=ID(p2) AND ID(p2)= ID(p1) AND p3.name = p4.name AND p4.name = p3.name RETURN p4.name"""
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Value Hash Join", executionPlan)
        self.env.assertNotIn("Cartesian Product", executionPlan)

        expected = [['Ailon']]
        resultset = graph.query(query).result_set
        self.env.assertEqual(resultset, expected)

    def test13_duplicate_filter_placement(self):
        # Issue a query that joins three streams and contains a redundant filter.
        query = """MATCH (p0), (p1), (p2) where id(p2) = id(p0) AND id(p1) = id(p2) AND id(p1) = id(p2) return p2.name ORDER BY p2.name"""
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Value Hash Join", executionPlan)
        self.env.assertNotIn("Cartesian Product", executionPlan)

        resultset = graph.query(query).result_set
        expected = [['Ailon'], ['Alon'], ['Boaz'], ['Roi']]
        self.env.assertEqual(resultset, expected)

    def test14_distinct_aggregations(self):
        # Verify that the Distinct operation is removed from the aggregating query.
        query = """MATCH (src:person)-[:know]->(dest) RETURN DISTINCT src.name, COUNT(dest) ORDER BY src.name"""
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Aggregate", executionPlan)
        self.env.assertNotIn("Distinct", executionPlan)

        resultset = graph.query(query).result_set
        expected = [['Ailon', 3],
                    ['Alon', 3],
                    ['Boaz', 3],
                    ['Roi', 3]]
        self.env.assertEqual(resultset, expected)


        # Verify that the Distinct operation is not removed from a valid projection.
        query = """MATCH (src:person) WITH DISTINCT src MATCH (src)-[:know]->(dest) RETURN src.name, COUNT(dest) ORDER BY src.name"""
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Aggregate", executionPlan)
        self.env.assertIn("Distinct", executionPlan)

        resultset = graph.query(query).result_set
        # This query should emit the same result.
        self.env.assertEqual(resultset, expected)

    def test15_test_splitting_cartesian_product(self):
        query = """MATCH (p1), (p2), (p3) WHERE p1.name <> p2.name AND p2.name <> p3.name RETURN DISTINCT p2.name ORDER BY p2.name"""
        executionPlan = graph.execution_plan(query)
        self.env.assertEqual(2, executionPlan.count("Cartesian Product"))
        expected = [['Ailon'],
                    ['Alon'],
                    ['Boaz'],
                    ['Roi']]
        resultset = graph.query(query).result_set
        self.env.assertEqual(resultset, expected)
    
    def test16_test_splitting_cartesian_product_with_multiple_filters(self):
        query = """MATCH (p1), (p2), (p3) WHERE p1.name <> p2.name AND ID(p1) <> ID(p2) RETURN DISTINCT p2.name ORDER BY p2.name"""
        executionPlan = graph.execution_plan(query)
        self.env.assertEqual(2, executionPlan.count("Cartesian Product"))
        expected = [['Ailon'],
                    ['Alon'],
                    ['Boaz'],
                    ['Roi']]
        resultset = graph.query(query).result_set
        self.env.assertEqual(resultset, expected)

    def test17_test_multiple_branch_filter_cp_optimization(self):
        query = """MATCH (p1), (p2), (p3), (p4) WHERE p1.val + p2.val = p3.val AND p3.val > 0 RETURN DISTINCT p3.name ORDER BY p3.name"""
        executionPlan = graph.execution_plan(query)
        self.env.assertEqual(2, executionPlan.count("Cartesian Product"))
        expected = [['Ailon'],
                    ['Alon'],
                    ['Boaz']]
        resultset = graph.query(query).result_set
        self.env.assertEqual(resultset, expected)

    def test18_test_semi_apply_and_cp_optimize(self):
        graph.query ("CREATE ({val:0}), ({val:1})-[:R]->({val:2})-[:R]->({val:3})")
        # The next query generates the execution plan:
        # 1) "Results"
        # 2) "    Sort"
        # 3) "        Distinct"
        # 4) "            Project"
        # 5) "                Semi Apply"
        # 6) "                    Cartesian Product"
        # 7) "                        All Node Scan | (n4)"
        # 8) "                        Filter"
        # 9) "                            Cartesian Product"
        # 10) "                                All Node Scan | (n1)"
        # 11) "                                Filter"
        # 12) "                                    All Node Scan | (n3)"
        # 13) "                                All Node Scan | (n2)"
        # 14) "                    Expand Into | (n3)->(n4)"
        # 15) "                        Filter"
        # 16) "                            Argument"
        # We want to make sure the optimization is not misplacing the semi apply bounded branch.
        resultset = graph.query("MATCH (n1), (n2), (n3), (n4) WHERE (n3)-[:R]->(n4 {val:n3.val+1}) AND n1.val + n2.val = n3.val AND n3.val > 1  RETURN DISTINCT n3.val ORDER BY n3.val").result_set
        expected = [[2]]
        self.env.assertEqual(resultset, expected)
    
    def test19_test_filter_compaction_remove_true_filter(self):
        query = "MATCH (n) WHERE 1 = 1 RETURN n"
        executionPlan = graph.execution_plan(query)
        self.env.assertNotIn("Filter", executionPlan)

    def test20_test_filter_compaction_not_removing_false_filter(self):
        query = "MATCH (n) WHERE 1 > 1 RETURN n"
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Filter", executionPlan)
        resultset = graph.query(query).result_set
        expected = []
        self.env.assertEqual(resultset, expected)

    # ExpandInto should be applied where possible on projected graph entities.
    def test21_expand_into_projected_endpoints(self):
        query = """MATCH (a)-[]->(b) WITH a, b MATCH (a)-[e]->(b) RETURN a.val, b.val ORDER BY a.val, b.val LIMIT 3"""
        executionPlan = graph.execution_plan(query)
        self.env.assertIn("Expand Into", executionPlan)
        resultset = graph.query(query).result_set
        expected = [[0, 1],
                    [0, 2],
                    [0, 3]]
        self.env.assertEqual(resultset, expected)

    # Variables bound in one scope should not be used to introduce ExpandInto ops in later scopes.
    def test22_no_expand_into_across_scopes(self):
        query = """MATCH (reused_1)-[]->(reused_2) WITH COUNT(reused_2) as edge_count MATCH (reused_1)-[]->(reused_2) RETURN edge_count, reused_1.val, reused_2.val ORDER BY reused_1.val, reused_2.val LIMIT 3"""
        executionPlan = graph.execution_plan(query)
        self.env.assertNotIn("Expand Into", executionPlan)
        resultset = graph.query(query).result_set
        expected = [[14, 0, 1],
                    [14, 0, 2],
                    [14, 0, 3]]
        self.env.assertEqual(resultset, expected)

    # Test limit propagation. Execution-plan operations such as
    # Conditional Traverse accumulate a batch of records before processing;
    # knowledge about the limit can benefit such operations, as they can
    # reduce their batch size to match the current limit.
    def test23_limit_propagation(self):
        graph_id = "limit-propagation"
        graph = Graph(graph_id, redis_con)

        # create graph
        query = """UNWIND range(0, 64) AS x CREATE ()-[:R]->()-[:R]->()"""
        graph.query(query)

        # query with LIMIT 1
        query = """CYPHER l=1 MATCH (a)-[]->(b) WITH b AS b
        MATCH (b)-[]->(c) RETURN c LIMIT $l"""

        # profile query
        profile = redis_con.execute_command("GRAPH.PROFILE", graph_id, query)
        profile = [x[0:x.index(',')].strip() for x in profile]
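        # Each profile line ends with ", Execution time: ..."; keeping only the text
        # before the first comma lets the assertions match on the operation name and
        # its record count.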

        # make sure 'a' to 'b' traversal operation is aware of limit
        self.env.assertIn("Conditional Traverse | (a)->(b) | Records produced: 1", profile)

        # query with LIMIT 1
        query = """CYPHER l=1 MATCH (a), (b) WITH a AS a, b AS b
        MATCH (a)-[]->(b) WITH b AS b MATCH (b)-[]->(c) RETURN c LIMIT $l"""

        # profile query
        profile = redis_con.execute_command("GRAPH.PROFILE", graph_id, query)
        profile = [x[0:x.index(',')].strip() for x in profile]

        # make sure 'a' to 'b' expand into traversal operation is aware of limit
        self.env.assertIn("Expand Into | (a)->(b) | Records produced: 1", profile)

        # Aggregation should reset the limit, otherwise we'll take a performance hit;
        # recall that aggregation operations are eager.
        query = """CYPHER l=1 MATCH (a)-[]->(b) WITH count(a) AS src, b AS b
        MATCH (b)-[]->(c) RETURN c LIMIT $l"""

        # profile query
        profile = redis_con.execute_command("GRAPH.PROFILE", graph_id, query)
        profile = [x[0:x.index(',')].strip() for x in profile]

        # traversal from a to b shouldn't be affected by the limit.
        self.env.assertNotIn("Conditional Traverse | (a)->(b) | Records produced: 64", profile)
Code example #25
class testParams(FlowTestsBase):
    def __init__(self):
        self.env = Env(decodeResponses=True)
        global redis_graph
        redis_con = self.env.getConnection()
        redis_graph = Graph(GRAPH_ID, redis_con)

    def setUp(self):
        self.env.flush()

    def test_simple_params(self):
        params = [1, 2.3, -1, -2.3, "str", True, False, None, [0, 1, 2]]
        query = "RETURN $param"
        for param in params:
            expected_results = [[param]]
            query_info = QueryInfo(query=query,
                                   description="Tests simple params",
                                   expected_result=expected_results)
            self._assert_resultset_equals_expected(
                redis_graph.query(query, {'param': param}), query_info)

    def test_invalid_param(self):
        invalid_queries = [
            "CYPHER param=a RETURN $param",  # 'a' is undefined
            "CYPHER param=a MATCH (a) RETURN $param",  # 'a' is undefined
            "CYPHER param=f(1) RETURN $param",  # 'f' doesn't exists
            "CYPHER param=2+f(1) RETURN $param",  # 'f' doesn't exists
            "CYPHER param=[1, f(1)] UNWIND $param AS x RETURN x",  # 'f' doesn't exists
            "CYPHER param=[1, [2, f(1)]] UNWIND $param AS x RETURN x",  # 'f' doesn't exists
            "CYPHER param={'key':f(1)} RETURN $param",  # 'f' doesn't exists
            "CYPHER param=1*'a' RETURN $param",  # 1*'a' isn't defined
            "CYPHER param=abs(1)+f(1) RETURN $param",  # 'f' doesn't exists
            "CYPHER param= RETURN 1",  # undefined parameter
            "CYPHER param=count(1) RETURN $param"  # aggregation function can't be used as a parameter
            "CYPHER param=2+count(1) RETURN $param",  # aggregation function can't be used as a parameter
            "CYPHER param=[1, count(1)] UNWIND $param AS x RETURN x",  # aggregation function can't be used as a parameter
            "CYPHER param={'key':count(1)} RETURN $param",  # aggregation function can't be used as a parameter
            "CYPHER param={'key':1*'a'} RETURN $param",  # 1*'a' isn't defined
            "CYPHER param=[1, 1*'a'] UNWIND $param AS x RETURN x",  # 1*'a' isn't defined
            "CYPHER param={'key':a} RETURN $param",  # 'a' isn't defined
            "CYPHER param=[1, a] UNWIND $param AS x RETURN x",  # 'a' isn't defined
            "CYPHER param0=1 param1=$param0 RETURN $param1"  # paramers shouldn't refer to one another
        ]
        for q in invalid_queries:
            try:
                result = redis_graph.query(q)
                assert (False)
            except redis.exceptions.ResponseError as e:
                pass

    def test_expression_on_param(self):
        params = {'param': 1}
        query = "RETURN $param + 1"
        expected_results = [[2]]

        query_info = QueryInfo(query=query,
                               description="Tests expression on param",
                               expected_result=expected_results)
        self._assert_resultset_equals_expected(
            redis_graph.query(query, params), query_info)

    def test_node_retrieval(self):
        p0 = Node(node_id=0, label="Person", properties={'name': 'a'})
        p1 = Node(node_id=1, label="Person", properties={'name': 'b'})
        p2 = Node(node_id=2, label="NoPerson", properties={'name': 'a'})
        redis_graph.add_node(p0)
        redis_graph.add_node(p1)
        redis_graph.add_node(p2)
        redis_graph.flush()

        params = {'name': 'a'}
        query = "MATCH (n :Person {name:$name}) RETURN n"
        expected_results = [[p0]]

        query_info = QueryInfo(query=query,
                               description="Tests expression on param",
                               expected_result=expected_results)
        self._assert_resultset_equals_expected(
            redis_graph.query(query, params), query_info)

    def test_parameterized_skip_limit(self):
        params = {'skip': 1, 'limit': 1}
        query = "UNWIND [1,2,3] AS X RETURN X SKIP $skip LIMIT $limit"
        expected_results = [[2]]

        query_info = QueryInfo(query=query,
                               description="Tests skip limit as params",
                               expected_result=expected_results)
        self._assert_resultset_equals_expected(
            redis_graph.query(query, params), query_info)

        # Set one parameter to non-integer value
        params = {'skip': '1', 'limit': 1}
        try:
            redis_graph.query(query, params)
            assert (False)
        except redis.exceptions.ResponseError as e:
            pass

    def test_missing_parameter(self):
        # Make sure missing parameters are reported back as an error.
        query = "RETURN $missing"
        try:
            redis_graph.query(query)
            assert (False)
        except:
            # Expecting an error.
            pass

        try:
            redis_graph.profile(query)
            assert (False)
        except:
            # Expecting an error.
            pass

        try:
            redis_graph.execution_plan(query)
            assert (False)
        except:
            # Expecting an error.
            pass

        query = "MATCH (a) WHERE a.v = $missing RETURN a"
        try:
            redis_graph.query(query)
            assert (False)
        except:
            # Expecting an error.
            pass

        query = "MATCH (a) SET a.v = $missing RETURN a"
        try:
            redis_graph.query(query)
            assert (False)
        except:
            # Expecting an error.
            pass

    def test_id_scan(self):
        redis_graph.query("CREATE ({val:1})")
        expected_results = [[1]]
        params = {'id': 0}
        query = "MATCH (n) WHERE id(n)=$id return n.val"
        query_info = QueryInfo(query=query,
                               description="Test id scan with params",
                               expected_result=expected_results)
        self._assert_resultset_equals_expected(
            redis_graph.query(query, params), query_info)
        plan = redis_graph.execution_plan(query, params=params)
        self.env.assertIn('NodeByIdSeek', plan)
Code example #26
class testIndexScanFlow(FlowTestsBase):
    def __init__(self):
        self.env = Env(decodeResponses=True)

    def setUp(self):
        global redis_graph
        redis_con = self.env.getConnection()
        redis_graph = Graph(social_utils.graph_name, redis_con)
        social_utils.populate_graph(redis_con, redis_graph)
        self.build_indices()

    def tearDown(self):
        self.env.cmd('flushall')

    def build_indices(self):
        global redis_graph
        redis_graph.redis_con.execute_command("GRAPH.QUERY", "social",
                                              "CREATE INDEX ON :person(age)")
        redis_graph.redis_con.execute_command(
            "GRAPH.QUERY", "social", "CREATE INDEX ON :country(name)")

    # Validate that Cartesian products using index and label scans succeed
    def test01_cartesian_product_mixed_scans(self):
        query = "MATCH (p:person), (c:country) WHERE p.age > 0 RETURN p.age, c.name ORDER BY p.age, c.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        self.env.assertIn('Label Scan', plan)
        indexed_result = redis_graph.query(query)

        query = "MATCH (p:person), (c:country) RETURN p.age, c.name ORDER BY p.age, c.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertNotIn('Index Scan', plan)
        self.env.assertIn('Label Scan', plan)
        unindexed_result = redis_graph.query(query)

        self.env.assertEquals(indexed_result.result_set,
                              unindexed_result.result_set)

    # Validate that Cartesian products using just index scans succeed
    def test02_cartesian_product_index_scans_only(self):
        query = "MATCH (p:person), (c:country) WHERE p.age > 0 AND c.name > '' RETURN p.age, c.name ORDER BY p.age, c.name"
        plan = redis_graph.execution_plan(query)
        # The two streams should both use index scans
        self.env.assertEquals(plan.count('Index Scan'), 2)
        self.env.assertNotIn('Label Scan', plan)
        indexed_result = redis_graph.query(query)

        query = "MATCH (p:person), (c:country) RETURN p.age, c.name ORDER BY p.age, c.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertNotIn('Index Scan', plan)
        self.env.assertIn('Label Scan', plan)
        unindexed_result = redis_graph.query(query)

        self.env.assertEquals(indexed_result.result_set,
                              unindexed_result.result_set)

    # Validate that the appropriate bounds are respected when a Cartesian product uses the same index in two streams
    def test03_cartesian_product_reused_index(self):
        redis_graph.redis_con.execute_command("GRAPH.QUERY", "social",
                                              "CREATE INDEX ON :person(name)")
        query = "MATCH (a:person {name: 'Omri Traub'}), (b:person) WHERE b.age <= 30 RETURN a.name, b.name ORDER BY a.name, b.name"
        plan = redis_graph.execution_plan(query)
        # The two streams should both use index scans
        self.env.assertEquals(plan.count('Index Scan'), 2)
        self.env.assertNotIn('Label Scan', plan)

        expected_result = [['Omri Traub', 'Gal Derriere'],
                           ['Omri Traub', 'Lucy Yanfital']]
        result = redis_graph.query(query)

        self.env.assertEquals(result.result_set, expected_result)

    # Validate index utilization when filtering on a numeric field with the `IN` keyword.
    def test04_test_in_operator_numerics(self):
        # Validate the transformation of IN to multiple OR expressions.
        query = "MATCH (p:person) WHERE p.age IN [1,2,3] RETURN p"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)

        # Validate that nested arrays are not scanned in index.
        query = "MATCH (p:person) WHERE p.age IN [[1,2],3] RETURN p"
        plan = redis_graph.execution_plan(query)
        self.env.assertNotIn('Index Scan', plan)
        self.env.assertIn('Label Scan', plan)

        # Validate the transformation of IN to multiple OR, over a range.
        query = "MATCH (p:person) WHERE p.age IN range(0,30) RETURN p.name ORDER BY p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)

        expected_result = [['Gal Derriere'], ['Lucy Yanfital']]
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Validate the transformation of IN to empty index iterator.
        query = "MATCH (p:person) WHERE p.age IN [] RETURN p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)

        expected_result = []
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Validate the transformation of IN OR IN to empty index iterators.
        query = "MATCH (p:person) WHERE p.age IN [] OR p.age IN [] RETURN p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)

        expected_result = []
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Validate the transformation of multiple IN filters.
        query = "MATCH (p:person) WHERE p.age IN [26, 27, 30] OR p.age IN [33, 34, 35] RETURN p.name ORDER BY p.age"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)

        expected_result = [['Gal Derriere'], ['Lucy Yanfital'], ['Omri Traub'],
                           ['Noam Nativ']]
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Validate the transformation of multiple IN filters.
        query = "MATCH (p:person) WHERE p.age IN [26, 27, 30] OR p.age IN [33, 34, 35] OR p.age IN [] RETURN p.name ORDER BY p.age"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)

        expected_result = [['Gal Derriere'], ['Lucy Yanfital'], ['Omri Traub'],
                           ['Noam Nativ']]
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

    # Validate index utilization when filtering on string fields with the `IN` keyword.
    def test05_test_in_operator_string_props(self):
        # Build an index on the name property.
        redis_graph.redis_con.execute_command("GRAPH.QUERY", "social",
                                              "CREATE INDEX ON :person(name)")
        # Validate the transformation of IN to multiple OR expressions over string properties.
        query = "MATCH (p:person) WHERE p.name IN ['Gal Derriere', 'Lucy Yanfital'] RETURN p.name ORDER BY p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        self.env.assertNotIn('Label Scan', plan)

        expected_result = [['Gal Derriere'], ['Lucy Yanfital']]
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Combine numeric and string filters specified by IN.
        query = "MATCH (p:person) WHERE p.name IN ['Gal Derriere', 'Lucy Yanfital'] AND p.age in [30] RETURN p.name ORDER BY p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        self.env.assertNotIn('Label Scan', plan)

        expected_result = [['Lucy Yanfital']]
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Validate empty IN filters across multiple indexed properties
        query = "MATCH (p:person) WHERE p.name IN [] OR p.age IN [] RETURN p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)

        expected_result = []
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Combine IN filters with other relational filters.
        query = "MATCH (p:person) WHERE p.name IN ['Gal Derriere', 'Lucy Yanfital'] AND p.name < 'H' RETURN p.name ORDER BY p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        self.env.assertNotIn('Label Scan', plan)

        expected_result = [['Gal Derriere']]
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        query = "MATCH (p:person) WHERE p.name IN ['Gal Derriere', 'Lucy Yanfital'] OR p.age = 33 RETURN p.name ORDER BY p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        self.env.assertNotIn('Label Scan', plan)

        expected_result = [['Gal Derriere'], ['Lucy Yanfital'], ['Omri Traub']]
        result = redis_graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

    # ',' is the default separator for tag indices.
    # We've updated our separator to '\0'; this test verifies issue 696:
    # https://github.com/RedisGraph/RedisGraph/issues/696
    def test06_tag_separator(self):
        redis_con = self.env.getConnection()
        redis_graph = Graph("G", redis_con)

        # Create a single node with a long string property, introduce a comma as part of the string.
        query = """CREATE (:Node{value:"A ValuePartition is a pattern that describes a restricted set of classes from which a property can be associated. The parent class is used in restrictions, and the covering axiom means that only members of the subclasses may be used as values."})"""
        redis_graph.query(query)

        # Index property.
        query = """CREATE INDEX ON :Node(value)"""
        redis_graph.query(query)

        # Make sure node is returned by index scan.
        query = """MATCH (a:Node{value:"A ValuePartition is a pattern that describes a restricted set of classes from which a property can be associated. The parent class is used in restrictions, and the covering axiom means that only members of the subclasses may be used as values."}) RETURN a"""
        plan = redis_graph.execution_plan(query)
        result_set = redis_graph.query(query).result_set
        self.env.assertIn('Index Scan', plan)
        self.env.assertEqual(len(result_set), 1)

    def test07_index_scan_and_id(self):
        redis_con = self.env.getConnection()
        redis_graph = Graph("G", redis_con)
        nodes = []
        for i in range(10):
            node = Node(node_id=i, label='person', properties={'age': i})
            nodes.append(node)
            redis_graph.add_node(node)
            redis_graph.flush()

        query = """CREATE INDEX ON :person(age)"""
        query_result = redis_graph.query(query)
        self.env.assertEqual(1, query_result.indices_created)

        query = """MATCH (n:person) WHERE id(n)>=7 AND n.age<9 RETURN n ORDER BY n.age"""
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        self.env.assertIn('Filter', plan)
        query_result = redis_graph.query(query)

        self.env.assertEqual(2, len(query_result.result_set))
        expected_result = [[nodes[7]], [nodes[8]]]
        self.env.assertEquals(expected_result, query_result.result_set)

    # Validate placement of index scans and filter ops when not all filters can be replaced.
    def test08_index_scan_multiple_filters(self):
        query = "MATCH (p:person) WHERE p.age = 30 AND NOT EXISTS(p.fakeprop) RETURN p.name"
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        self.env.assertNotIn('Label Scan', plan)
        self.env.assertIn('Filter', plan)

        query_result = redis_graph.query(query)
        expected_result = ["Lucy Yanfital"]
        self.env.assertEquals(query_result.result_set[0], expected_result)

    def test09_index_scan_with_params(self):
        query = "MATCH (p:person) WHERE p.age = $age RETURN p.name"
        params = {'age': 30}
        query = redis_graph.build_params_header(params) + query
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        query_result = redis_graph.query(query)
        expected_result = ["Lucy Yanfital"]
        self.env.assertEquals(query_result.result_set[0], expected_result)

    def test10_index_scan_with_param_array(self):
        query = "MATCH (p:person) WHERE p.age in $ages RETURN p.name"
        params = {'ages': [30]}
        query = redis_graph.build_params_header(params) + query
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        query_result = redis_graph.query(query)
        expected_result = ["Lucy Yanfital"]
        self.env.assertEquals(query_result.result_set[0], expected_result)

    def test11_single_index_multiple_scans(self):
        query = "MERGE (p1:person {age: 40}) MERGE (p2:person {age: 41})"
        plan = redis_graph.execution_plan(query)
        # Two index scans should be performed.
        self.env.assertEqual(plan.count("Index Scan"), 2)

        query_result = redis_graph.query(query)
        # Two new nodes should be created.
        self.env.assertEquals(query_result.nodes_created, 2)

    def test12_remove_scans_before_index(self):
        query = "MATCH (a:person {age: 32})-[]->(b) WHERE (b:person)-[]->(a) RETURN a"
        plan = redis_graph.execution_plan(query)
        # One index scan should be performed.
        self.env.assertEqual(plan.count("Index Scan"), 1)

    def test13_point_index_scan(self):
        # create index
        q = "CREATE INDEX ON :restaurant(location)"
        redis_graph.query(q)

        # create restaurant
        q = "CREATE (:restaurant {location: point({latitude:30.27822306, longitude:-97.75134723})})"
        redis_graph.query(q)

        # locate other restaurants within a 1000m radius
        q = """MATCH (r:restaurant)
        WHERE distance(r.location, point({latitude:30.27822306, longitude:-97.75134723})) < 1000
        RETURN r"""

        # make sure index is used
        plan = redis_graph.execution_plan(q)
        self.env.assertIn("Index Scan", plan)

        # refine query from '<' to '<='
        q = """MATCH (r:restaurant)
        WHERE distance(r.location, point({latitude:30.27822306, longitude:-97.75134723})) <= 1000
        RETURN r"""

        # make sure index is used
        plan = redis_graph.execution_plan(q)
        self.env.assertIn("Index Scan", plan)

        # index should NOT be used when searching for points outside of a circle
        # testing operand: '>', '>=' and '='
        q = """MATCH (r:restaurant)
        WHERE distance(r.location, point({latitude:30.27822306, longitude:-97.75134723})) > 1000
        RETURN r"""

        # make sure index is NOT used
        plan = redis_graph.execution_plan(q)
        self.env.assertNotIn("Index Scan", plan)

        q = """MATCH (r:restaurant)
        WHERE distance(r.location, point({latitude:30.27822306, longitude:-97.75134723})) >= 1000
        RETURN r"""

        # make sure index is NOT used
        plan = redis_graph.execution_plan(q)
        self.env.assertNotIn("Index Scan", plan)

        q = """MATCH (r:restaurant)
        WHERE distance(r.location, point({latitude:30.27822306, longitude:-97.75134723})) = 1000
        RETURN r"""

        # make sure index is NOT used
        plan = redis_graph.execution_plan(q)
        self.env.assertNotIn("Index Scan", plan)

    def test14_index_scan_utilize_array(self):
        # Querying indexed properties using IN with a constant array should utilize indexes.
        query = "MATCH (a:person) WHERE a.age IN [34, 33] RETURN a.name ORDER BY a.name"
        plan = redis_graph.execution_plan(query)
        # One index scan should be performed.
        self.env.assertEqual(plan.count("Index Scan"), 1)
        query_result = redis_graph.query(query)
        expected_result = [["Noam Nativ"], ["Omri Traub"]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # Querying indexed properties using IN with a generated array should utilize indexes.
        query = "MATCH (a:person) WHERE a.age IN range(33, 34) RETURN a.name ORDER BY a.name"
        plan = redis_graph.execution_plan(query)
        # One index scan should be performed.
        self.env.assertEqual(plan.count("Index Scan"), 1)
        query_result = redis_graph.query(query)
        expected_result = [["Noam Nativ"], ["Omri Traub"]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # Querying indexed properties using IN with a non-constant array should not utilize indexes.
        query = "MATCH (a:person)-[]->(b) WHERE a.age IN b.arr RETURN a"
        plan = redis_graph.execution_plan(query)
        # No index scans should be performed.
        self.env.assertEqual(plan.count("Label Scan"), 1)
        self.env.assertEqual(plan.count("Index Scan"), 0)

    # Test fulltext result scoring
    def test15_fulltext_result_scoring(self):
        g = Graph('fulltext_scoring', self.env.getConnection())

        # create full-text index over label 'L', attribute 'v'
        g.call_procedure('db.idx.fulltext.createNodeIndex', 'L', 'v')

        # introduce 2 nodes
        g.query("create (:L {v:'hello world hello'})")
        g.query("create (:L {v:'hello world hello world'})")

        # query nodes using fulltext search
        q = """CALL db.idx.fulltext.queryNodes('L', 'hello world') YIELD node, score
               RETURN node.v, score
               ORDER BY score"""
        res = g.query(q)
        actual = res.result_set
        expected = [['hello world hello', 1.5], ['hello world hello world', 2]]
        self.env.assertEqual(expected, actual)

    def test16_runtime_index_utilization(self):
        # find all person nodes with age in the range 33-37
        # current age (x) should be resolved at runtime
        # index query should be constructed for each age value
        q = """UNWIND range(33, 37) AS x
        MATCH (p:person {age:x})
        RETURN p.name
        ORDER BY p.name"""
        plan = redis_graph.execution_plan(q)
        self.env.assertIn('Index Scan', plan)
        query_result = redis_graph.query(q)
        expected_result = [["Noam Nativ"], ["Omri Traub"]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # similar to the query above, only this time the filter is specified
        # by an OR condition
        q = """WITH 33 AS min, 34 AS max 
        MATCH (p:person)
        WHERE p.age = min OR p.age = max
        RETURN p.name
        ORDER BY p.name"""
        plan = redis_graph.execution_plan(q)
        self.env.assertIn('Index Scan', plan)
        query_result = redis_graph.query(q)
        expected_result = [["Noam Nativ"], ["Omri Traub"]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # find all person nodes whose age equals 'x' (33)
        # the value of 'x' is known only at runtime
        q = """WITH 33 AS x
        MATCH (p:person {age:x})
        RETURN p.name
        ORDER BY p.name"""
        plan = redis_graph.execution_plan(q)
        self.env.assertIn('Index Scan', plan)
        query_result = redis_graph.query(q)
        expected_result = [["Omri Traub"]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # find all person nodes whose age equals x + 1
        # the expression x+1 is evaluated to the constant 33 only at runtime
        # expecting index query to be constructed at runtime
        q = """WITH 32 AS x
        MATCH (p:person)
        WHERE p.age = (x + 1)
        RETURN p.name
        ORDER BY p.name"""
        plan = redis_graph.execution_plan(q)
        self.env.assertIn('Index Scan', plan)
        query_result = redis_graph.query(q)
        expected_result = [["Omri Traub"]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # same idea as the previous query, only we've switched the position of
        # the operands; the queried entity (p.age) is now on the right-hand
        # side of the filter, and we expect the same behavior
        q = """WITH 32 AS x
        MATCH (p:person)
        WHERE (x + 1) = p.age
        RETURN p.name
        ORDER BY p.name"""
        plan = redis_graph.execution_plan(q)
        self.env.assertIn('Index Scan', plan)
        query_result = redis_graph.query(q)
        expected_result = [["Omri Traub"]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # find all person nodes 'b' with age greater than node 'a'
        # a's age value is determined only at runtime
        # expecting index to be used to resolve 'b' nodes, index query should be
        # constructed at runtime
        q = """MATCH (a:person {name:'Omri Traub'})
        WITH a AS a
        MATCH (b:person)
        WHERE b.age > a.age
        RETURN b.name
        ORDER BY b.name"""
        plan = redis_graph.execution_plan(q)
        self.env.assertIn('Index Scan', plan)
        query_result = redis_graph.query(q)
        expected_result = [["Noam Nativ"]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # same idea as the previous query, only this time we've switched the
        # filter operands' positions; the queried entity is on the right-hand side
        q = """MATCH (a:person {name: 'Omri Traub'})
        WITH a AS a
        MATCH (b:person)
        WHERE a.age < b.age
        RETURN b.name
        ORDER BY b.name"""
        plan = redis_graph.execution_plan(q)
        self.env.assertIn('Index Scan', plan)
        query_result = redis_graph.query(q)
        expected_result = [["Noam Nativ"]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # TODO: The following query uses the "Value Hash Join" where it would be
        # better to use "Index Scan"
        q = """UNWIND range(33, 37) AS x MATCH (a:person {age:x}), (b:person {age:x}) RETURN a.name, b.name ORDER BY a.name, b.name"""

    def test17_runtime_index_utilization_array_values(self):
        # When constructing an index query at runtime it is possible to encounter
        # non-indexable values, e.g. arrays. In that case the index is still
        # utilized, producing every entity that was indexed with a non-indexable
        # value, and the index scan operation then applies the original filter.

        # create person nodes with array value for their 'age' attribute
        q = """CREATE (:person {age:[36], name:'leonard'}), (:person {age:[34], name:['maynard']})"""
        redis_graph.query(q)

        # find all person nodes with age value of [36]
        q = """WITH [36] AS age MATCH (a:person {age:age}) RETURN a.name"""
        plan = redis_graph.execution_plan(q)
        self.env.assertIn('Index Scan', plan)
        query_result = redis_graph.query(q)
        expected_result = [["leonard"]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # find all person nodes with age > [33]
        q = """WITH [33] AS age MATCH (a:person) WHERE a.age > age RETURN a.name"""
        plan = redis_graph.execution_plan(q)
        self.env.assertIn('Index Scan', plan)
        query_result = redis_graph.query(q)
        expected_result = [["leonard"], [["maynard"]]]
        self.env.assertEquals(query_result.result_set, expected_result)

        # Combine an indexable value with a non-indexable value in the index query.
        q = """WITH [33] AS age, 'leonard' AS name MATCH (a:person) WHERE a.age >= age AND a.name = name RETURN a.name"""
        plan = redis_graph.execution_plan(q)
        self.env.assertIn('Index Scan', plan)
        query_result = redis_graph.query(q)
        expected_result = [["leonard"]]
        self.env.assertEquals(query_result.result_set, expected_result)
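
The same three-step pattern (fetch the plan, assert 'Index Scan' is present, compare the result set) recurs throughout these index tests. A minimal sketch of a helper that could fold it into one call, assuming the module-level redis_graph handle used above (the helper name is illustrative, not part of the original suite):

    def _assert_index_scan(self, query, expected_result):
        # Hypothetical convenience wrapper: verify the planner chose an
        # index scan and that the query returns exactly the expected rows.
        plan = redis_graph.execution_plan(query)
        self.env.assertIn('Index Scan', plan)
        query_result = redis_graph.query(query)
        self.env.assertEquals(query_result.result_set, expected_result)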
Code Example #27
0
class testReturnDistinctFlow1(FlowTestsBase):
    def __init__(self):
        self.env = Env(decodeResponses=True)
        global graph1
        redis_con = self.env.getConnection()
        graph1 = Graph("G1", redis_con)
        self.populate_graph()

    def populate_graph(self):
        global graph1
        graph1.query("CREATE (:PARENT {name: 'Stevie'})")
        graph1.query("CREATE (:PARENT {name: 'Mike'})")
        graph1.query("CREATE (:PARENT {name: 'James'})")
        graph1.query("CREATE (:PARENT {name: 'Rich'})")
        graph1.query(
            "MATCH (p:PARENT {name: 'Stevie'}) CREATE (p)-[:HAS]->(c:CHILD {name: 'child1'})"
        )
        graph1.query(
            "MATCH (p:PARENT {name: 'Stevie'}) CREATE (p)-[:HAS]->(c:CHILD {name: 'child2'})"
        )
        graph1.query(
            "MATCH (p:PARENT {name: 'Stevie'}) CREATE (p)-[:HAS]->(c:CHILD {name: 'child3'})"
        )
        graph1.query(
            "MATCH (p:PARENT {name: 'Mike'}) CREATE (p)-[:HAS]->(c:CHILD {name: 'child4'})"
        )
        graph1.query(
            "MATCH (p:PARENT {name: 'James'}) CREATE (p)-[:HAS]->(c:CHILD {name: 'child5'})"
        )
        graph1.query(
            "MATCH (p:PARENT {name: 'James'}) CREATE (p)-[:HAS]->(c:CHILD {name: 'child6'})"
        )

    def test_distinct_optimization(self):
        global graph1
        # Make sure we do not omit distinct when performing a non-aggregated projection.
        execution_plan = graph1.execution_plan(
            "MATCH (n) RETURN DISTINCT n.name, n.age")
        self.env.assertIn("Distinct", execution_plan)

        # Distinct should be omitted when performing aggregation.
        execution_plan = graph1.execution_plan(
            "MATCH (n) RETURN DISTINCT n.name, max(n.age)")
        self.env.assertNotIn("Distinct", execution_plan)

    def test_issue_395_scenario(self):
        global graph1
        # all
        result = graph1.query(
            "MATCH (p:PARENT)-[:HAS]->(:CHILD) RETURN p.name")
        self.env.assertEqual(result.result_set,
                             [['Stevie'], ['Stevie'], ['Stevie'], ['Mike'],
                              ['James'], ['James']])

        # order
        result = graph1.query(
            "MATCH (p:PARENT)-[:HAS]->(:CHILD) RETURN p.name ORDER BY p.name")
        self.env.assertEqual(result.result_set,
                             [['James'], ['James'], ['Mike'], ['Stevie'],
                              ['Stevie'], ['Stevie']])

        # limit
        result = graph1.query(
            "MATCH (p:PARENT)-[:HAS]->(:CHILD) RETURN p.name LIMIT 2")
        self.env.assertEqual(result.result_set, [['Stevie'], ['Stevie']])

        # order+limit
        result = graph1.query(
            "MATCH (p:PARENT)-[:HAS]->(:CHILD) RETURN p.name ORDER BY p.name LIMIT 2"
        )
        self.env.assertEqual(result.result_set, [['James'], ['James']])

        # all+distinct
        result = graph1.query(
            "MATCH (p:PARENT)-[:HAS]->(:CHILD) RETURN DISTINCT p.name")
        self.env.assertEqual(result.result_set,
                             [['Stevie'], ['Mike'], ['James']])

        # order+distinct
        result = graph1.query(
            "MATCH (p:PARENT)-[:HAS]->(:CHILD) RETURN DISTINCT p.name ORDER BY p.name"
        )
        self.env.assertEqual(result.result_set,
                             [['James'], ['Mike'], ['Stevie']])

        # limit+distinct
        result = graph1.query(
            "MATCH (p:PARENT)-[:HAS]->(:CHILD) RETURN DISTINCT p.name LIMIT 2")
        self.env.assertEqual(result.result_set, [['Stevie'], ['Mike']])

        # order+limit+distinct
        result = graph1.query(
            "MATCH (p:PARENT)-[:HAS]->(:CHILD) RETURN DISTINCT p.name ORDER BY p.name LIMIT 2"
        )
        self.env.assertEqual(result.result_set, [['James'], ['Mike']])

    def test_distinct_with_order(self):
        # The results of DISTINCT should not be affected by the values in the ORDER BY clause
        result = graph1.query(
            "MATCH (p:PARENT)-[:HAS]->(c:CHILD) RETURN DISTINCT p.name ORDER BY c.name"
        )
        self.env.assertEqual(result.result_set,
                             [['Stevie'], ['Mike'], ['James']])

        result = graph1.query(
            "UNWIND range(0,3) AS a UNWIND range(4,7) AS b RETURN DISTINCT a ORDER BY b"
        )
        self.env.assertEqual(result.result_set, [[3], [2], [1], [0]])
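
For reference, the distinct set exercised by test_issue_395_scenario can be mirrored in plain Python; a small illustration, with the rows copied from the 'all' query above (names are illustrative):

# Illustrative only: compute the distinct p.name rows from the 'all' query.
rows = [['Stevie'], ['Stevie'], ['Stevie'], ['Mike'], ['James'], ['James']]
distinct = [r for i, r in enumerate(rows) if r not in rows[:i]]
assert distinct == [['Stevie'], ['Mike'], ['James']]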
Code Example #28
0
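This example relies on two fixtures defined elsewhere in its suite: add_values, which presumably loads the 'games' dataset queried below, and to_dict, which turns a flat reply row into a mapping. A minimal sketch of to_dict, assuming each row alternates field names and values (as in ['brand', 'sony', 'count', '3']):

def to_dict(reply_row):
    # ['brand', 'sony', 'count', '3'] -> {'brand': 'sony', 'count': '3'}
    return {reply_row[i]: reply_row[i + 1] for i in range(0, len(reply_row), 2)}
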
class TestAggregate():
    def __init__(self):
        self.env = Env()
        add_values(self.env)

    def testGroupBy(self):
        cmd = [
            'ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'count', '0', 'AS', 'count', 'SORTBY', 2, '@count', 'desc',
            'LIMIT', '0', '5'
        ]

        res = self.env.cmd(*cmd)
        self.env.assertIsNotNone(res)
        self.env.assertEqual([
            292L, ['brand', '', 'count', '1518'],
            ['brand', 'mad catz', 'count', '43'],
            ['brand', 'generic', 'count', '40'],
            ['brand', 'steelseries', 'count', '37'],
            ['brand', 'logitech', 'count', '35']
        ], res)

    def testMinMax(self):
        cmd = [
            'ft.aggregate', 'games', 'sony', 'GROUPBY', '1', '@brand',
            'REDUCE', 'count', '0', 'REDUCE', 'min', '1', '@price', 'as',
            'minPrice', 'SORTBY', '2', '@minPrice', 'DESC'
        ]
        res = self.env.cmd(*cmd)
        self.env.assertIsNotNone(res)
        row = to_dict(res[1])
        self.env.assertEqual(88, int(float(row['minPrice'])))

        cmd = [
            'ft.aggregate', 'games', 'sony', 'GROUPBY', '1', '@brand',
            'REDUCE', 'count', '0', 'REDUCE', 'max', '1', '@price', 'as',
            'maxPrice', 'SORTBY', '2', '@maxPrice', 'DESC'
        ]
        res = self.env.cmd(*cmd)
        row = to_dict(res[1])
        self.env.assertEqual(695, int(float(row['maxPrice'])))

    def testAvg(self):
        cmd = [
            'ft.aggregate', 'games', 'sony', 'GROUPBY', '1', '@brand',
            'REDUCE', 'avg', '1', '@price', 'AS', 'avg_price', 'REDUCE',
            'count', '0', 'SORTBY', '2', '@avg_price', 'DESC'
        ]
        res = self.env.cmd(*cmd)
        self.env.assertIsNotNone(res)
        self.env.assertEqual(26, res[0])
        # Ensure the formatting actually exists

        first_row = to_dict(res[1])
        self.env.assertEqual(109, int(float(first_row['avg_price'])))

        for row in res[1:]:
            row = to_dict(row)
            self.env.assertIn('avg_price', row)

        # Test aliasing
        cmd = [
            'FT.AGGREGATE', 'games', 'sony', 'GROUPBY', '1', '@brand',
            'REDUCE', 'avg', '1', '@price', 'AS', 'avgPrice'
        ]
        res = self.env.cmd(*cmd)
        first_row = to_dict(res[1])
        self.env.assertEqual(17, int(float(first_row['avgPrice'])))

    def testCountDistinct(self):
        cmd = [
            'FT.AGGREGATE', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'COUNT_DISTINCT', '1', '@title', 'AS', 'count_distinct(title)',
            'REDUCE', 'COUNT', '0'
        ]
        res = self.env.cmd(*cmd)[1:]
        # print res
        row = to_dict(res[0])
        self.env.assertEqual(1484, int(row['count_distinct(title)']))

        cmd = [
            'FT.AGGREGATE', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'COUNT_DISTINCTISH', '1', '@title', 'AS',
            'count_distinctish(title)', 'REDUCE', 'COUNT', '0'
        ]
        res = self.env.cmd(*cmd)[1:]
        # print res
        row = to_dict(res[0])
        self.env.assertEqual(1461, int(row['count_distinctish(title)']))

    def testQuantile(self):
        cmd = [
            'FT.AGGREGATE', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'QUANTILE', '2', '@price', '0.50', 'AS', 'q50', 'REDUCE',
            'QUANTILE', '2', '@price', '0.90', 'AS', 'q90', 'REDUCE',
            'QUANTILE', '2', '@price', '0.95', 'AS', 'q95', 'REDUCE', 'AVG',
            '1', '@price', 'REDUCE', 'COUNT', '0', 'AS', 'rowcount', 'SORTBY',
            '2', '@rowcount', 'DESC', 'MAX', '1'
        ]

        res = self.env.cmd(*cmd)
        row = to_dict(res[1])
        # TODO: Better samples
        self.env.assertAlmostEqual(14.99, float(row['q50']), delta=3)
        self.env.assertAlmostEqual(70, float(row['q90']), delta=50)
        self.env.assertAlmostEqual(110, (float(row['q95'])), delta=50)

    def testStdDev(self):
        cmd = [
            'FT.AGGREGATE', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'STDDEV', '1', '@price', 'AS', 'stddev(price)', 'REDUCE', 'AVG',
            '1', '@price', 'AS', 'avgPrice', 'REDUCE', 'QUANTILE', '2',
            '@price', '0.50', 'AS', 'q50Price', 'REDUCE', 'COUNT', '0', 'AS',
            'rowcount', 'SORTBY', '2', '@rowcount', 'DESC', 'LIMIT', '0', '10'
        ]
        res = self.env.cmd(*cmd)
        row = to_dict(res[1])

        self.env.assertTrue(10 <= int(float(row['q50Price'])) <= 20)
        self.env.assertAlmostEqual(53,
                                   int(float(row['stddev(price)'])),
                                   delta=50)
        self.env.assertEqual(29, int(float(row['avgPrice'])))

    def testParseTime(self):
        cmd = [
            'FT.AGGREGATE', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'COUNT', '0', 'AS', 'count', 'APPLY', 'timefmt(1517417144)', 'AS',
            'dt', 'APPLY', 'parse_time("%FT%TZ", @dt)', 'as', 'parsed_dt',
            'LIMIT', '0', '1'
        ]
        res = self.env.cmd(*cmd)

        self.env.assertEqual([
            'brand', '', 'count', '1518', 'dt', '2018-01-31T16:45:44Z',
            'parsed_dt', '1517417144'
        ], res[1])

    def testRandomSample(self):
        cmd = [
            'FT.AGGREGATE', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'COUNT', '0', 'AS', 'num', 'REDUCE', 'RANDOM_SAMPLE', '2',
            '@price', '10', 'SORTBY', '2', '@num', 'DESC', 'MAX', '10'
        ]
        for row in self.env.cmd(*cmd)[1:]:
            self.env.assertIsInstance(row[5], list)
            self.env.assertGreater(len(row[5]), 0)
            self.env.assertGreaterEqual(row[3], len(row[5]))

            self.env.assertLessEqual(len(row[5]), 10)

    def testTimeFunctions(self):
        cmd = [
            'FT.AGGREGATE', 'games', '*', 'APPLY', '1517417144', 'AS', 'dt',
            'APPLY', 'timefmt(@dt)', 'AS', 'timefmt', 'APPLY', 'day(@dt)',
            'AS', 'day', 'APPLY', 'hour(@dt)', 'AS', 'hour', 'APPLY',
            'minute(@dt)', 'AS', 'minute', 'APPLY', 'month(@dt)', 'AS',
            'month', 'APPLY', 'dayofweek(@dt)', 'AS', 'dayofweek', 'APPLY',
            'dayofmonth(@dt)', 'AS', 'dayofmonth', 'APPLY', 'dayofyear(@dt)',
            'AS', 'dayofyear', 'APPLY', 'year(@dt)', 'AS', 'year', 'LIMIT',
            '0', '1'
        ]
        res = self.env.cmd(*cmd)
        self.env.assertListEqual([
            1L,
            [
                'dt', '1517417144', 'timefmt', '2018-01-31T16:45:44Z', 'day',
                '1517356800', 'hour', '1517414400', 'minute', '1517417100',
                'month', '1514764800', 'dayofweek', '3', 'dayofmonth', '31',
                'dayofyear', '30', 'year', '2018'
            ]
        ], res)

    def testStringFormat(self):
        cmd = [
            'FT.AGGREGATE', 'games', '@brand:sony', 'GROUPBY', '2', '@title',
            '@brand', 'REDUCE', 'COUNT', '0', 'REDUCE', 'MAX', '1', '@price',
            'AS', 'price', 'APPLY',
            'format("%s|%s|%s|%s", @title, @brand, "Mark", @price)', 'as',
            'titleBrand', 'LIMIT', '0', '10'
        ]
        res = self.env.cmd(*cmd)
        for row in res[1:]:
            row = to_dict(row)
            expected = '%s|%s|%s|%g' % (row['title'], row['brand'], 'Mark',
                                        float(row['price']))
            self.env.assertEqual(expected, row['titleBrand'])

    def testSum(self):
        cmd = [
            'ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'count', '0', 'AS', 'count', 'REDUCE', 'sum', 1, '@price', 'AS',
            'sum(price)', 'SORTBY', 2, '@sum(price)', 'desc', 'LIMIT', '0', '5'
        ]
        res = self.env.cmd(*cmd)
        self.env.assertEqual([
            292L, ['brand', '', 'count', '1518', 'sum(price)', '44780.69'],
            ['brand', 'mad catz', 'count', '43', 'sum(price)', '3973.48'],
            ['brand', 'razer', 'count', '26', 'sum(price)', '2558.58'],
            ['brand', 'logitech', 'count', '35', 'sum(price)', '2329.21'],
            ['brand', 'steelseries', 'count', '37', 'sum(price)', '1851.12']
        ], res)

    def testFilter(self):
        cmd = [
            'ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'count', '0', 'AS', 'count', 'FILTER', '@count > 5'
        ]

        res = self.env.cmd(*cmd)
        for row in res[1:]:
            row = to_dict(row)
            self.env.assertGreater(int(row['count']), 5)

        cmd = [
            'ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'count', '0', 'AS', 'count', 'FILTER', '@count < 5', 'FILTER',
            '@count > 2 && @brand != ""'
        ]

        res = self.env.cmd(*cmd)
        for row in res[1:]:
            row = to_dict(row)
            self.env.assertLess(int(row['count']), 5)
            self.env.assertGreater(int(row['count']), 2)

    def testToList(self):
        cmd = [
            'ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand', 'REDUCE',
            'count_distinct', '1', '@price', 'as', 'count', 'REDUCE', 'tolist',
            1, '@price', 'as', 'prices', 'SORTBY', 2, '@count', 'desc',
            'LIMIT', '0', '5'
        ]
        res = self.env.cmd(*cmd)

        for row in res[1:]:
            row = to_dict(row)
            self.env.assertEqual(int(row['count']), len(row['prices']))

    def testSortBy(self):
        res = self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', '1',
                           '@brand', 'REDUCE', 'sum', 1, '@price', 'as',
                           'price', 'SORTBY', 2, '@price', 'desc', 'LIMIT',
                           '0', '2')

        self.env.assertListEqual([
            292L, ['brand', '', 'price', '44780.69'],
            ['brand', 'mad catz', 'price', '3973.48']
        ], res)

        res = self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', '1',
                           '@brand', 'REDUCE', 'sum', 1, '@price', 'as',
                           'price', 'SORTBY', 2, '@price', 'asc', 'LIMIT', '0',
                           '2')

        self.env.assertListEqual([
            292L, ['brand', 'myiico', 'price', '0.23'],
            ['brand', 'crystal dynamics', 'price', '0.25']
        ], res)

        # Test MAX with limit higher than it
        res = self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', '1',
                           '@brand', 'REDUCE', 'sum', 1, '@price', 'as',
                           'price', 'SORTBY', 2, '@price', 'asc', 'MAX', 2)

        self.env.assertListEqual([
            292L, ['brand', 'myiico', 'price', '0.23'],
            ['brand', 'crystal dynamics', 'price', '0.25']
        ], res)

        # Test Sorting by multiple properties
        res = self.env.cmd(
            'ft.aggregate',
            'games',
            '*',
            'GROUPBY',
            '1',
            '@brand',
            'REDUCE',
            'sum',
            1,
            '@price',
            'as',
            'price',
            'APPLY',
            '(@price % 10)',
            'AS',
            'price',
            'SORTBY',
            4,
            '@price',
            'asc',
            '@brand',
            'desc',
            'MAX',
            10,
        )
        self.env.assertListEqual([
            292L, ['brand', 'zps', 'price', '0'],
            ['brand', 'zalman', 'price', '0'], [
                'brand', 'yoozoo', 'price', '0'
            ], ['brand', 'white label', 'price', '0'],
            ['brand', 'stinky', 'price', '0'],
            ['brand', 'polaroid', 'price', '0'],
            ['brand', 'plantronics', 'price', '0'],
            ['brand', 'ozone', 'price', '0'], ['brand', 'oooo', 'price', '0'],
            ['brand', 'neon', 'price', '0']
        ], res)

    def testExpressions(self):
        pass

    def testNoGroup(self):
        res = self.env.cmd(
            'ft.aggregate',
            'games',
            '*',
            'LOAD',
            '2',
            '@brand',
            '@price',
            'APPLY',
            'floor(sqrt(@price)) % 10',
            'AS',
            'price',
            'SORTBY',
            4,
            '@price',
            'desc',
            '@brand',
            'desc',
            'MAX',
            5,
        )
        exp = [
            2265L, ['brand', 'Xbox', 'price', '9'],
            ['brand', 'turtle beach', 'price', '9'],
            ['brand', 'trust', 'price', '9'],
            ['brand', 'steelseries', 'price', '9'],
            ['brand', 'speedlink', 'price', '9']
        ]
        # exp = [2265L, ['brand', 'Xbox', 'price', '9'], ['brand', 'Turtle Beach', 'price', '9'], [
        #  'brand', 'Trust', 'price', '9'], ['brand', 'SteelSeries', 'price', '9'], ['brand', 'Speedlink', 'price', '9']]
        self.env.assertListEqual(exp[1], res[1])

    def testLoad(self):
        res = self.env.cmd('ft.aggregate', 'games', '*', 'LOAD', '3', '@brand',
                           '@price', '@nonexist', 'SORTBY', 2, '@price',
                           'DESC', 'MAX', 2)
        exp = [
            3L, ['brand', '', 'price', '759.12'],
            ['brand', 'Sony', 'price', '695.8']
        ]
        self.env.assertEqual(exp[1], res[1])
        self.env.assertEqual(exp[2], res[2])

    def testLoadWithDocId(self):
        res = self.env.cmd('ft.aggregate', 'games', '*', 'LOAD', '3', '@brand',
                           '@price', '@__key', 'SORTBY', 2, '@price', 'DESC',
                           'MAX', 4)
        exp = [
            3L, ['brand', '', 'price', '759.12', '__key', 'B00006JJIC'],
            ['brand', 'Sony', 'price', '695.8', '__key', 'B000F6W1AG']
        ]
        self.env.assertEqual(exp[1], res[1])
        self.env.assertEqual(exp[2], res[2])

        res = self.env.cmd('ft.aggregate', 'games', '*', 'LOAD', '3', '@brand',
                           '@price', '@__key', 'FILTER',
                           '@__key == "B000F6W1AG"')
        self.env.assertEqual(
            res[1], ['brand', 'Sony', 'price', '695.8', '__key', 'B000F6W1AG'])

    def testLoadImplicit(self):
        # same as previous
        res = self.env.cmd('ft.aggregate', 'games', '*', 'LOAD', '1', '@brand',
                           'SORTBY', 2, '@price', 'DESC')
        exp = [
            3L, ['brand', '', 'price', '759.12'],
            ['brand', 'Sony', 'price', '695.8']
        ]
        self.env.assertEqual(exp[1], res[1])

    def testSplit(self):
        res = self.env.cmd(
            'ft.aggregate', 'games', '*', 'APPLY',
            'split("hello world,  foo,,,bar,", ",", " ")', 'AS', 'strs',
            'APPLY', 'split("hello world,  foo,,,bar,", " ", ",")', 'AS',
            'strs2', 'APPLY', 'split("hello world,  foo,,,bar,", "", "")',
            'AS', 'strs3', 'APPLY', 'split("hello world,  foo,,,bar,")', 'AS',
            'strs4', 'APPLY', 'split("hello world,  foo,,,bar,",",")', 'AS',
            'strs5', 'APPLY', 'split("")', 'AS', 'empty', 'LIMIT', '0', '1')
        # print "Got {} results".format(len(res))
        # return
        # pprint.pprint(res)
        self.env.assertListEqual([
            1L,
            [
                'strs', ['hello world', 'foo', 'bar'], 'strs2',
                ['hello', 'world', 'foo,,,bar'], 'strs3',
                ['hello world,  foo,,,bar,'], 'strs4',
                ['hello world', 'foo', 'bar'], 'strs5',
                ['hello world', 'foo', 'bar'], 'empty', []
            ]
        ], res)

    def testFirstValue(self):
        res = self.env.cmd(
            'ft.aggregate', 'games',
            '@brand:(sony|matias|beyerdynamic|(mad catz))', 'GROUPBY', 1,
            '@brand', 'REDUCE', 'FIRST_VALUE', 4, '@title', 'BY', '@price',
            'DESC', 'AS', 'top_item', 'REDUCE', 'FIRST_VALUE', 4, '@price',
            'BY', '@price', 'DESC', 'AS', 'top_price', 'REDUCE', 'FIRST_VALUE',
            4, '@title', 'BY', '@price', 'ASC', 'AS', 'bottom_item', 'REDUCE',
            'FIRST_VALUE', 4, '@price', 'BY', '@price', 'ASC', 'AS',
            'bottom_price', 'SORTBY', 2, '@top_price', 'DESC', 'MAX', 5)
        expected = [
            4L,
            [
                'brand', 'sony', 'top_item',
                'sony psp slim &amp; lite 2000 console', 'top_price', '695.8',
                'bottom_item',
                'sony dlchd20p high speed hdmi cable for playstation 3',
                'bottom_price', '5.88'
            ],
            [
                'brand', 'matias', 'top_item', 'matias halfkeyboard usb',
                'top_price', '559.99', 'bottom_item',
                'matias halfkeyboard usb', 'bottom_price', '559.99'
            ],
            [
                'brand', 'beyerdynamic', 'top_item',
                'beyerdynamic mmx300 pc gaming premium digital headset with microphone',
                'top_price', '359.74', 'bottom_item',
                'beyerdynamic headzone pc gaming digital surround sound system with mmx300 digital headset with microphone',
                'bottom_price', '0'
            ],
            [
                'brand', 'mad catz', 'top_item',
                'mad catz s.t.r.i.k.e.7 gaming keyboard', 'top_price',
                '295.95', 'bottom_item',
                'madcatz mov4545 xbox replacement breakaway cable',
                'bottom_price', '3.49'
            ]
        ]

        # hack :(
        def mklower(result):
            for arr in result[1:]:
                for x in range(len(arr)):
                    arr[x] = arr[x].lower()

        mklower(expected)
        mklower(res)
        self.env.assertListEqual(expected, res)

    def testLoadAfterGroupBy(self):
        with self.env.assertResponseError():
            self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', 1, '@brand',
                         'LOAD', 1, '@brand')

    def testReducerGeneratedAliasing(self):
        rv = self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', 1, '@brand',
                          'REDUCE', 'MIN', 1, '@price', 'LIMIT', 0, 1)
        self.env.assertEqual(
            [292L, ['brand', '', '__generated_aliasminprice', '0']], rv)

        rv = self.env.cmd('ft.aggregate', 'games',
                          '@brand:(sony|matias|beyerdynamic|(mad catz))',
                          'GROUPBY', 1, '@brand', 'REDUCE', 'FIRST_VALUE', 4,
                          '@title', 'BY', '@price', 'DESC', 'SORTBY', 2,
                          '@brand', 'ASC')
        self.env.assertEqual('__generated_aliasfirst_valuetitle,by,price,desc',
                             rv[1][2])

    def testIssue1125(self):
        self.env.skipOnCluster()
        # SEARCH should fail
        self.env.expect('ft.search', 'games', '*', 'limit', 0, 2000000).error()     \
                .contains('LIMIT exceeds maximum of 1000000')
        # SEARCH should succeed
        self.env.expect('ft.config', 'set', 'MAXSEARCHRESULTS', -1).ok()
        rv = self.env.cmd('ft.search', 'games', '*', 'LIMIT', 0, 12345678)
        self.env.assertEqual(4531, len(rv))
        # AGGREGATE should succeed
        rv = self.env.cmd('ft.aggregate', 'games', '*', 'LIMIT', 0, 12345678)
        self.env.assertEqual(2266, len(rv))
        # AGGREGATE should fail
        self.env.expect('ft.config', 'set', 'MAXAGGREGATERESULTS',
                        1000000).ok()
        self.env.expect('ft.aggregate', 'games', '*', 'limit', 0, 2000000).error()     \
                .contains('LIMIT exceeds maximum of 1000000')

        # force global limit on aggregate
        num = 10
        self.env.expect('ft.config', 'set', 'MAXAGGREGATERESULTS', num).ok()
        rv = self.env.cmd('ft.aggregate', 'games', '*')
        self.env.assertEqual(num + 1, len(rv))

    def testMultiSortBy(self):
        self.env.expect('ft.aggregate', 'games', '*',
                           'LOAD', '2', '@brand', '@price',
                           'SORTBY', 2, '@brand', 'DESC',
                           'SORTBY', 2, '@price', 'DESC').error()\
                            .contains('Multiple SORTBY steps are not allowed. Sort multiple fields in a single step')
Code Example #29
0
class testRelationPattern(FlowTestsBase):
    def __init__(self):
        self.env = Env()
        global redis_graph
        redis_con = self.env.getConnection()
        redis_graph = Graph(GRAPH_ID, redis_con)
        self.populate_graph()

    def populate_graph(self):
        # Construct a graph with the form:
        # (v1)-[:e]->(v2)-[:e]->(v3)
        node_props = ['v1', 'v2', 'v3']

        nodes = []
        for idx, v in enumerate(node_props):
            node = Node(label="L", properties={"val": v})
            nodes.append(node)
            redis_graph.add_node(node)

        edge = Edge(nodes[0], "e", nodes[1])
        redis_graph.add_edge(edge)

        edge = Edge(nodes[1], "e", nodes[2])
        redis_graph.add_edge(edge)

        redis_graph.commit()

    # Test patterns that traverse 1 edge.
    def test01_one_hop_traversals(self):
        # Conditional traversal with label
        query = """MATCH (a)-[:e]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        result_a = redis_graph.query(query)

        # Conditional traversal without label
        query = """MATCH (a)-[]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        result_b = redis_graph.query(query)

        # Fixed-length 1-hop traversal with label
        query = """MATCH (a)-[:e*1]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        result_c = redis_graph.query(query)

        # Fixed-length 1-hop traversal without label
        query = """MATCH (a)-[*1]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        result_d = redis_graph.query(query)

        self.env.assertEquals(result_b.result_set, result_a.result_set)
        self.env.assertEquals(result_c.result_set, result_a.result_set)
        self.env.assertEquals(result_d.result_set, result_a.result_set)

    # Test patterns that traverse 2 edges.
    def test02_two_hop_traversals(self):
        # Conditional two-hop traversal without referenced intermediate node
        query = """MATCH (a)-[:e]->()-[:e]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v3']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Fixed-length two-hop traversal (same expected result)
        query = """MATCH (a)-[:e*2]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Variable-length traversal with a minimum bound of 2 (same expected result)
        query = """MATCH (a)-[*2..]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Conditional two-hop traversal with referenced intermediate node
        query = """MATCH (a)-[:e]->(b)-[:e]->(c) RETURN a.val, b.val, c.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2', 'v3']]
        self.env.assertEquals(actual_result.result_set, expected_result)

    # Test variable-length patterns
    def test03_var_len_traversals(self):
        # Variable-length traversal with label
        query = """MATCH (a)-[:e*]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2'], ['v1', 'v3'], ['v2', 'v3']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Variable-length traversal without label (same expected result)
        query = """MATCH (a)-[*]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Variable-length traversal with bounds 1..2 (same expected result)
        query = """MATCH (a)-[:e*1..2]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Variable-length traversal with bounds 0..1
        # This will return every node paired with itself, as well as all
        # single-hop edges.
        query = """MATCH (a)-[:e*0..1]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v1'], ['v1', 'v2'], ['v2', 'v2'],
                           ['v2', 'v3'], ['v3', 'v3']]
        self.env.assertEquals(actual_result.result_set, expected_result)

    # Test variable-length patterns with alternately labeled source
    # and destination nodes, which can cause different execution sequences.
    def test04_variable_length_labeled_nodes(self):
        # Source and edge labeled variable-length traversal
        query = """MATCH (a:L)-[:e*]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2'], ['v1', 'v3'], ['v2', 'v3']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Destination and edge labeled variable-length traversal (same expected result)
        query = """MATCH (a)-[:e*]->(b:L) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Source labeled variable-length traversal (same expected result)
        query = """MATCH (a:L)-[*]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Destination labeled variable-length traversal (same expected result)
        query = """MATCH (a)-[*]->(b:L) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.result_set, expected_result)

    # Test traversals over explicit relationship types
    def test05_relation_types(self):
        # Add two nodes and two edges of a new type.
        # The new form of the graph will be:
        # (v1)-[:e]->(v2)-[:e]->(v3)-[:q]->(v4)-[:q]->(v5)
        query = """MATCH (n {val: 'v3'}) CREATE (n)-[:q]->(:L {val: 'v4'})-[:q]->(:L {val: 'v5'})"""
        actual_result = redis_graph.query(query)
        self.env.assertEquals(actual_result.nodes_created, 2)
        self.env.assertEquals(actual_result.relationships_created, 2)

        # Verify the graph structure
        query = """MATCH (a)-[e]->(b) RETURN a.val, b.val, TYPE(e) ORDER BY TYPE(e), a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2', 'e'], ['v2', 'v3', 'e'],
                           ['v3', 'v4', 'q'], ['v4', 'v5', 'q']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Verify conditional traversals with explicit relation types
        query = """MATCH (a)-[:e]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2'], ['v2', 'v3']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """MATCH (a)-[:q]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v3', 'v4'], ['v4', 'v5']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Verify conditional traversals with multiple explicit relation types
        query = """MATCH (a)-[e:e|:q]->(b) RETURN a.val, b.val, TYPE(e) ORDER BY TYPE(e), a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2', 'e'], ['v2', 'v3', 'e'],
                           ['v3', 'v4', 'q'], ['v4', 'v5', 'q']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Verify variable-length traversals with explicit relation types
        query = """MATCH (a)-[:e*]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2'], ['v1', 'v3'], ['v2', 'v3']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """MATCH (a)-[:q*]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v3', 'v4'], ['v3', 'v5'], ['v4', 'v5']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Verify variable-length traversals with multiple explicit relation types
        query = """MATCH (a)-[:e|:q*]->(b) RETURN a.val, b.val ORDER BY a.val, b.val"""
        actual_result = redis_graph.query(query)
        expected_result = [['v1', 'v2'], ['v1', 'v3'], ['v1', 'v4'],
                           ['v1', 'v5'], ['v2', 'v3'], ['v2', 'v4'],
                           ['v2', 'v5'], ['v3', 'v4'], ['v3', 'v5'],
                           ['v4', 'v5']]
        self.env.assertEquals(actual_result.result_set, expected_result)

    # Test traversals over transposed edge matrices.
    def test06_transposed_traversals(self):
        # The intermediate node 'b' will be used to form the scan operation because it is filtered.
        # As such, one of the traversals must be transposed.
        query = """MATCH (a)-[e]->(b {val:'v3'})-[]->(c:L) RETURN COUNT(e)"""
        plan = redis_graph.execution_plan(query)

        # Verify that the execution plan contains two traversals following opposing edge directions.
        self.env.assertIn("<-", plan)
        self.env.assertIn("->", plan)

        # Verify results.
        actual_result = redis_graph.query(query)
        expected_result = [[1]]
        self.env.assertEquals(actual_result.result_set, expected_result)
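
As an aside, the three-node chain built by populate_graph through the client's object model (add_node/add_edge/commit) could also be created with a single Cypher statement; a minimal sketch using the same redis_graph handle (illustrative alternative, not part of the original test):

# Equivalent one-shot setup for (v1)-[:e]->(v2)-[:e]->(v3).
redis_graph.query("CREATE (:L {val:'v1'})-[:e]->(:L {val:'v2'})-[:e]->(:L {val:'v3'})")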
Code Example #30
0
class testFunctionCallsFlow(FlowTestsBase):
    def __init__(self):
        self.env = Env(decodeResponses=True)
        global graph
        global redis_con
        redis_con = self.env.getConnection()
        graph = Graph("G", redis_con)
        self.populate_graph()

    def populate_graph(self):
        global graph
        nodes = {}
        # Create entities
        for idx, p in enumerate(people):
            node = Node(label="person", properties={"name": p, "val": idx})
            graph.add_node(node)
            nodes[p] = node

        # Fully connected graph
        for src in nodes:
            for dest in nodes:
                if src != dest:
                    edge = Edge(nodes[src], "know", nodes[dest])
                    graph.add_edge(edge)

        for src in nodes:
            for dest in nodes:
                if src != dest:
                    edge = Edge(nodes[src], "works_with", nodes[dest])
                    graph.add_edge(edge)

        graph.commit()
        query = """MATCH (a)-[:know]->(b) CREATE (a)-[:know]->(b)"""
        graph.query(query)

    def expect_type_error(self, query):
        try:
            graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting a type error.
            self.env.assertIn("Type mismatch", str(e))

    def expect_error(self, query, expected_err_msg):
        try:
            graph.query(query)
            assert (False)
        except redis.exceptions.ResponseError as e:
            # Expecting the specified error message.
            self.env.assertIn(expected_err_msg, str(e))

    # Validate capturing of errors prior to query execution.
    def test01_compile_time_errors(self):
        query = """RETURN toUpper(5)"""
        self.expect_type_error(query)

        query = """RETURN 'a' * 2"""
        self.expect_type_error(query)

        query = """RETURN max(1 + min(2))"""
        self.expect_error(
            query,
            "Can't use aggregate functions inside of aggregate functions")

    def test02_boolean_comparisons(self):
        query = """RETURN true = 5"""
        actual_result = graph.query(query)
        expected_result = [[False]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """RETURN true <> 'str'"""
        actual_result = graph.query(query)
        expected_result = [[True]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """RETURN 'anything' <> NULL"""
        actual_result = graph.query(query)
        expected_result = [[None]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """RETURN 'anything' = NULL"""
        actual_result = graph.query(query)
        expected_result = [[None]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """RETURN 10 >= 1.5"""
        actual_result = graph.query(query)
        expected_result = [[True]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """RETURN -1 < 1"""
        actual_result = graph.query(query)
        expected_result = [[True]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test03_boolean_errors(self):
        query = """RETURN 'str' < 5.5"""
        self.expect_type_error(query)

        query = """RETURN true > 5"""
        self.expect_type_error(query)

        query = """MATCH (a) RETURN a < 'anything' LIMIT 1"""
        self.expect_type_error(query)

    def test04_entity_functions(self):
        query = "RETURN ID(5)"
        self.expect_type_error(query)

        query = "MATCH (a) RETURN ID(a) ORDER BY ID(a) LIMIT 3"
        actual_result = graph.query(query)
        expected_result = [[0], [1], [2]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = "MATCH (a)-[e]->() RETURN ID(e) ORDER BY ID(e) LIMIT 3"
        actual_result = graph.query(query)
        expected_result = [[0], [1], [2]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = "RETURN EXISTS(null)"
        actual_result = graph.query(query)
        expected_result = [[False]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = "RETURN EXISTS('anything')"
        actual_result = graph.query(query)
        expected_result = [[True]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test07_nonmap_errors(self):
        query = """MATCH (a) WITH a.name AS scalar RETURN scalar.name"""
        self.expect_type_error(query)

    def test08_apply_all_function(self):
        query = "MATCH () RETURN COUNT(*)"
        actual_result = graph.query(query)
        expected_result = [[4]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = "UNWIND [1, 2] AS a RETURN COUNT(*)"
        actual_result = graph.query(query)
        expected_result = [[2]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Grouping should associate false and 'false' with different groups.
        query = "UNWIND [false,'false',0,'0'] AS a RETURN a, count(a)"
        actual_result = graph.query(query)
        expected_result = [[0, 1], [False, 1], ["false", 1], ['0', 1]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test09_static_aggregation(self):
        query = "RETURN count(*)"
        actual_result = graph.query(query)
        expected_result = [[1]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = "RETURN max(2)"
        actual_result = graph.query(query)
        expected_result = [[2]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = "RETURN min(3)"
        actual_result = graph.query(query)
        expected_result = [[3]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test10_modulo_inputs(self):
        # Validate modulo with integer inputs.
        query = "RETURN 5 % 2"
        actual_result = graph.query(query)
        expected_result = [[1]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Validate modulo with a floating-point dividend.
        query = "RETURN 5.5 % 2"
        actual_result = graph.query(query)
        expected_result = [[1.5]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Validate modulo with a floating-point divisor.
        query = "RETURN 5 % 2.5"
        actual_result = graph.query(query)
        expected_result = [[0]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Validate modulo with both a floating-point dividend and a floating-point divisor.
        query = "RETURN 5.5 % 2.5"
        actual_result = graph.query(query)
        expected_result = [[0.5]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Validate modulo with negative integer inputs.
        query = "RETURN -5 % -2"
        actual_result = graph.query(query)
        expected_result = [[-1]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Validate modulo with negative floating-point inputs.
        query = "RETURN -5.5 % -2.5"
        actual_result = graph.query(query)
        expected_result = [[-0.5]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Validate modulo by 0
        query = "RETURN 3 % 0"
        try:
            actual_result = graph.query(query)
        except redis.ResponseError as e:
            self.env.assertContains("Division by zero", str(e))
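        # Note: as written, the test passes silently if no error is raised; a
        # stricter variant could add an else branch on the try block, e.g.
        # `else: self.env.assertTrue(False)`, to fail when the expected
        # "Division by zero" error does not occur.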

    # Aggregate functions should handle null inputs appropriately.
    def test11_null_aggregate_function_inputs(self):
        # SUM should sum all non-null inputs.
        query = """UNWIND [1, NULL, 3] AS a RETURN sum(a)"""
        actual_result = graph.query(query)
        expected_result = [[4]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # SUM should return 0 given a fully NULL input.
        query = """WITH NULL AS a RETURN sum(a)"""
        actual_result = graph.query(query)
        expected_result = [[0]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # COUNT should count all non-null inputs.
        query = """UNWIND [1, NULL, 3] AS a RETURN count(a)"""
        actual_result = graph.query(query)
        expected_result = [[2]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # COUNT should return 0 given a fully NULL input.
        query = """WITH NULL AS a RETURN count(a)"""
        actual_result = graph.query(query)
        expected_result = [[0]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # COLLECT should ignore null inputs.
        query = """UNWIND [1, NULL, 3] AS a RETURN collect(a)"""
        actual_result = graph.query(query)
        expected_result = [[[1, 3]]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # COLLECT should return an empty array on all null inputs.
        query = """WITH NULL AS a RETURN collect(a)"""
        actual_result = graph.query(query)
        expected_result = [[[]]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    # Verify that nested functions that perform heap allocations return properly.
    def test12_nested_heap_functions(self):
        query = """MATCH p = (n) WITH head(nodes(p)) AS node RETURN node.name ORDER BY node.name"""
        actual_result = graph.query(query)
        expected_result = [['Ailon'], ['Alon'], ['Boaz'], ['Roi']]
        self.env.assertEquals(actual_result.result_set, expected_result)

    # CASE...WHEN statements should properly handle NULL, false, and true evaluations.
    def test13_case_when_inputs(self):
        # Simple case form: single value evaluation.
        query = """UNWIND [NULL, true, false] AS v RETURN v, CASE v WHEN true THEN v END"""
        actual_result = graph.query(query)
        expected_result = [[None, None], [True, True], [False, None]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """UNWIND [NULL, true, false] AS v RETURN v, CASE v WHEN true THEN v WHEN false THEN v END"""
        actual_result = graph.query(query)
        expected_result = [[None, None], [True, True], [False, False]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Generic case form: evaluation for each case.
        query = """UNWIND [NULL, true, false] AS v RETURN v, CASE WHEN v THEN v END"""
        actual_result = graph.query(query)
        # Only the true value should return non-NULL.
        expected_result = [[None, None], [True, True], [False, None]]
        self.env.assertEquals(actual_result.result_set, expected_result)

        query = """UNWIND [NULL, true, false] AS v RETURN v, CASE WHEN v IS NOT NULL THEN v END"""
        actual_result = graph.query(query)
        # The true and false values should both return non-NULL.
        expected_result = [[None, None], [True, True], [False, False]]
        self.env.assertEquals(actual_result.result_set, expected_result)
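        # In both forms, a CASE expression with no matching WHEN branch and no
        # ELSE evaluates to NULL, which is why the unmatched rows above return
        # None.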

    # CASE...WHEN statements should manage allocated values properly.
    def test14_case_when_memory_management(self):
        # Simple case form: single value evaluation.
        query = """WITH 'A' AS a WITH CASE a WHEN 'A' THEN toString(a) END AS key RETURN toLower(key)"""
        actual_result = graph.query(query)
        expected_result = [['a']]
        self.env.assertEquals(actual_result.result_set, expected_result)
        # Generic case form: evaluation for each case.
        query = """WITH 'A' AS a WITH CASE WHEN true THEN toString(a) END AS key RETURN toLower(key)"""
        actual_result = graph.query(query)
        expected_result = [['a']]
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test15_aggregate_error_handling(self):
        functions = [
            "avg", "collect", "count", "max", "min", "sum", "percentileDisc",
            "percentileCont", "stDev"
        ]
        # Test all functions for invalid argument counts.
        for function in functions:
            query = """UNWIND range(0, 10) AS val RETURN %s(val, val, val)""" % (
                function)
            self.expect_error(query, "Received 3 arguments")

        # Test numeric functions for invalid input types.
        numeric_functions = ["avg", "sum", "stDev"]
        for function in numeric_functions:
            query = """UNWIND ['a', 'b', 'c'] AS val RETURN %s(val)""" % (
                function)
            self.expect_type_error(query)

        # Test invalid numeric input for percentile function.
        query = """UNWIND range(0, 10) AS val RETURN percentileDisc(val, -1)"""
        self.expect_error(query, "must be a number in the range 0.0 to 1.0")

    # startNode and endNode calls should return the appropriate nodes.
    def test16_edge_endpoints(self):
        query = """MATCH (a)-[e]->(b) RETURN a.name, startNode(e).name, b.name, endNode(e).name"""
        actual_result = graph.query(query)
        for row in actual_result.result_set:
            self.env.assertEquals(row[0], row[1])
            self.env.assertEquals(row[2], row[3])

    def test17_to_json(self):
        # Test JSON literal values in an array.
        query = """RETURN toJSON([1, 'str', true, NULL])"""
        actual_result = graph.query(query)
        parsed = json.loads(actual_result.result_set[0][0])
        self.env.assertEquals(parsed, [1, "str", True, None])

        # Test JSON conversion of an empty array value.
        query = """WITH [] AS arr RETURN toJSON(arr)"""
        actual_result = graph.query(query)
        parsed = json.loads(actual_result.result_set[0][0])
        self.env.assertEquals(parsed, [])

        # Test JSON conversion of an empty map value.
        query = """WITH {} AS map RETURN toJSON(map)"""
        actual_result = graph.query(query)
        parsed = json.loads(actual_result.result_set[0][0])
        self.env.assertEquals(parsed, {})

        # Test converting a map projection.
        query = """MATCH (n {val: 1}) RETURN toJSON(n {.val, .name})"""
        actual_result = graph.query(query)
        parsed = json.loads(actual_result.result_set[0][0])
        self.env.assertEquals(parsed, {"name": "Alon", "val": 1})

        # Test converting a full node.
        query = """MATCH (n {val: 1}) RETURN toJSON(n)"""
        actual_result = graph.query(query)
        parsed = json.loads(actual_result.result_set[0][0])
        self.env.assertEquals(
            parsed, {
                "type": "node",
                "id": 1,
                "labels": ["person"],
                "properties": {
                    "name": "Alon",
                    "val": 1
                }
            })

        # Test converting a full edge.
        query = """MATCH ({val: 0})-[e:works_with]->({val: 1}) RETURN toJSON(e)"""
        actual_result = graph.query(query)
        start = {
            "id": 0,
            "labels": ["person"],
            "properties": {
                "name": "Roi",
                "val": 0
            }
        }
        end = {
            "id": 1,
            "labels": ["person"],
            "properties": {
                "name": "Alon",
                "val": 1
            }
        }
        parsed = json.loads(actual_result.result_set[0][0])
        self.env.assertEquals(
            parsed, {
                "type": "relationship",
                "id": 12,
                "relationship": "works_with",
                "properties": {},
                "start": start,
                "end": end
            })
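        # Note: the relationship id of 12 is presumably an artifact of how many
        # edges this suite's fixture graph was populated with; it is not part
        # of the toJSON() output format itself.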

        # Test converting a path.
        query = """MATCH path=({val: 0})-[e:works_with]->({val: 1}) RETURN toJSON(path)"""
        actual_result = graph.query(query)
        expected = [{
            'type': 'node',
            'id': 0,
            'labels': ['person'],
            'properties': {
                'name': 'Roi',
                'val': 0
            }
        }, {
            'type': 'relationship',
            'id': 12,
            'relationship': 'works_with',
            'properties': {},
            'start': {
                'id': 0,
                'labels': ['person'],
                'properties': {
                    'name': 'Roi',
                    'val': 0
                }
            },
            'end': {
                'id': 1,
                'labels': ['person'],
                'properties': {
                    'name': 'Alon',
                    'val': 1
                }
            }
        }, {
            'type': 'node',
            'id': 1,
            'labels': ['person'],
            'properties': {
                'name': 'Alon',
                'val': 1
            }
        }]
        parsed = json.loads(actual_result.result_set[0][0])
        self.env.assertEquals(parsed, expected)

    # Memory should be freed properly when the key values are heap-allocated.
    def test18_allocated_keys(self):
        query = """UNWIND ['str1', 'str1', 'str2', 'str1'] AS key UNWIND [1, 2, 3] as agg RETURN toUpper(key) AS key, collect(DISTINCT agg) ORDER BY key"""
        actual_result = graph.query(query)
        expected_result = [['STR1', [1, 2, 3]], ['STR2', [1, 2, 3]]]
        self.env.assertEquals(actual_result.result_set, expected_result)

    def test19_has_labels(self):
        # Test an existing label
        query = """MATCH (n) WHERE n:person RETURN n.name"""
        actual_result = graph.query(query)
        expected_result = [['Roi'], ['Alon'], ['Ailon'], ['Boaz']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Test a non-existent label
        query = """MATCH (n) WHERE n:L RETURN n.name"""
        actual_result = graph.query(query)
        expected_result = []
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Test multiple labels
        query = """MATCH (n) WHERE n:person:L RETURN n.name"""
        actual_result = graph.query(query)
        expected_result = []
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Test OR between different labels
        query = """MATCH (n) WHERE n:person OR n:L RETURN n.name"""
        actual_result = graph.query(query)
        expected_result = [['Roi'], ['Alon'], ['Ailon'], ['Boaz']]
        self.env.assertEquals(actual_result.result_set, expected_result)

        # Test multiple labels using the hasLabels function
        query = """MATCH (n) WHERE hasLabels(n, ['person', 'L']) RETURN n.name"""
        actual_result = graph.query(query)
        expected_result = []
        self.env.assertEquals(actual_result.result_set, expected_result)
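        # Note: hasLabels() presumably requires the node to carry every label
        # in the list, so no rows are returned because no node is labeled both
        # 'person' and 'L'.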

        # Test hasLabels with a mismatched argument type
        query = """MATCH (n) WHERE hasLabels(n, ['person', 1]) RETURN n.name"""
        try:
            graph.query(query)
        except redis.ResponseError as e:
            self.env.assertContains(
                "Type mismatch: expected String but was Integer", str(e))
Code example #31
0
class testAlgebraicExpressionOrder(FlowTestsBase):
    def __init__(self):
        self.env = Env(decodeResponses=True)
        global graph
        redis_con = self.env.getConnection()
        graph = Graph(GRAPH_ID, redis_con)
        self.populate_graph()

    def populate_graph(self):
        # Construct a graph with the form:
        # (a:A)-[:E]->(b:B), (c:C)-[:E]->(b)
        a = Node(label="A", properties={"v": 1})
        graph.add_node(a)

        b = Node(label="B", properties={"v": 2})
        graph.add_node(b)

        c = Node(label="C", properties={"v": 3})
        graph.add_node(c)

        edge = Edge(a, "E", b)
        graph.add_edge(edge)

        edge = Edge(c, "E", b)
        graph.add_edge(edge)

        graph.commit()

    # Test differing patterns with the same destination node.
    def test01_same_destination_permutations(self):
        # Each query should return the same two records.
        expected_result = [[1, 2], [3, 2]]

        # Neither the source nor the destination is labeled, perform an AllNodeScan from the source node.
        query = """MATCH (a)-[:E]->(b) RETURN a.v, b.v ORDER BY a.v, b.v"""
        plan = graph.execution_plan(query)
        self.env.assertIn("All Node Scan | (a)", plan)
        result = graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Destination is labeled, perform a LabelScan from the destination node.
        query = """MATCH (a)-[:E]->(b:B) RETURN a.v, b.v ORDER BY a.v, b.v"""
        plan = graph.execution_plan(query)
        self.env.assertIn("Node By Label Scan | (b:B)", plan)
        result = graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Destination is filtered, perform an AllNodeScan from the destination node.
        query = """MATCH (a)-[:E]->(b) WHERE b.v = 2 RETURN a.v, b.v ORDER BY a.v, b.v"""
        plan = graph.execution_plan(query)
        self.env.assertIn("All Node Scan | (b)", plan)
        result = graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Destination is labeled but source is filtered, perform an AllNodeScan from the source node.
        query = """MATCH (a)-[:E]->(b:B) WHERE a.v = 1 OR a.v = 3 RETURN a.v, b.v ORDER BY a.v, b.v"""
        plan = graph.execution_plan(query)
        self.env.assertIn("All Node Scan | (a)", plan)
        result = graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # The subsequent queries will only return one record.
        expected_result = [[3, 2]]
        # Both are labeled and source is filtered, perform a LabelScan from the source node.
        query = """MATCH (a:C)-[:E]->(b:B) WHERE a.v = 3 RETURN a.v, b.v ORDER BY a.v, b.v"""
        plan = graph.execution_plan(query)
        self.env.assertIn("Node By Label Scan | (a:C)", plan)
        result = graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)

        # Both are labeled and the destination is filtered, perform a LabelScan from the destination node.
        query = """MATCH (a:C)-[:E]->(b:B) WHERE b.v = 2 RETURN a.v, b.v ORDER BY a.v, b.v"""
        plan = graph.execution_plan(query)
        self.env.assertIn("Node By Label Scan | (b:B)", plan)
        result = graph.query(query)
        self.env.assertEquals(result.result_set, expected_result)
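        # Taken together, these checks suggest the planner picks its scan
        # starting point based on which endpoints are labeled and which are
        # filtered, rather than always beginning at the pattern's source node.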