class TestAggregateSecondUseCases():
    """RLTest suite: all tests share one Env created once in __init__."""

    def __init__(self):
        # Boot the test environment and populate the 'games' index.
        # The second argument to add_values is presumably a scale/shard
        # factor -- TODO confirm against add_values' definition.
        self.env = Env()
        add_values(self.env, 2)

    def testSimpleAggregate(self):
        # A bare '*' aggregation returns every indexed row; 4531 is the
        # expected total for the fixture data loaded in __init__.
        res = self.env.cmd('ft.aggregate', 'games', '*')
        self.env.assertIsNotNone(res)
        self.env.assertEqual(len(res), 4531)

    def testSimpleAggregateWithCursor(self):
        # WITHCURSOR replies [results, cursor-id]; a non-zero cursor id
        # means more pages remain beyond the first COUNT=1000 batch.
        res = self.env.cmd('ft.aggregate', 'games', '*', 'WITHCURSOR', 'COUNT', 1000)
        self.env.assertTrue(res[1] != 0)
# Example 2 (scraped example-site separator)
def testAofRewriteSortkeys():
    """AOF rewrite must preserve sort keys: the same WITHSORTKEYS search
    issued before and after an AOF-backed restart returns identical
    results."""
    env = Env(useAof=True)

    def sorted_search():
        # Tag filter, sorted ascending on the sortable TEXT field,
        # returning the field plus its sort key.
        return env.cmd('FT.SEARCH', 'idx', '@bar:{1}', 'SORTBY', 'foo', 'ASC',
                       'RETURN', '1', 'foo', 'WITHSORTKEYS')

    env.cmd('FT.CREATE', 'idx', 'ON', 'HASH',
            'SCHEMA', 'foo', 'TEXT', 'SORTABLE', 'bar', 'TAG')
    env.cmd('FT.ADD', 'idx', '1', '1', 'FIELDS', 'foo', 'A', 'bar', '1')
    env.cmd('FT.ADD', 'idx', '2', '1', 'fields', 'foo', 'B', 'bar', '1')

    before_restart = sorted_search()
    env.restart_and_reload()
    env.assertEqual(before_restart, sorted_search())
def testAofRewriteSortkeys():
    """Duplicate of the ON-HASH variant above, using the short FT.CREATE
    form: sorted search results must match across an AOF restart."""
    env = Env(useAof=True)

    def sorted_search():
        return env.cmd('FT.SEARCH', 'idx', '@bar:{1}', 'SORTBY', 'foo', 'ASC',
                       'RETURN', '1', 'foo', 'WITHSORTKEYS')

    env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'foo',
            'TEXT', 'SORTABLE', 'bar', 'TAG')
    env.cmd('FT.ADD', 'idx', '1', '1', 'FIELDS', 'foo', 'A', 'bar', '1')
    env.cmd('FT.ADD', 'idx', '2', '1', 'fields', 'foo', 'B', 'bar', '1')

    before_restart = sorted_search()
    env.restart_and_reload()
    env.assertEqual(before_restart, sorted_search())
# Example 4 (scraped example-site separator)
def testDependenciesSavedToRDB():
    """RedisGears: a pip requirement installed via RG.PYEXECUTE
    REQUIREMENTS must survive RDB reloads -- every shard keeps reporting
    it downloaded and installed after each reload round-trip."""
    env = Env(moduleArgs='CreateVenv 1')
    conn = getConnectionByEnv(env)
    # Install the 'redisgraph' requirement alongside the execution.
    env.expect('RG.PYEXECUTE', "import redisgraph", 'REQUIREMENTS',
               'redisgraph').ok()
    for _ in env.reloading_iterator():
        # Gather RG.PYDUMPREQS output from every shard.
        res, err = env.cmd(
            'RG.PYEXECUTE',
            "GB('ShardsIDReader').flatmap(lambda x: execute('RG.PYDUMPREQS')).run()"
        )
        env.assertEqual(len(err), 0)
        env.assertEqual(len(res), env.shardsCount)
        for r in res:
            env.assertContains("'IsDownloaded', 'yes', 'IsInstalled', 'yes'",
                               r)
# Example 5 (scraped example-site separator)
def testRDBCompatibility():
    """Load legacy RediSearch RDB files and verify UPGRADE_INDEX rebuilds
    the index definition (prefix, language, score and payload fields) and,
    for the synonyms fixture, the synonym groups."""
    # temp skip for out-of-index

    env = Env(
        moduleArgs=
        'UPGRADE_INDEX idx; PREFIX 1 tt; LANGUAGE french; LANGUAGE_FIELD MyLang; SCORE 0.5; SCORE_FIELD MyScore; PAYLOAD_FIELD MyPayload; UPGRADE_INDEX idx1'
    )
    # env = Env(moduleArgs=['UPGRADE_INDEX idx', 'PREFIX 1 tt', 'LANGUAGE french', 'LANGUAGE_FIELD MyLang', 'SCORE 0.5', 'SCORE_FIELD MyScore', 'PAYLOAD_FIELD MyPayload', 'UPGRADE_INDEX idx1'])
    # env = Env(moduleArgs=['UPGRADE_INDEX idx; PREFIX 1 tt; LANGUAGE french', 'LANGUAGE_FIELD MyLang', 'SCORE 0.5', 'SCORE_FIELD MyScore', 'PAYLOAD_FIELD MyPayload', 'UPGRADE_INDEX idx1'])

    env.skipOnCluster()
    dbFileName = env.cmd('config', 'get', 'dbfilename')[1]
    dbDir = env.cmd('config', 'get', 'dir')[1]
    rdbFilePath = os.path.join(dbDir, dbFileName)
    if not downloadFiles():
        if os.environ.get('CI'):
            env.assertTrue(
                False
            )  ## we could not download rdbs and we are running on CI, let fail the test
        else:
            env.skip()
            return

    for fileName in RDBS:
        # Swap the server's dump file for the legacy RDB and restart so
        # the old format is loaded through the upgrade path.
        env.stop()
        filePath = os.path.join(REDISEARCH_CACHE_DIR, fileName)
        try:
            os.unlink(rdbFilePath)
        except OSError:
            pass
        os.symlink(filePath, rdbFilePath)
        env.start()
        waitForIndex(env, 'idx')
        # Every legacy RDB fixture contains exactly 1000 documents.
        env.expect('FT.SEARCH idx * LIMIT 0 0').equal([1000])
        env.expect('DBSIZE').equal(1000)
        res = env.cmd('FT.INFO idx')
        # FT.INFO replies as a flat [name, value, ...] list; fold to dict.
        res = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
        env.assertEqual(res['index_definition'], [
            'key_type', 'HASH', 'prefixes', ['tt'], 'default_language',
            'french', 'language_field', 'MyLang', 'default_score', '0.5',
            'score_field', 'MyScore', 'payload_field', 'MyPayload'
        ])
        if fileName == 'redisearch_1.6.13_with_synonyms.rdb':
            res = env.cmd('FT.SYNDUMP idx')
            res = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
            env.assertEqual(res, {'term2': ['0'], 'term1': ['0']})
        env.cmd('flushall')
        env.assertTrue(env.checkExitCode())
# Example 6 (scraped example-site separator)
def testDelReplicate():
    """FT.ADD / FT.DEL must replicate to a slave: insert 10 docs, verify
    on both master and slave, delete them on the master, then verify the
    deletions reached the slave."""
    env = Env(useSlaves=True, forceTcp=True)

    env.skipOnCluster()

    ## on an existing env we cannot get a slave connection, so skip
    if env.env == 'existing-env':
        env.skip()

    master = env.getConnection()
    slave = env.getSlaveConnection()
    env.assertTrue(master.execute_command("ping"))
    env.assertTrue(slave.execute_command("ping"))
    # Index follows every key (empty-prefix FILTER on @__key).
    env.assertOk(
        master.execute_command('ft.create', 'idx', 'ON', 'HASH', 'FILTER',
                               'startswith(@__key, "")', 'schema', 'f',
                               'text'))
    # Marker key used only to detect that replication has caught up.
    env.cmd('set', 'indicator', '1')
    checkSlaveSynced(env, slave, ('exists', 'indicator'), 1, time_out=20)

    for i in range(10):
        master.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                               'f', 'hello world')
    time.sleep(0.01)
    # Wait until the last document is visible on the slave.
    checkSlaveSynced(env,
                     slave, ('ft.get', 'idx', 'doc9'), ['f', 'hello world'],
                     time_out=20)

    for i in range(10):
        # checking for insertion
        env.assertEqual(['f', 'hello world'],
                        master.execute_command('ft.get', 'idx', 'doc%d' % i))
        env.assertEqual(['f', 'hello world'],
                        slave.execute_command('ft.get', 'idx', 'doc%d' % i))

        # deleting
        env.assertEqual(1, master.execute_command('ft.del', 'idx',
                                                  'doc%d' % i))

    checkSlaveSynced(env, slave, ('ft.get', 'idx', 'doc9'), None, time_out=20)

    for i in range(10):
        # checking for deletion
        env.assertEqual(None,
                        master.execute_command('ft.get', 'idx', 'doc%d' % i))
        env.assertEqual(None,
                        slave.execute_command('ft.get', 'idx', 'doc%d' % i))
# Example 7 (scraped example-site separator)
def testExt():
    """Load the example scorer extension and verify its custom
    'filterout_scorer' suppresses every match while the default scorer
    returns them all."""
    env = Env(moduleArgs='EXTLOAD %s' % EXTPATH)

    total_docs = 100
    env.assertOk(env.execute_command('ft.create', 'idx', 'schema', 'f',
                                     'text'))
    for doc_num in range(total_docs):
        reply = env.execute_command('ft.add', 'idx', 'doc%d' % doc_num, 1.0,
                                    'fields', 'f', 'hello world')
        env.assertOk(reply)

    # Default scorer: every document matches "hello world".
    env.assertEqual(total_docs,
                    env.execute_command('ft.search', 'idx', 'hello world')[0])
    # The extension's scorer rejects everything, so zero results.
    filtered = env.execute_command('ft.search', 'idx', 'hello world',
                                   'scorer', 'filterout_scorer')
    env.assertEqual(0, filtered[0])
# Example 8 (scraped example-site separator)
def testDependenciesWithRegister():
    """A registered execution's requirement must survive reloads: after
    registering a GB() that imports redisgraph, every reload round-trip
    still lets each shard import the module."""
    env = Env(moduleArgs='CreateVenv 1')
    env.skipOnCluster()
    env.expect(
        'RG.PYEXECUTE', "GB()."
        "map(lambda x: __import__('redisgraph'))."
        "collect().distinct().register()", 'REQUIREMENTS', 'redisgraph').ok()

    for _ in env.reloading_iterator():
        # Each shard imports redisgraph and returns its repr string.
        res = env.cmd(
            'RG.PYEXECUTE', "GB('ShardsIDReader')."
            "map(lambda x: str(__import__('redisgraph')))."
            "collect().distinct().run()")
        # Reply layout: [results, errors].
        env.assertEqual(len(res[0]), env.shardsCount)
        env.assertEqual(len(res[1]), 0)
        env.assertContains("<module 'redisgraph'", res[0][0])
def test_keyspace_rules_send():
    """Keyspace-notification flow for RedisTimeSeries commands: each
    TS.CREATE / TS.CREATERULE / TS.ADD / TS.INCRBY publishes a pmessage
    pair (event name, then key name); a compaction rule's source and
    destination additionally fire :src/:dest events."""
    sample_len = 1024
    env = Env()
    with env.getClusterConnectionIfNeeded() as r:
        # Enable all keyspace/keyevent notification classes.
        r.execute_command('config', 'set', 'notify-keyspace-events', 'KEA')

        pubsub = r.pubsub()
        pubsub.psubscribe('__key*')

        # Give the subscription time to register before generating events.
        time.sleep(1)
        env.assertEqual('psubscribe', pubsub.get_message()['type'])

        r.execute_command('TS.CREATE', 'tester_src{2}')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.create')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_src{2}')

        r.execute_command('TS.CREATE', 'tester_dest{2}')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.create')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_dest{2}')

        # Rule creation notifies both endpoints of the compaction.
        r.execute_command('TS.CREATERULE', 'tester_src{2}', 'tester_dest{2}', 'AGGREGATION', 'MAX', 1)
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.createrule:src')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_src{2}')

        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.createrule:dest')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_dest{2}')

        r.execute_command('ts.add', 'tester_src{2}', 100, 1.1)
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.add')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_src{2}')

        r.execute_command('ts.add', 'tester_src{2}', 101, 1.1)

        # First getting the event from the dest on the previous window
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.add:dest')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_dest{2}')

        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.add')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_src{2}')

        r.execute_command('ts.incrby', 'tester_src{2}', 3)

        # incrby also closes the previous window, so dest fires first.
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.add:dest')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_dest{2}')

        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.incrby')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_src{2}')
# Example 10 (scraped example-site separator)
def testRDBCompatibility():
    """RedisTimeSeries RDB backward compatibility: record TS.RANGE and
    TS.INFO for freshly loaded data, then load each legacy RDB from
    ./rdbs and verify keys, ranges and info all match the recordings."""
    env = Env()
    env.skipOnCluster()
    skip_on_rlec()
    RDBS = os.listdir('rdbs')

    # Current RDB version check
    TSRANGE_RESULTS = {}
    TSINFO_RESULTS = {}
    KEYS = None

    current_r = env.getConnection()
    load_into_redis(current_r)
    KEYS = current_r.keys()
    KEYS.sort()
    for key in KEYS:
        TSRANGE_RESULTS[key] = current_r.execute_command(
            'ts.range', key, "-", "+")
        # normalize_info presumably strips volatile fields so TS.INFO can
        # be compared across versions -- confirm against its definition.
        TSINFO_RESULTS[key] = normalize_info(
            current_r.execute_command('ts.info', key))

    # Compatibility check
    for fileName in RDBS:
        filePath = os.path.abspath(os.path.join("rdbs", fileName))
        dbFileName = env.cmd('config', 'get', 'dbfilename')[1].decode('ascii')
        dbDir = env.cmd('config', 'get', 'dir')[1].decode('ascii')
        rdbFilePath = os.path.join(dbDir, dbFileName)
        env.stop()
        try:
            os.remove(rdbFilePath)
        except FileNotFoundError:
            pass
        # Point the server's dump file at the legacy RDB and restart so
        # it loads the old on-disk format.
        os.symlink(filePath, rdbFilePath)
        env.start()

        r = env.getConnection()
        OLD_KEYS = r.keys()
        newDbFileName = r.execute_command('config', 'get',
                                          'dbfilename')[1].decode('ascii')
        env.assertEqual(newDbFileName, dbFileName)
        OLD_KEYS.sort()
        env.assertEqual(OLD_KEYS, KEYS)
        for key in OLD_KEYS:
            # Legacy data must read back exactly as the current version's.
            assert r.execute_command('ts.range', key, "-",
                                     "+") == TSRANGE_RESULTS[key]
            assert normalize_info(r.execute_command(
                'ts.info', key)) == TSINFO_RESULTS[key]
def test_backfill_downsampling(self):
    """Backfilled samples (added out of order into past and latest
    windows) must keep a compaction rule's downsampled series consistent
    with recomputing the aggregation directly over the source series.

    Note: defined with a ``self`` parameter at module level; ``self`` is
    unused (block was scraped out of a test class).

    Fix: the ``r.execute_command('TS.ADD', ...) == ts`` lines were bare
    expressions whose comparison results were silently discarded; they
    are now asserted, matching the existing assert on line with ts=15.
    """
    env = Env()
    with env.getClusterConnectionIfNeeded() as r:
        key = 'tester{a}'
        type_list = ['', 'uncompressed']
        for chunk_type in type_list:
            agg_list = ['sum', 'min', 'max', 'count', 'first', 'last']  # more
            for agg in agg_list:
                # Source + destination series with a compaction rule and
                # DUPLICATE_POLICY LAST so backfilled duplicates overwrite.
                agg_key = _insert_agg_data(
                    r,
                    key,
                    agg,
                    chunk_type,
                    key_create_args=['DUPLICATE_POLICY', 'LAST'])

                expected_result = r.execute_command('TS.RANGE', key, 10, 50,
                                                    'aggregation', agg, 10)
                actual_result = r.execute_command('TS.RANGE', agg_key, 10, 50)
                assert expected_result == actual_result
                # Backfill into an already-closed bucket.
                assert r.execute_command('TS.ADD', key, 15, 50) == 15
                expected_result = r.execute_command('TS.RANGE', key, 10, 50,
                                                    'aggregation', agg, 10)
                actual_result = r.execute_command('TS.RANGE', agg_key, 10, 50)
                assert expected_result == actual_result

                # add in latest window (TS.ADD returns the timestamp added)
                assert r.execute_command('TS.ADD', key, 1055, 50) == 1055
                assert r.execute_command('TS.ADD', key, 1053, 55) == 1053
                assert r.execute_command('TS.ADD', key, 1062, 60) == 1062
                expected_result = r.execute_command('TS.RANGE', key, 10, 1060,
                                                    'aggregation', agg, 10)
                actual_result = r.execute_command('TS.RANGE', agg_key, 10,
                                                  1060)
                assert expected_result == actual_result

                # update in latest window
                assert r.execute_command('TS.ADD', key, 1065, 65) == 1065
                assert r.execute_command('TS.ADD', key, 1066, 66) == 1066
                assert r.execute_command('TS.ADD', key, 1001, 42) == 1001
                assert r.execute_command('TS.ADD', key, 1075, 50) == 1075
                expected_result = r.execute_command('TS.RANGE', key, 10, 1070,
                                                    'aggregation', agg, 10)
                actual_result = r.execute_command('TS.RANGE', agg_key, 10,
                                                  1070)
                env.assertEqual(expected_result, actual_result)
                # Reset both series before the next (chunk_type, agg) combo.
                r.execute_command('DEL', key)
                r.execute_command('DEL', agg_key)
def testDependenciesReplicatedToSlave():
    """A requirement installed on the master via RG.PYEXECUTE must be
    replicated: poll RG.PYDUMPREQS on the slave until one requirement
    appears with IsDownloaded / IsInstalled == 'yes'."""
    env = Env(useSlaves=True, env='oss', moduleArgs='CreateVenv 1')
    if env.envRunner.debugger is not None:
        env.skip() # valgrind is not working correctly with replication

    env.expect('RG.PYEXECUTE', "import redisgraph", 'REQUIREMENTS', 'redisgraph').ok()

    slaveConn = env.getSlaveConnection()
    try:
        with TimeLimit(5):
            # Busy-poll (bounded by TimeLimit) until the slave reports
            # at least one requirement.
            res = []
            while len(res) < 1:
                res = slaveConn.execute_command('RG.PYDUMPREQS')
            # Reply layout: [..., 'IsDownloaded', <v>, 'IsInstalled', <v>, ...];
            # indices 5 and 7 hold the two status values.
            env.assertEqual(len(res), 1)
            env.assertEqual(res[0][5], 'yes')
            env.assertEqual(res[0][7], 'yes')
    except Exception:
        env.assertTrue(False, message='Failed waiting for requirement to reach slave')
# Example 13 (scraped example-site separator)
def testRewriteAofSortables():
    """Sortable fields must survive AOF rewrite: FT.SEARCH results sorted
    by a TEXT and by a NUMERIC sortable field are identical before and
    after an AOF-backed restart.

    Fix: replaced Python-2-only ``xrange`` with ``range``; the file is
    Python 3 (it uses ``FileNotFoundError`` elsewhere).
    """
    env = Env(useAof=True)
    env.cmd('FT.CREATE', 'idx', 'schema', 'field1', 'TEXT', 'SORTABLE', 'num1',
            'NUMERIC', 'SORTABLE')
    env.cmd('FT.ADD', 'idx', 'doc', 1.0, 'FIELDS', 'field1', 'Hello World')
    env.restart_and_reload()
    env.broadcast('SAVE')

    # Load some documents
    for x in range(100):
        env.cmd('FT.ADD', 'idx', 'doc{}'.format(x), 1.0, 'FIELDS', 'field1',
                'txt{}'.format(random.random()), 'num1', random.random())
    # Check both sort directions/field types across a restart.
    for sspec in [('field1', 'asc'), ('num1', 'desc')]:
        cmd = ['FT.SEARCH', 'idx', 'txt', 'SORTBY', sspec[0], sspec[1]]
        res = env.cmd(*cmd)
        env.restart_and_reload()
        res2 = env.cmd(*cmd)
        env.assertEqual(res, res2)
# Example 14 (scraped example-site separator)
def test_sdadel():
    """rdict.sdadel must remove both backing keys ({name}_sd1 and
    {name}_sd2) per dictionary and return the number of dictionaries
    actually deleted; names that do not exist are ignored."""
    env = Env()
    env.cmd('rdict.sdset', 'myDict', '0.5', '1')
    env.cmd('rdict.sdset', 'myDict_2', '1', '2')
    env.cmd('rdict.sdset', 'myDict_3', '3', '4')
    # Each sdset creates two keys, hash-tagged with the dict name.
    env.assertEqual(
        sorted(env.expect('keys', '*').res),
        sorted([
            '{myDict}_sd1', '{myDict}_sd2', '{myDict_2}_sd1', '{myDict_2}_sd2',
            '{myDict_3}_sd1', '{myDict_3}_sd2'
        ]), 1)
    # 'myDict_4' does not exist, so only myDict_2 is removed -> returns 1.
    env.expect('rdict.sdadel', 'myDict_4', 'myDict_2').equal(1)
    env.assertEqual(
        sorted(env.expect('keys', '*').res),
        sorted([
            '{myDict}_sd1', '{myDict}_sd2', '{myDict_3}_sd1', '{myDict_3}_sd2'
        ]), 1)
    # Both remaining dictionaries removed -> returns 2, keyspace empty.
    env.expect('rdict.sdadel', 'myDict', 'myDict_3').equal(2)
    env.expect('keys', '*').equal([])
# Example 15 (scraped example-site separator)
def testRewriteAofSortables():
    """Duplicate of the variant above (different formatting): sorted
    search results must be stable across AOF rewrite + restart.

    Fix: replaced Python-2-only ``xrange`` with ``range``; the file is
    Python 3 (it uses ``FileNotFoundError`` elsewhere).
    """
    env = Env(useAof=True)
    env.cmd('FT.CREATE', 'idx', 'schema', 'field1', 'TEXT',
            'SORTABLE', 'num1', 'NUMERIC', 'SORTABLE')
    env.cmd('FT.ADD', 'idx', 'doc', 1.0,
            'FIELDS', 'field1', 'Hello World')
    env.restart_and_reload()
    env.broadcast('SAVE')

    # Load some documents
    for x in range(100):
        env.cmd('FT.ADD', 'idx', 'doc{}'.format(x), 1.0, 'FIELDS',
                'field1', 'txt{}'.format(random.random()),
                'num1', random.random())
    # Check both sort directions/field types across a restart.
    for sspec in [('field1', 'asc'), ('num1', 'desc')]:
        cmd = ['FT.SEARCH', 'idx', 'txt', 'SORTBY', sspec[0], sspec[1]]
        res = env.cmd(*cmd)
        env.restart_and_reload()
        res2 = env.cmd(*cmd)
        env.assertEqual(res, res2)
# Example 16 (scraped example-site separator)
def testDependenciesBasicExportImport():
    """RG.PYEXPORTREQ / RG.PYIMPORTREQ round-trip: export an installed
    requirement, wipe the server (RDB save disabled so nothing persists),
    import the payload back, and verify every shard reports it
    downloaded and installed."""
    env = Env(moduleArgs='CreateVenv 1')
    conn = getConnectionByEnv(env)

    #disable rdb save
    res, err = env.cmd(
        'RG.PYEXECUTE',
        "GB('ShardsIDReader').foreach(lambda x: execute('config', 'set', 'save', '')).run()"
    )

    env.expect('RG.PYEXECUTE', "import redisgraph", 'REQUIREMENTS',
               'redisgraph').ok()
    # Export returns (metadata, serialized payload chunks); metadata
    # indices 5 and 7 are the IsDownloaded / IsInstalled flags.
    md, data = env.cmd('RG.PYEXPORTREQ', 'redisgraph')
    env.assertEqual(md[5], 'yes')
    env.assertEqual(md[7], 'yes')
    # Restart without persistence -> requirement list is empty again.
    env.stop()
    env.start()
    conn = getConnectionByEnv(env)
    env.expect('RG.PYDUMPREQS').equal([])
    env.expect('RG.PYIMPORTREQ', *data).equal('OK')
    res, err = env.cmd(
        'RG.PYEXECUTE',
        "GB('ShardsIDReader').flatmap(lambda x: execute('RG.PYDUMPREQS')).run()"
    )
    env.assertEqual(len(err), 0)
    env.assertEqual(len(res), env.shardsCount)
    for r in res:
        env.assertContains("'IsDownloaded', 'yes', 'IsInstalled', 'yes'", r)
# Example 17 (scraped example-site separator)
def testDocTable():
    """Exercise a capped doc table (MAXDOCTABLESIZE 100): inserting 1000
    docs yields ~10 docs per bucket, so each distinct title matches 10
    docs; after deleting the first 100 docs, each title matches 9."""
    env = Env(moduleArgs='MAXDOCTABLESIZE 100')

    env.assertOk(
        env.execute_command('ft.create', 'idx', 'ON', 'HASH', 'schema',
                            'title', 'text', 'body', 'text'))

    # Insert 1000 documents; titles repeat every 100 documents.
    for doc_num in range(1000):
        reply = env.execute_command(
            'ft.add', 'idx', 'doc%d' % doc_num, 1.0, 'fields',
            'title', 'hello world %d' % (doc_num % 100), 'body',
            'lorem ist ipsum')
        env.assertOk(reply)

    # Ten documents share each distinct title.
    for title_num in range(100):
        found = env.execute_command('ft.search', 'idx',
                                    'hello world %d' % title_num)
        env.assertEqual(found[0], 10)

    # Remove one document per title...
    for doc_num in range(100):
        env.assertEqual(
            env.execute_command('ft.del', 'idx', 'doc%d' % doc_num), 1)

    # ...leaving nine matches for every title.
    for title_num in range(100):
        found = env.execute_command('ft.search', 'idx',
                                    'hello world %d' % title_num)
        env.assertEqual(found[0], 9)

    env.assertOk(env.execute_command('ft.drop', 'idx'))
# Example 18 (scraped example-site separator)
def test_parallelism():
    """RedisAI/Torch: INTRA/INTER_OP_PARALLELISM module args must be
    reported via INFO MODULES (ai_intra/inter_op_parallelism), and a
    torch model must still execute correctly with parallelism 1."""
    env = Env(moduleArgs='INTRA_OP_PARALLELISM 1 INTER_OP_PARALLELISM 1')
    if not TEST_PT:
        env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
        return

    con = env.getConnection()

    model_pb = load_file_content('pt-minimal.pt')

    ret = con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
    ret = con.execute_command('AI.TENSORSET', 'b{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
    ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'TORCH', DEVICE, 'BLOB', model_pb)
    ensureSlaveSynced(con, env)
    con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 2, 'a{1}', 'b{1}', 'OUTPUTS', 1, 'c{1}')
    ensureSlaveSynced(con, env)
    values = con.execute_command('AI.TENSORGET', 'c{1}', 'VALUES')
    # Output equals the elementwise sum of the two inputs: [2,3]+[2,3].
    env.assertEqual(values, [b'4', b'6', b'4', b'6'])
    # Parse "key:value" entries from the module section of INFO MODULES.
    load_time_config = {k.split(":")[0]: k.split(":")[1]
                        for k in con.execute_command("INFO MODULES").decode().split("#")[3].split()[1:]}
    env.assertEqual(load_time_config["ai_inter_op_parallelism"], "1")
    env.assertEqual(load_time_config["ai_intra_op_parallelism"], "1")

    # Restart with parallelism 2. NOTE(review): 'con' is reused here --
    # presumably it reconnects to the restarted server on the same port;
    # confirm, otherwise INFO would be read from the old instance.
    env = Env(moduleArgs='INTRA_OP_PARALLELISM 2 INTER_OP_PARALLELISM 2')
    load_time_config = {k.split(":")[0]: k.split(":")[1]
                        for k in con.execute_command("INFO MODULES").decode().split("#")[3].split()[1:]}
    env.assertEqual(load_time_config["ai_inter_op_parallelism"], "2")
    env.assertEqual(load_time_config["ai_intra_op_parallelism"], "2")
# Example 19 (scraped example-site separator)
def testDelReplicate():
    """FT.ADD / FT.DEL (with DD: also delete the document hash) must
    replicate: insert 10 docs, verify on both master and slave, delete on
    the master, then verify the deletions reached the slave."""
    env = Env(useSlaves=True, forceTcp=True)
    env.skipOnCluster()

    master = env.getConnection()
    slave = env.getSlaveConnection()
    env.assertContains("PONG", master.execute_command("ping"))
    env.assertContains("PONG", slave.execute_command("ping"))
    env.assertOk(
        master.execute_command('ft.create', 'idx', 'schema', 'f', 'text'))

    # Wait for the index key itself to appear on the slave.
    checkSlaveSynced(env, slave, ('exists', 'idx:idx'), 1, time_out=20)

    for i in range(10):
        master.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                               'f', 'hello world')

    # Wait until the last added document is visible on the slave.
    checkSlaveSynced(env,
                     slave, ('ft.get', 'idx', 'doc9'), ['f', 'hello world'],
                     time_out=20)

    for i in range(10):
        # checking for insertion
        env.assertEqual(['f', 'hello world'],
                        master.execute_command('ft.get', 'idx', 'doc%d' % i))
        env.assertEqual(['f', 'hello world'],
                        slave.execute_command('ft.get', 'idx', 'doc%d' % i))

        # deleting
        env.assertEqual(
            1, master.execute_command('ft.del', 'idx', 'doc%d' % i, 'DD'))

    checkSlaveSynced(env, slave, ('ft.get', 'idx', 'doc9'), None, time_out=20)

    for i in range(10):
        # checking for deletion
        env.assertEqual(None,
                        master.execute_command('ft.get', 'idx', 'doc%d' % i))
        env.assertEqual(None,
                        slave.execute_command('ft.get', 'idx', 'doc%d' % i))
# Example 20 (scraped example-site separator)
class testExample():
    '''
    run all tests on a single env without taking
    env down between tests
    '''
    def __init__(self):
        # One shared Env for every test method in this class.
        self.env = Env()

    def setUp(self):
        # Runs before each test: seed a known key.
        self.env.debugPrint('setUp', True)
        self.env.cmd('set', 'foo', 'bar')

    def tearDown(self):
        # Runs after each test: verify the seed key survived, then wipe.
        self.env.debugPrint('tearDown', True)
        self.env.expect('get', 'foo').equal('bar')
        self.env.cmd('flushall')

    def testExample(self):
        con = self.env.getConnection()
        con.set('x', 1)
        self.env.assertEqual(con.get('x'), '1')

    def testExample1(self):
        con = self.env.getConnection()
        con.set('x', 1)
        self.env.assertEqual(con.get('x'), '1')
        # Intentional failure demonstrating how a failing assert reports.
        self.env.assertFalse(True)  # check failure

    def testExample2(self):
        con = self.env.getConnection()
        con.set('x', 1)
        self.env.assertEqual(con.get('x'), '1')
# Example 21 (scraped example-site separator)
def test_madd_some_failed_replicas():
    """TS.MADD with DUPLICATE_POLICY block: duplicate timestamps are
    rejected while the remaining samples land; the stored samples
    replicate to the slave; and when AOF is on, the rewritten TS.MADD
    commands contain only the samples that were actually accepted."""
    # Throwaway Envs consulted only for runtime capability flags.
    if not Env().useSlaves:
        Env().skip()
    # getSlaveConnection is not supported in cluster mode
    Env().skipOnCluster()
    env = Env(decodeResponses=False)
    with env.getClusterConnectionIfNeeded() as r:
        r.execute_command("ts.create", "test_key1", "DUPLICATE_POLICY",
                          "block")
        r.execute_command("ts.madd", "test_key1", 123, 11, "test_key1", 124,
                          12)
        # Timestamps 123 and 124 already exist and are blocked; the new
        # 122 and 125 samples are stored.
        r.execute_command("ts.madd", "test_key1", 122, 11, "test_key1", 123,
                          11, "test_key1", 124, 12, "test_key1", 125, 12)
        # Block until at least one replica acknowledges.
        r.execute_command("wait", 1, 0)
        env.assertEqual(
            r.execute_command("ts.range", "test_key1", "-", "+"),
            [[122, b'11'], [123, b'11'], [124, b'12'], [125, b'12']])

    with env.getSlaveConnection() as r:
        env.assertEqual(
            r.execute_command("ts.range", "test_key1", "-", "+"),
            [[122, b'11'], [123, b'11'], [124, b'12'], [125, b'12']])

    # Only standard (non-cluster) envs with AOF expose a parseable file.
    if env.useAof and isinstance(env.envRunner, StandardEnv):
        cmds = aof_parser.parse_file(
            os.path.join(env.envRunner.dbDirPath,
                         env.envRunner._getFileName('master', '.aof')))
        cmds = filter(lambda c: c[0].lower().startswith('ts.'), cmds)
        # The replicated MADDs exclude the blocked duplicates.
        env.assertEqual(
            list(cmds),
            [['ts.create', 'test_key1', 'DUPLICATE_POLICY', 'block'],
             ['TS.MADD', 'test_key1', '123', '11', 'test_key1', '124', '12'],
             ['TS.MADD', 'test_key1', '122', '11', 'test_key1', '125', '12']])
# Example 22 (scraped example-site separator)
def testExt():
    """Verify the example extension loads (with an explicit existence
    check on its path) and that its 'filterout_scorer' suppresses all
    results while the default scorer matches everything."""
    if not os.path.exists(EXTPATH):
        raise Exception(
            "Path ({}) does not exist. "
            "Run from the build directory or set EXT_TEST_PATH in the environment"
            .format(EXTPATH))

    env = Env(moduleArgs='EXTLOAD %s' % EXTPATH)

    total_docs = 100
    env.assertOk(env.execute_command('ft.create', 'idx', 'schema', 'f',
                                     'text'))
    for doc_num in range(total_docs):
        reply = env.execute_command('ft.add', 'idx', 'doc%d' % doc_num, 1.0,
                                    'fields', 'f', 'hello world')
        env.assertOk(reply)

    # Default scorer: every document matches "hello world".
    env.assertEqual(total_docs,
                    env.execute_command('ft.search', 'idx', 'hello world')[0])
    # The extension's scorer rejects everything, so zero results.
    filtered = env.execute_command('ft.search', 'idx', 'hello world',
                                   'scorer', 'filterout_scorer')
    env.assertEqual(0, filtered[0])
# Example 23 (scraped example-site separator)
class test_prev_rdb_decode(FlowTestsBase):
    """RedisGraph: a payload DUMPed by the v6 RDB encoder must RESTORE
    into a graph whose nodes, edges, properties and index behave like the
    originals."""

    def __init__(self):
        self.env = Env(decodeResponses=True)
        # Shared connection published as a module global for the tests.
        global redis_con
        redis_con = self.env.getConnection()

    def tearDown(self):
        self.env.flush()

    def test_v6_decode(self):
        graph_name = "v6_rdb_restore"
        # dump created with the following query (v6 supported property value: integer, double, boolean, string, null, array)
        #  graph.query g "CREATE (:L1 {val:1, strval: 'str', numval: 5.5, nullval: NULL, boolval: true, array: [1,2,3]})-[:E{val:2}]->(:L2{val:3})"
        #  graph.query g "CREATE INDEX ON :L1(val)"
        #  dump g
        v6_rdb = b"\a\x81\x82\xb6\xa9\x85\xd6\xadh\x06\x05\x02g\x00\x02\x06\x05\x04val\x00\x05\astrval\x00\x05\anumval\x00\x05\bnullval\x00\x05\bboolval\x00\x05\x06array\x00\x02\x02\x02\x00\x05\x03L1\x00\x02\x01\x02\x00\x05\x04val\x00\x02\x01\x05\x03L2\x00\x02\x00\x02\x01\x02\x00\x05\x02E\x00\x02\x00\x02\x02\x02\x01\x02\x00\x02\x06\x05\x04val\x00\x02`\x00\x02\x01\x05\astrval\x00\x02H\x00\x05\x04str\x00\x05\anumval\x00\x02\x80\x00\x00@\x00\x04\x00\x00\x00\x00\x00\x00\x16@\x05\bnullval\x00\x02\x80\x00\x00\x80\x00\x05\bboolval\x00\x02P\x00\x02\x01\x05\x06array\x00\x02\b\x02\x03\x02`\x00\x02\x01\x02`\x00\x02\x02\x02`\x00\x02\x03\x02\x01\x02\x01\x02\x01\x05\x04val\x00\x02`\x00\x02\x03\x02\x01\x02\x00\x02\x01\x02\x00\x02\x01\x05\x04val\x00\x02`\x00\x02\x02\x00\t\x00\xd9\r\xb4c\xf2Z\xd9\xb3"
        # RESTORE the serialized v6 graph (final True = REPLACE).
        redis_con.restore(graph_name, 0, v6_rdb, True)
        redis_graph = Graph(graph_name, redis_con)
        # Expected contents; note 'nullval' is absent from the expected
        # properties (NULL properties are not stored).
        node0 = Node(node_id=0,
                     label='L1',
                     properties={
                         'val': 1,
                         'strval': 'str',
                         'numval': 5.5,
                         'boolval': True,
                         'array': [1, 2, 3]
                     })
        node1 = Node(node_id=1, label='L2', properties={'val': 3})
        edge01 = Edge(src_node=0,
                      relation='E',
                      dest_node=1,
                      edge_id=0,
                      properties={'val': 2})
        results = redis_graph.query("MATCH (n)-[e]->(m) RETURN n, e, m")
        self.env.assertEqual(results.result_set, [[node0, edge01, node1]])
        # The v6 dump also carried an index on :L1(val); the planner
        # must use it after the restore.
        plan = redis_graph.execution_plan("MATCH (n:L1 {val:1}) RETURN n")
        self.env.assertIn("Index Scan", plan)
        results = redis_graph.query("MATCH (n:L1 {val:1}) RETURN n")
        self.env.assertEqual(results.result_set, [[node0]])
# Example 24 (scraped example-site separator)
def testAofRewriteTags():
    """A TAG + sortable TEXT schema must survive AOF rewrite, and the
    index must be droppable and re-creatable afterwards.

    Fix: the expected-result literal used Python 2's long suffix (``2L``),
    which is a SyntaxError on Python 3; plain ``2`` compares equal under
    Python 2 as well, so the change is backward-compatible.
    """
    env = Env(useAof=True)
    env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'foo',
            'TEXT', 'SORTABLE', 'bar', 'TAG')
    env.cmd('FT.ADD', 'idx', '1', '1', 'FIELDS', 'foo', 'A', 'bar', '1')
    env.cmd('FT.ADD', 'idx', '2', '1', 'fields', 'foo', 'B', 'bar', '1')

    info_a = to_dict(env.cmd('FT.INFO', 'idx'))
    env.restart_and_reload()
    info_b = to_dict(env.cmd('FT.INFO', 'idx'))
    # Field definitions must be unchanged by the AOF round-trip.
    env.assertEqual(info_a['fields'], info_b['fields'])

    # Try to drop the schema
    env.cmd('FT.DROP', 'idx')

    # Try to create it again - should work!
    env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'foo',
            'TEXT', 'SORTABLE', 'bar', 'TAG')
    env.cmd('FT.ADD', 'idx', '1', '1', 'FIELDS', 'foo', 'A', 'bar', '1')
    env.cmd('FT.ADD', 'idx', '2', '1', 'fields', 'foo', 'B', 'bar', '1')
    res = env.cmd('FT.SEARCH', 'idx', '@bar:{1}', 'SORTBY', 'foo', 'ASC',
                  'RETURN', '1', 'foo', 'WITHSORTKEYS')
    # Expected reply: 2 hits, each as (id, sortkey, fields).
    env.assertEqual([2, '1', '$a', ['foo', 'A'],
                     '2', '$b', ['foo', 'B']], res)
def testAof(env):
    """Requirements must survive an AOF reload (debug loadaof): every
    shard reports the requirement installed both before and after.

    NOTE(review): the ``env`` parameter is immediately shadowed by a
    fresh Env below -- presumably a leftover from a decorator-based
    runner; confirm whether the parameter is still needed.
    """
    env = Env(moduleArgs='CreateVenv 1', useAof=True)
    conn = getConnectionByEnv(env)
    env.expect('RG.PYEXECUTE', "import redisgraph", 'REQUIREMENTS', 'redisgraph').ok()

    res, err = env.cmd('RG.PYEXECUTE', "GB('ShardsIDReader').flatmap(lambda x: execute('RG.PYDUMPREQS')).run()")
    env.assertEqual(len(err), 0)
    env.assertEqual(len(res), env.shardsCount)
    for r in res:
        env.assertContains("'IsDownloaded', 'yes', 'IsInstalled', 'yes'", r)

    # Force every shard to re-load its state from the AOF.
    env.broadcast('debug', 'loadaof')

    res, err = env.cmd('RG.PYEXECUTE', "GB('ShardsIDReader').flatmap(lambda x: execute('RG.PYDUMPREQS')).run()")
    env.assertEqual(len(err), 0)
    env.assertEqual(len(res), env.shardsCount)
    for r in res:
        env.assertContains("'IsDownloaded', 'yes', 'IsInstalled', 'yes'", r)
class TestIGet():
    """Interval-set module tests for the IGET command: fetching interval
    members of a key by name. Each test starts from a flushed keyspace."""

    def __init__(self):
        # One shared Env for all tests in this class.
        self.env = Env()

    def test_iget_should_return_error_when_missing_key(self):
        self.env.cmd('FLUSHALL')
        self.env.expect('iget').error()

    def test_iget_should_return_error_when_missing_parameters(self):
        self.env.cmd('FLUSHALL')
        self.env.expect('iget', 'intervals').error()

    def test_iget_should_return_empty_when_member_not_exists(self):
        self.env.cmd('FLUSHALL')
        self.env.expect('iget', 'intervals', 'i1').equal([])

    def test_iget_should_return_i1(self):
        self.env.cmd('FLUSHALL')
        self.env.cmd('iadd', 'intervals', '0,1', 'i1')
        res = self.env.cmd('iget', 'intervals', 'i1')
        self.env.assertEqual(len(res), 1)
        # Entry layout appears to be
        # [name, lower-inclusive, lower, upper-inclusive, upper] --
        # confirm against the module's IGET implementation.
        self.env.assertContains([b'i1', b'1', b'0', b'1', b'1'], res)

    def test_iget_should_return_i2(self):
        self.env.cmd('FLUSHALL')
        self.env.cmd('iadd', 'intervals', '0,1', 'i1')
        self.env.cmd('iadd', 'intervals', '2,3', 'i2')
        res = self.env.cmd('iget', 'intervals', 'i2')
        self.env.assertEqual(len(res), 1)
        self.env.assertContains([b'i2', b'1', b'2', b'1', b'3'], res)

    def test_iget_should_return_i1_and_i2(self):
        self.env.cmd('FLUSHALL')
        self.env.cmd('iadd', 'intervals', '0,1', 'i1')
        self.env.cmd('iadd', 'intervals', '2,3', 'i2')
        res = self.env.cmd('iget', 'intervals', 'i1', 'i2')
        self.env.assertEqual(len(res), 2)
        self.env.assertContains([b'i1', b'1', b'0', b'1', b'1'], res)
        self.env.assertContains([b'i2', b'1', b'2', b'1', b'3'], res)

    def test_iget_should_return_only_existing_member(self):
        # Unknown member names are silently skipped, not errors.
        self.env.cmd('FLUSHALL')
        self.env.cmd('iadd', 'intervals', '0,1', 'i1')
        self.env.cmd('iadd', 'intervals', '2,3', 'i2')
        self.env.expect('iget', 'intervals', 'i1', 'i3').equal([[b'i1', b'1', b'0', b'1', b'1']])

    def test_iget_should_return_empty_because_no_members_exists(self):
        self.env.cmd('FLUSHALL')
        self.env.cmd('iadd', 'intervals', '0,1', 'i1')
        self.env.cmd('iadd', 'intervals', '2,3', 'i2')
        self.env.expect('iget', 'intervals', 'i3', 'i4').equal([])
# Example 27 (scraped example-site separator)
def test_parallelism():
    """Verify that INTRA/INTER_OP_PARALLELISM module args are applied and
    reported in the module's load-time configuration (INFO MODULES)."""
    env = Env(moduleArgs='INTRA_OP_PARALLELISM 1 INTER_OP_PARALLELISM 1')
    if not TEST_ONNX:
        env.debugPrint("skipping {} since TEST_ONNX=0".format(
            sys._getframe().f_code.co_name),
                       force=True)
        return

    def load_time_config_of(connection):
        # Parse the module section (index 3) of INFO MODULES into a
        # {key: value} dict of "key:value" entries.
        return {
            k.split(":")[0]: k.split(":")[1]
            for k in connection.execute_command("INFO MODULES").decode().split("#")
            [3].split()[1:]
        }

    con = env.getConnection()
    model_pb = load_file_content('mnist.onnx')
    sample_raw = load_file_content('one.raw')

    ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'ONNX', DEVICE, 'BLOB',
                              model_pb)
    env.assertEqual(ret, b'OK')
    con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 1, 1, 28, 28, 'BLOB',
                        sample_raw)

    con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 1, 'a{1}',
                        'OUTPUTS', 1, 'b{1}')
    ensureSlaveSynced(con, env)
    values = con.execute_command('AI.TENSORGET', 'b{1}', 'VALUES')
    # The MNIST sample 'one.raw' should classify as digit 1.
    argmax = max(range(len(values)), key=lambda i: values[i])
    env.assertEqual(argmax, 1)

    load_time_config = load_time_config_of(con)
    env.assertEqual(load_time_config["ai_inter_op_parallelism"], "1")
    env.assertEqual(load_time_config["ai_intra_op_parallelism"], "1")

    # Restart with different parallelism settings. Bug fix: the original code
    # kept reading INFO through the old `con`, which belongs to the previous
    # environment, so the "2"/"2" assertions never exercised the new server.
    env = Env(moduleArgs='INTRA_OP_PARALLELISM 2 INTER_OP_PARALLELISM 2')
    con = env.getConnection()
    load_time_config = load_time_config_of(con)
    env.assertEqual(load_time_config["ai_inter_op_parallelism"], "2")
    env.assertEqual(load_time_config["ai_intra_op_parallelism"], "2")
# Beispiel #28
# 0
class FlowTestsBase(object):
    """Shared base for RedisGraph flow tests: environment setup plus
    result-set and runtime assertion helpers."""

    def __init__(self):
        # Fresh test environment; wipe any leftover keys before the tests run.
        self.env = Env()
        conn = self.env.getConnection()
        conn.execute_command("FLUSHALL")

    def _assert_equalish(self, a, b, e=0.05):
        # Soft comparison: emit a warning (not a failure) when a and b
        # differ by more than the fraction e of a.
        tolerance = a * e
        if abs(a - b) > tolerance:
            warnings.warn('runtimes differ by more than \"%f\" percent' % e)

    def _assert_only_expected_results_are_in_actual_results(
            self, actual_result, query_info):
        # A missing result set is treated as an empty one.
        rows = actual_result.result_set if actual_result.result_set is not None else []

        # Same number of rows ...
        self.env.assertEqual(len(rows), len(query_info.expected_result))

        # ... and every expected row must be present.
        for expected_row in query_info.expected_result:
            self.env.assertIn(expected_row, rows)

    def _assert_actual_results_contained_in_expected_results(
            self, actual_result, query_info, num_contained_results):
        rows = actual_result.result_set

        # Exactly the requested number of rows came back ...
        self.env.assertEqual(len(rows), num_contained_results)

        # ... and all of them appear among the expected rows.
        matches = sum(1 for row in query_info.expected_result if row in rows)
        self.env.assertEqual(matches, num_contained_results)

    def _assert_run_time(self, actual_result, query_info):
        # Runtime overruns are reported as warnings, not hard failures.
        if actual_result.run_time_ms > query_info.max_run_time_ms:
            warnings.warn(
                'Maximum runtime for query \"%s\" was: %s, but should be %s' %
                (query_info.description, str(actual_result.run_time_ms),
                 str(query_info.max_run_time_ms)))
def test_ts_del_multi_chunk():
    """For both chunk encodings: fill a key until it spills into a second
    chunk, delete all but the last sample, and check the series collapses
    back to a single chunk."""
    for chunk_type in ["compressed","uncompressed"]:
        env = Env()
        with env.getClusterConnectionIfNeeded() as conn:
            conn.execute_command("ts.create", 'test_key', chunk_type)
            sample_len = 1
            # Keep adding samples (timestamp == value index) until a second
            # chunk is allocated.
            while _get_ts_info(conn, 'test_key').chunk_count < 2:
                assert sample_len == conn.execute_command("ts.add", 'test_key', sample_len, '1')
                sample_len += 1
            sample_len -= 1  # last timestamp actually added
            res = conn.execute_command('ts.range', 'test_key', 0, sample_len - 1)
            for idx, sample in enumerate(res, start=1):
                env.assertEqual(sample, [idx, '1'.encode('ascii')])
            # Delete everything except the final sample.
            assert sample_len - 1 == conn.execute_command('ts.del', 'test_key', 0, sample_len - 1)
            res = conn.execute_command('ts.range', 'test_key', 0, sample_len)
            env.assertEqual(_get_ts_info(conn, 'test_key').chunk_count, 1)
            env.assertEqual(len(res), 1)
        env.flush()
# Beispiel #30
# 0
def testAbortExecution():
    """RG.ABORTEXECUTION aborts a queued execution, and aborting a running
    infinite execution stops it, recording exactly one error."""
    # Single execution thread so the second execution stays queued ('created')
    # while the first one monopolises the thread.
    env = Env(moduleArgs='executionThreads 1')
    env.skipOnCluster()
    infinitScript = '''
def InfinitLoop(r):
    import time
    while True:
        time.sleep(0.1)
    return r
GB().map(InfinitLoop).run()
    '''
    env.cmd('set', 'x', '1')

    executionId1 = env.cmd('rg.pyexecute', infinitScript, 'unblocking')
    executionId2 = env.cmd('rg.pyexecute', infinitScript, 'unblocking')
    try:
        # Poll (bounded by TimeLimit) until the first execution reports
        # 'running'; res[0][3][1] is the execution status in the reply.
        with TimeLimit(2):
            status = None
            while status != 'running':
                status = env.cmd('RG.GETEXECUTION', executionId1)[0][3][1]
                time.sleep(0.1)
    except Exception as e:
        env.assertTrue(False,
                       message='Could not wait for execution to start running')

    status2 = env.cmd('RG.GETEXECUTION', executionId2)[0][3][1]
    env.assertEqual(status2, 'created')

    # Aborting the queued execution marks it 'aborted' without running it.
    env.expect('rg.abortexecution', executionId2).ok()
    res = env.cmd('RG.GETEXECUTION', executionId2)
    env.assertEqual(res[0][3][1], 'aborted')

    # Aborting the running execution finishes it with a single error.
    env.expect('rg.abortexecution', executionId1).ok()
    res = env.cmd('RG.GETEXECUTION', executionId1)
    env.assertEqual(res[0][3][1], 'done')
    env.assertEqual(len(res[0][3][9]), 1)  # exactly one error recorded
    env.expect('rg.dropexecution', executionId1).ok()
    env.expect('rg.dropexecution', executionId2).ok()
# Beispiel #31
# 0
class testPagerankFlow(FlowTestsBase):
    """Flow tests for the algo.pageRank procedure."""

    def __init__(self):
        self.env = Env()
        global redis_graph
        conn = self.env.getConnection()
        redis_graph = Graph(GRAPH_ID, conn)

    def test_pagerank(self):
        # Pagerank considers only nodes of the given label and relations of
        # the given relationship type; multiple edges between two nodes count
        # as a single connection.
        create_queries = [
            # Single Label, single connection.
            "CREATE (a:L {v:1})-[:R]->(b:L {v:2})",
            # Single Label, multi connection.
            "CREATE (a:L {v:1})-[:R]->(b:L {v:2}), (a)-[:R]->(b)",
            # Multi Label, single connection.
            "CREATE (a:L {v:1})-[:R]->(b:L {v:2}), (:X)-[:R]->(:X)",
            # Multi Label, multi connection.
            "CREATE (a:L {v:1})-[:R]->(b:L {v:2}), (a)-[:R]->(b), (:X)-[:R]->(:X)"
        ]
        rank_query = """CALL algo.pageRank('L', 'R') YIELD node, score RETURN node.v, score"""

        for create_query in create_queries:
            # Start from an empty keyspace so each graph shape is isolated.
            self.env.cmd('flushall')
            redis_graph.query(create_query)
            resultset = redis_graph.query(rank_query).result_set

            # Node v=2 receives the bulk of the rank, node v=1 the remainder.
            self.env.assertEqual(len(resultset), 2)
            self.env.assertEqual(resultset[0][0], 2)
            self.env.assertAlmostEqual(resultset[0][1], 0.777813196182251,
                                       0.0001)
            self.env.assertEqual(resultset[1][0], 1)
            self.env.assertAlmostEqual(resultset[1][1], 0.22218681871891,
                                       0.0001)
# Beispiel #32
# 0
class testExample():
    '''
    run all tests on a single env without taking
    env down between tests
    '''
    def __init__(self):
        self.env = Env()

    def _set_and_check(self, con):
        # Shared body of the example tests: write x and read it back.
        con.set('x', 1)
        self.env.assertEqual(con.get('x'), '1')

    def testExample(self):
        self._set_and_check(self.env.getConnection())

    def testExample1(self):
        self._set_and_check(self.env.getConnection())
        self.env.assertFalse(True)  # check failure

    def testExample2(self):
        self._set_and_check(self.env.getConnection())
# Beispiel #33
# 0
class testGetExecution:
    """Tests for RG.GETEXECUTION reply contents and its SHARD/CLUSTER scopes.

    Reply layout relied on below: res[i][3] is the execution plan; within it
    index 7 = number of results, 9 = errors list, 15 = steps list.
    NOTE(review): indices inferred from their usage in this file — confirm
    against the RedisGears RG.GETEXECUTION reply documentation.
    """
    def __init__(self):
        self.env = Env()
        conn = getConnectionByEnv(self.env)
        # Three keys so a full-scan GB().run() produces three records.
        conn.execute_command('SET', 'k1', 'spark')
        conn.execute_command('SET', 'k2', 'star')
        conn.execute_command('SET', 'k3', 'lambda')

    def testGettingANonExistingExecutionIdShouldError(self):
        self.env.expect('RG.GETEXECUTION', 'NoSuchExecutionId').raiseError()

    def testGettingAnExecutionPlanShouldSucceed(self):
        # NOTE(review): 'id' shadows the builtin; kept as-is.
        id = self.env.cmd('RG.PYEXECUTE',
                          "GB().map(lambda x: x['value']).run()", 'UNBLOCKING')
        time.sleep(1)  # give the unblocking execution time to finish
        res = self.env.cmd('RG.GETEXECUTION', id)
        self.env.assertEqual(res[0][3][7], 3)  # results
        self.env.assertEqual(len(res[0][3][9]), 0)  # errors
        self.env.cmd('RG.DROPEXECUTION', id)

    def testProfileExecutionsShouldBeDisabledByDefault(self):
        res = self.env.cmd('RG.CONFIGGET', 'ProfileExecutions')
        self.env.assertEqual(res[0], 0)

    def testExecutionShouldNotContainStepsDurationsWhenProfilingIsDisabled(
            self):
        # With profiling off, every step duration must be zero.
        res = self.env.cmd('RG.CONFIGSET', 'ProfileExecutions', 0)
        self.env.assertOk(res[0])
        id = self.env.cmd('RG.PYEXECUTE',
                          "GB().map(lambda x: x['value']).run()", 'UNBLOCKING')
        time.sleep(1)
        res = self.env.cmd('RG.GETEXECUTION', id)
        steps = res[0][3][15]
        self.env.assertLessEqual(1, len(steps))
        sdursum = 0
        # Step record: (_, type, _, duration, _, name, _, arg).
        for _, stype, _, sdur, _, sname, _, sarg in steps:
            sdursum += sdur
        self.env.assertEqual(sdursum, 0)
        self.env.cmd('RG.DROPEXECUTION', id)

    def testExecutionsShouldContainSomeStepsDurationsWhenProfilingIsEnabled(
            self):
        res = self.env.cmd('RG.CONFIGSET', 'ProfileExecutions', 1)
        self.env.assertOk(res[0])
        id = self.env.cmd('RG.PYEXECUTE',
                          "GB().flatmap(lambda x: x['value']).run()",
                          'UNBLOCKING')
        time.sleep(1)
        res = self.env.cmd(
            'RG.CONFIGSET', 'ProfileExecutions',
            0)  # TODO: consider running the basicTests class with profiling
        self.env.assertOk(res[0])
        res = self.env.cmd('RG.GETEXECUTION', id)
        steps = res[0][3][15]
        self.env.assertLessEqual(1, len(steps))
        sdursum = 0
        for _, stype, _, sdur, _, sname, _, sarg in steps:
            sdursum += sdur
        # With profiling on, the total duration is non-negative (may be > 0).
        self.env.assertLessEqual(0, sdursum)
        self.env.cmd('RG.DROPEXECUTION', id)

    def testGetShardExecutionShouldSucceed(self):
        # Scope keywords are case-insensitive ('sHARD' below, 'Cluster' next).
        id = self.env.cmd('RG.PYEXECUTE', "GB().filter(lambda x: true).run()",
                          'UNBLOCKING')
        time.sleep(1)
        res = self.env.cmd('RG.GETEXECUTION', id, 'sHARD')
        self.env.assertEqual(1, len(res))
        self.env.cmd('RG.DROPEXECUTION', id)

    def testGetClusterExecutionShouldSucceedWhenInClusterMode(self):
        if self.env.shardsCount < 2:  # TODO: RedisGears_IsClusterMode reports false for clusters with 1 shard
            self.env.skip()
        id = self.env.cmd('RG.PYEXECUTE', "GB().map(lambda x: x).run()",
                          'UNBLOCKING')
        time.sleep(1)
        res = self.env.cmd('RG.GETEXECUTION', id, 'Cluster')
        self.env.assertLessEqual(1, len(res))
        self.env.cmd('RG.DROPEXECUTION', id)
class TestAggregate():
    def __init__(self):
        # One shared environment for the whole aggregate suite; add_values
        # presumably loads the 'games' test dataset — see the module-level
        # helper (outside this view).
        self.env = Env()
        add_values(self.env)

    def testGroupBy(self):
        """GROUPBY @brand with a COUNT reducer, sorted by count descending."""
        cmd = ['ft.aggregate', 'games', '*',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'count', '0', 'AS', 'count',
               'SORTBY', 2, '@count', 'desc',
               'LIMIT', '0', '5'
               ]

        res = self.env.cmd(*cmd)
        self.env.assertIsNotNone(res)
        # res[0] is the total number of groups; the empty-string brand bucket
        # is the largest group in the dataset.
        self.env.assertEqual([292L, ['brand', '', 'count', '1518'], ['brand', 'mad catz', 'count', '43'],
                                    ['brand', 'generic', 'count', '40'], ['brand', 'steelseries', 'count', '37'],
                                    ['brand', 'logitech', 'count', '35']], res)

    def testMinMax(self):
        """MIN and MAX reducers over @price per brand, checked against the
        top-sorted row of each reply."""
        cmd = ['ft.aggregate', 'games', 'sony',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'count', '0',
               'REDUCE', 'min', '1', '@price', 'as', 'minPrice',
               'SORTBY', '2', '@minPrice', 'DESC']
        res = self.env.cmd(*cmd)
        self.env.assertIsNotNone(res)
        # res[1] is the group with the highest minPrice.
        row = to_dict(res[1])
        self.env.assertEqual(88, int(float(row['minPrice'])))

        cmd = ['ft.aggregate', 'games', 'sony',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'count', '0',
               'REDUCE', 'max', '1', '@price', 'as', 'maxPrice',
               'SORTBY', '2', '@maxPrice', 'DESC']
        res = self.env.cmd(*cmd)
        row = to_dict(res[1])
        self.env.assertEqual(695, int(float(row['maxPrice'])))

    def testAvg(self):
        """AVG reducer per brand; also checks that the 'AS' alias is applied."""
        cmd = ['ft.aggregate', 'games', 'sony',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'avg', '1', '@price', 'AS', 'avg_price',
               'REDUCE', 'count', '0',
               'SORTBY', '2', '@avg_price', 'DESC']
        res = self.env.cmd(*cmd)
        self.env.assertIsNotNone(res)
        self.env.assertEqual(26, res[0])
        # Ensure the formatting actually exists

        first_row = to_dict(res[1])
        self.env.assertEqual(109, int(float(first_row['avg_price'])))

        # Every group row must carry the aliased field.
        for row in res[1:]:
            row = to_dict(row)
            self.env.assertIn('avg_price', row)

        # Test aliasing
        cmd = ['FT.AGGREGATE', 'games', 'sony', 'GROUPBY', '1', '@brand',
               'REDUCE', 'avg', '1', '@price', 'AS', 'avgPrice']
        res = self.env.cmd(*cmd)
        first_row = to_dict(res[1])
        self.env.assertEqual(17, int(float(first_row['avgPrice'])))

    def testCountDistinct(self):
        """COUNT_DISTINCT (exact) and COUNT_DISTINCTISH (approximate)
        reducers over @title; only the first group row is checked."""
        for reducer, expected in (('COUNT_DISTINCT', 1484),
                                  ('COUNT_DISTINCTISH', 1461)):
            alias = reducer.lower() + '(title)'
            cmd = ['FT.AGGREGATE', 'games', '*',
                   'GROUPBY', '1', '@brand',
                   'REDUCE', reducer, '1', '@title', 'AS', alias,
                   'REDUCE', 'COUNT', '0'
                   ]
            rows = self.env.cmd(*cmd)[1:]
            first = to_dict(rows[0])
            self.env.assertEqual(expected, int(first[alias]))

    def testQuantile(self):
        """QUANTILE reducer at 0.50/0.90/0.95 over @price, checked loosely
        (wide deltas) on the largest group."""
        cmd = ['FT.AGGREGATE', 'games', '*',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'QUANTILE', '2', '@price', '0.50', 'AS', 'q50',
               'REDUCE', 'QUANTILE', '2', '@price', '0.90', 'AS', 'q90',
               'REDUCE', 'QUANTILE', '2', '@price', '0.95', 'AS', 'q95',
               'REDUCE', 'AVG', '1', '@price',
               'REDUCE', 'COUNT', '0', 'AS', 'rowcount',
               'SORTBY', '2', '@rowcount', 'DESC', 'MAX', '1']

        res = self.env.cmd(*cmd)
        row = to_dict(res[1])
        # TODO: Better samples
        self.env.assertAlmostEqual(14.99, float(row['q50']), delta=3)
        self.env.assertAlmostEqual(70, float(row['q90']), delta=50)

        # This tests the 95th percentile, which is error prone because
        # so few samples actually exist. I'm disabling it for now so that
        # there is no breakage in CI
        # self.env.assertAlmostEqual(110, (float(row['q95'])), delta=50)

    def testStdDev(self):
        """STDDEV reducer combined with AVG and QUANTILE on the same group."""
        cmd = ['FT.AGGREGATE', 'games', '*',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'STDDEV', '1', '@price', 'AS', 'stddev(price)',
               'REDUCE', 'AVG', '1', '@price', 'AS', 'avgPrice',
               'REDUCE', 'QUANTILE', '2', '@price', '0.50', 'AS', 'q50Price',
               'REDUCE', 'COUNT', '0', 'AS', 'rowcount',
               'SORTBY', '2', '@rowcount', 'DESC',
               'LIMIT', '0', '10']
        res = self.env.cmd(*cmd)
        # Checks only the largest (first-sorted) group.
        row = to_dict(res[1])

        self.env.assertTrue(10 <= int(
            float(row['q50Price'])) <= 20)
        self.env.assertAlmostEqual(53, int(float(row['stddev(price)'])), delta=50)
        self.env.assertEqual(29, int(float(row['avgPrice'])))

    def testParseTime(self):
        """timefmt() renders a unix timestamp; parse_time() round-trips it
        back to the same epoch seconds."""
        cmd = ['FT.AGGREGATE', 'games', '*',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'COUNT', '0', 'AS', 'count',
               'APPLY', 'timefmt(1517417144)', 'AS', 'dt',
               'APPLY', 'parse_time("%FT%TZ", @dt)', 'as', 'parsed_dt',
               'LIMIT', '0', '1']
        res = self.env.cmd(*cmd)

        self.env.assertEqual(['brand', '', 'count', '1518', 'dt',
                              '2018-01-31T16:45:44Z', 'parsed_dt', '1517417144'], res[1])

    def testRandomSample(self):
        """RANDOM_SAMPLE returns a non-empty list of at most 10 prices per
        group, never larger than the group's row count."""
        cmd = ['FT.AGGREGATE', 'games', '*', 'GROUPBY', '1', '@brand',
               'REDUCE', 'COUNT', '0', 'AS', 'num',
               'REDUCE', 'RANDOM_SAMPLE', '2', '@price', '10',
               'SORTBY', 2, '@num', 'DESC', 'MAX', 10]
        for group in self.env.cmd(*cmd)[1:]:
            samples = group[5]     # RANDOM_SAMPLE value in the flat row
            group_size = group[3]  # 'num' reducer value
            self.env.assertIsInstance(samples, list)
            self.env.assertGreater(len(samples), 0)
            self.env.assertGreaterEqual(group_size, len(samples))

            self.env.assertLessEqual(len(samples), 10)

    def testTimeFunctions(self):
        """All date/time APPLY functions evaluated on one fixed timestamp."""
        cmd = ['FT.AGGREGATE', 'games', '*',

               'APPLY', '1517417144', 'AS', 'dt',
               'APPLY', 'timefmt(@dt)', 'AS', 'timefmt',
               'APPLY', 'day(@dt)', 'AS', 'day',
               'APPLY', 'hour(@dt)', 'AS', 'hour',
               'APPLY', 'minute(@dt)', 'AS', 'minute',
               'APPLY', 'month(@dt)', 'AS', 'month',
               'APPLY', 'dayofweek(@dt)', 'AS', 'dayofweek',
               'APPLY', 'dayofmonth(@dt)', 'AS', 'dayofmonth',
               'APPLY', 'dayofyear(@dt)', 'AS', 'dayofyear',
               'APPLY', 'year(@dt)', 'AS', 'year',

               'LIMIT', '0', '1']
        res = self.env.cmd(*cmd)
        # day/hour/minute/month truncate the timestamp to the start of that
        # unit; dayofweek/dayofmonth/dayofyear/year are calendar components.
        self.env.assertListEqual([1L, ['dt', '1517417144', 'timefmt', '2018-01-31T16:45:44Z', 'day', '1517356800', 'hour', '1517414400',
                                       'minute', '1517417100', 'month', '1514764800', 'dayofweek', '3', 'dayofmonth', '31', 'dayofyear', '30', 'year', '2018']], res)

    def testStringFormat(self):
        """APPLY format() builds a pipe-separated string from fields and a
        literal; each row is re-derived locally and compared."""
        cmd = ['FT.AGGREGATE', 'games', '@brand:sony',
               'GROUPBY', '2', '@title', '@brand',
               'REDUCE', 'COUNT', '0',
               'REDUCE', 'MAX', '1', '@price', 'AS', 'price',
               'APPLY', 'format("%s|%s|%s|%s", @title, @brand, "Mark", @price)', 'as', 'titleBrand',
               'LIMIT', '0', '10']
        for raw_row in self.env.cmd(*cmd)[1:]:
            fields = to_dict(raw_row)
            # %g mirrors the server's numeric-to-string formatting of @price.
            expected = '%s|%s|%s|%g' % (
                fields['title'], fields['brand'], 'Mark', float(fields['price']))
            self.env.assertEqual(expected, fields['titleBrand'])

    def testSum(self):
        """SUM reducer over @price, sorted by the aliased sum descending."""
        cmd = ['ft.aggregate', 'games', '*',
               'GROUPBY', '1', '@brand',
               'REDUCE', 'count', '0', 'AS', 'count',
               'REDUCE', 'sum', 1, '@price', 'AS', 'sum(price)',
               'SORTBY', 2, '@sum(price)', 'desc',
               'LIMIT', '0', '5'
               ]
        res = self.env.cmd(*cmd)
        self.env.assertEqual([292L, ['brand', '', 'count', '1518', 'sum(price)', '44780.69'],
                             ['brand', 'mad catz', 'count',
                                 '43', 'sum(price)', '3973.48'],
                             ['brand', 'razer', 'count', '26',
                                 'sum(price)', '2558.58'],
                             ['brand', 'logitech', 'count',
                                 '35', 'sum(price)', '2329.21'],
                             ['brand', 'steelseries', 'count', '37', 'sum(price)', '1851.12']], res)

    def testFilter(self):
        """FILTER prunes groups by expression; chained FILTERs apply
        conjunctively."""
        base = ['ft.aggregate', 'games', '*',
                'GROUPBY', '1', '@brand',
                'REDUCE', 'count', '0', 'AS', 'count']

        # Single filter: only brands with more than five games survive.
        res = self.env.cmd(*(base + ['FILTER', '@count > 5']))
        for raw in res[1:]:
            group = to_dict(raw)
            self.env.assertGreater(int(group['count']), 5)

        # Two filters: both predicates must hold on every surviving group.
        res = self.env.cmd(*(base + ['FILTER', '@count < 5',
                                     'FILTER', '@count > 2 && @brand != ""']))
        for raw in res[1:]:
            group = to_dict(raw)
            self.env.assertLess(int(group['count']), 5)
            self.env.assertGreater(int(group['count']), 2)

    def testToList(self):
        """TOLIST collects the distinct prices of each group, so its length
        must equal the COUNT_DISTINCT of the same field."""
        res = self.env.cmd('ft.aggregate', 'games', '*',
                           'GROUPBY', '1', '@brand',
                           'REDUCE', 'count_distinct', '1', '@price', 'as', 'count',
                           'REDUCE', 'tolist', 1, '@price', 'as', 'prices',
                           'SORTBY', 2, '@count', 'desc',
                           'LIMIT', '0', '5')

        for raw in res[1:]:
            group = to_dict(raw)
            self.env.assertEqual(int(group['count']), len(group['prices']))

    def testSortBy(self):
        """SORTBY in both directions, with MAX capping, and sorting on
        multiple properties after an APPLY rewrite of @price."""
        res = self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand',
                           'REDUCE', 'sum', 1, '@price', 'as', 'price',
                           'SORTBY', 2, '@price', 'desc',
                           'LIMIT', '0', '2')

        self.env.assertListEqual([292L, ['brand', '', 'price', '44780.69'], [
                                 'brand', 'mad catz', 'price', '3973.48']], res)

        res = self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand',
                           'REDUCE', 'sum', 1, '@price', 'as', 'price',
                           'SORTBY', 2, '@price', 'asc',
                           'LIMIT', '0', '2')

        self.env.assertListEqual([292L, ['brand', 'myiico', 'price', '0.23'], [
                                 'brand', 'crystal dynamics', 'price', '0.25']], res)

        # Test MAX with limit higher than it
        res = self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand',
                           'REDUCE', 'sum', 1, '@price', 'as', 'price',
                           'SORTBY', 2, '@price', 'asc', 'MAX', 2)

        self.env.assertListEqual([292L, ['brand', 'myiico', 'price', '0.23'], [
                                 'brand', 'crystal dynamics', 'price', '0.25']], res)

        # Test Sorting by multiple properties
        res = self.env.cmd('ft.aggregate', 'games', '*', 'GROUPBY', '1', '@brand',
                           'REDUCE', 'sum', 1, '@price', 'as', 'price',
                           'APPLY', '(@price % 10)', 'AS', 'price',
                           'SORTBY', 4, '@price', 'asc', '@brand', 'desc', 'MAX', 10,
                           )
        self.env.assertListEqual([292L, ['brand', 'zps', 'price', '0'], ['brand', 'zalman', 'price', '0'], ['brand', 'yoozoo', 'price', '0'], ['brand', 'white label', 'price', '0'], ['brand', 'stinky', 'price', '0'], [
                                 'brand', 'polaroid', 'price', '0'], ['brand', 'plantronics', 'price', '0'], ['brand', 'ozone', 'price', '0'], ['brand', 'oooo', 'price', '0'], ['brand', 'neon', 'price', '0']], res)

    def testExpressions(self):
        # TODO(review): placeholder — no expression tests implemented yet.
        pass

    def testNoGroup(self):
        """SORTBY on APPLY-derived values without any GROUPBY stage."""
        res = self.env.cmd('ft.aggregate', 'games', '*', 'LOAD', '2', '@brand', '@price',
                           'APPLY', 'floor(sqrt(@price)) % 10', 'AS', 'price',
                           'SORTBY', 4, '@price', 'desc', '@brand', 'desc', 'MAX', 5,
                           )
        exp = [2265L,
 ['brand', 'xbox', 'price', '9'],
 ['brand', 'turtle beach', 'price', '9'],
 ['brand', 'trust', 'price', '9'],
 ['brand', 'steelseries', 'price', '9'],
 ['brand', 'speedlink', 'price', '9']]
        # exp = [2265L, ['brand', 'Xbox', 'price', '9'], ['brand', 'Turtle Beach', 'price', '9'], [
                            #  'brand', 'Trust', 'price', '9'], ['brand', 'SteelSeries', 'price', '9'], ['brand', 'Speedlink', 'price', '9']]
        # Only the first row is compared.
        self.env.assertListEqual(exp[1], res[1])

    def testLoad(self):
        """LOAD with a field that does not exist (@nonexist) still returns
        the existing fields; only the first row is compared."""
        res = self.env.cmd('ft.aggregate', 'games', '*',
                           'LOAD', '3', '@brand', '@price', '@nonexist',
                           'SORTBY', 2, '@price', 'DESC', 'MAX', 2)
        exp = [3L, ['brand', '', 'price', '759.12'], ['brand', 'Sony', 'price', '695.8']]
        self.env.assertEqual(exp[1], res[1])

    def testSplit(self):
        """split() with explicit separators and trim characters, with the
        defaults (comma separator, space trim), and on an empty string."""
        res = self.env.cmd('ft.aggregate', 'games', '*', 'APPLY', 'split("hello world,  foo,,,bar,", ",", " ")', 'AS', 'strs',
                           'APPLY', 'split("hello world,  foo,,,bar,", " ", ",")', 'AS', 'strs2',
                           'APPLY', 'split("hello world,  foo,,,bar,", "", "")', 'AS', 'strs3',
                           'APPLY', 'split("hello world,  foo,,,bar,")', 'AS', 'strs4',
                           'APPLY', 'split("hello world,  foo,,,bar,",",")', 'AS', 'strs5',
                           'APPLY', 'split("")', 'AS', 'empty',
                           'LIMIT', '0', '1'
                           )
        # print "Got {} results".format(len(res))
        # return
        # pprint.pprint(res)
        # Empty tokens are dropped; with an empty separator the string is
        # returned whole.
        self.env.assertListEqual([1L, ['strs', ['hello world', 'foo', 'bar'],
                                       'strs2', ['hello', 'world', 'foo,,,bar'],
                                       'strs3', ['hello world,  foo,,,bar,'],
                                       'strs4', ['hello world', 'foo', 'bar'],
                                       'strs5', ['hello world', 'foo', 'bar'],
                                       'empty', []]], res)

    def testFirstValue(self):
        """FIRST_VALUE with BY ... DESC/ASC extracts the top and bottom item
        and price of each brand group."""
        res = self.env.cmd('ft.aggregate', 'games', '@brand:(sony|matias|beyerdynamic|(mad catz))',
                           'GROUPBY', 1, '@brand',
                           'REDUCE', 'FIRST_VALUE', 4, '@title', 'BY', '@price', 'DESC', 'AS', 'top_item',
                           'REDUCE', 'FIRST_VALUE', 4, '@price', 'BY', '@price', 'DESC', 'AS', 'top_price',
                           'REDUCE', 'FIRST_VALUE', 4, '@title', 'BY', '@price', 'ASC', 'AS', 'bottom_item',
                           'REDUCE', 'FIRST_VALUE', 4, '@price', 'BY', '@price', 'ASC', 'AS', 'bottom_price',
                           'SORTBY', 2, '@top_price', 'DESC', 'MAX', 5
                           )
        expected = [4L, ['brand', 'sony', 'top_item', 'sony psp slim &amp; lite 2000 console', 'top_price', '695.8', 'bottom_item', 'sony dlchd20p high speed hdmi cable for playstation 3', 'bottom_price', '5.88'],
                                 ['brand', 'matias', 'top_item', 'matias halfkeyboard usb', 'top_price',
                                     '559.99', 'bottom_item', 'matias halfkeyboard usb', 'bottom_price', '559.99'],
                                 ['brand', 'beyerdynamic', 'top_item', 'beyerdynamic mmx300 pc gaming premium digital headset with microphone', 'top_price', '359.74',
                                     'bottom_item', 'beyerdynamic headzone pc gaming digital surround sound system with mmx300 digital headset with microphone', 'bottom_price', '0'],
                                 ['brand', 'mad catz', 'top_item', 'mad catz s.t.r.i.k.e.7 gaming keyboard', 'top_price', '295.95', 'bottom_item', 'madcatz mov4545 xbox replacement breakaway cable', 'bottom_price', '3.49']]
        self.env.assertListEqual(expected, res)

    def testLoadAfterGroupBy(self):
        # LOAD placed after a GROUPBY stage must be rejected by the server.
        with self.env.assertResponseError():
            self.env.cmd('ft.aggregate', 'games', '*',
                         'GROUPBY', 1, '@brand',
                         'LOAD', 1, '@brand')