Esempio n. 1
0
def testGCShutDownOnExit(env):
    """Restart the server while a forked GC child is still sleeping and verify it comes back up."""
    # Fork-GC with a delayed child exit is OSS-only and not supported on macOS.
    unsupported = (env.env in ('existing-env', 'enterprise')
                   or env.isCluster()
                   or platform.system() == 'Darwin')
    if unsupported:
        env.skip()
    env = Env(moduleArgs='GC_POLICY FORK FORKGC_SLEEP_BEFORE_EXIT 20')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'title', 'TEXT', 'SORTABLE').ok()
    env.expect('FT.DEBUG', 'GC_FORCEBGINVOKE', 'idx').ok()
    # stop while the GC fork (sleeping 20s before exit) is still alive
    env.stop()
    env.start()

    # make sure server started successfully
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'title', 'TEXT', 'SORTABLE').ok()
Esempio n. 2
0
def test_sdset_many():
    """Insert several score/value pairs and verify both backing structures (zset + hash)."""
    env = Env()
    pairs = [('0.15', '10'), ('0.05', '13'), ('0.2', '20')]
    for score, value in pairs:
        env.cmd('rdict.sdset', 'myDict', score, value)
    # scores are kept sorted in the companion zset
    env.expect('zrange', '{myDict}_sd2', '0',
               '-1').equal(['0.05', '0.15', '0.2'])
    # the hash maps each score to its value
    env.expect('hlen', '{myDict}_sd1').equal(3)
    for score, value in pairs:
        env.expect('hget', '{myDict}_sd1', score).equal(value)
Esempio n. 3
0
def testSpellCheckResultsOrder():
    """Spellcheck results must follow the order of the misspelled terms in the query."""
    env = Env()
    env.cmd('ft.dictadd', 'dict', 'name')
    env.cmd('ft.create', 'idx', 'SCHEMA', 'name', 'TEXT', 'body', 'TEXT')
    for doc_id, name, body in [('doc1', 'Elior', 'body1'),
                               ('doc2', 'Hila', 'body2')]:
        env.cmd('ft.add', 'idx', doc_id, 1.0, 'FIELDS', 'name', name,
                'body', body)
    expected = [['TERM', 'elioh', [['0.5', 'elior']]],
                ['TERM', 'hilh', [['0.5', 'hila']]]]
    env.expect('ft.spellcheck', 'idx', 'Elioh Hilh').equal(expected)
Esempio n. 4
0
def testBasicSpellCheckWithNoResult():
    """A term with no close matches in the index yields an empty suggestion list."""
    env = Env()
    env.cmd('ft.create', 'idx', 'SCHEMA', 'name', 'TEXT', 'body', 'TEXT')
    corpus = [('doc1', 'name1', 'body1'),
              ('doc2', 'name2', 'body2'),
              ('doc3', 'name2', 'name2')]
    for doc_id, name, body in corpus:
        env.cmd('ft.add', 'idx', doc_id, 1.0, 'FIELDS', 'name', name,
                'body', body)
    env.expect('ft.spellcheck', 'idx',
               'somenotexiststext').equal([['TERM', 'somenotexiststext', []]])
Esempio n. 5
0
def testRegistersReplicatedToSlave():
    """Verify a Gears registration replicates to the slave but executes only on the master.

    The registration increments 'NumOfKeys' for every non-'NumOfKeys' key event;
    if the slave also ran it, the counter there would drift past the expected value.
    """
    env = Env(useSlaves=True, env='oss')
    if env.envRunner.debugger is not None:
        env.skip() # valgrind is not working correctly with replication
    conn = getConnectionByEnv(env)
    env.cmd('rg.pyexecute', "GB().filter(lambda x: x['key'] != 'NumOfKeys')."
                            "foreach(lambda x: execute('incrby', 'NumOfKeys', ('1' if 'value' in x.keys() else '-1')))."
                            "register()")

    slaveConn = env.getSlaveConnection()
    # poll (up to 5s) until the registration has replicated to the slave
    try:
        with TimeLimit(5):
            res = []
            while len(res) < 1:
                res = slaveConn.execute_command('RG.DUMPREGISTRATIONS')
    except Exception:
        env.assertTrue(False, message='Failed waiting for Execution to reach slave')

    # trigger the registration on the master with 5 key writes
    for i in range(5):
        conn.set(str(i), str(i))

    # wait for the master-side executions to bring the counter to 5
    try:
        with TimeLimit(5):
            numOfKeys = '0'
            while numOfKeys != '5':
                numOfKeys = conn.get('NumOfKeys')
    except Exception:
        env.assertTrue(False, message='Failed waiting for keys to update')
    

    ## make sure registrations did not run on slave (if it did NumOfKeys would get to 200)
    try:
        with TimeLimit(5):
            numOfKeys = '0'
            while numOfKeys != '5':
                numOfKeys = slaveConn.get('NumOfKeys')
    except Exception:
        env.assertTrue(False, message='Failed waiting for keys to update')

    # cleanup: drop all executions and unregister everything on the master
    executions = env.cmd('RG.DUMPEXECUTIONS')
    for r in executions:
         env.expect('RG.DROPEXECUTION', r[1]).equal('OK')

    registrations = env.cmd('RG.DUMPREGISTRATIONS')
    for r in registrations:
         env.expect('RG.UNREGISTER', r[1]).equal('OK')

    # the unregister must propagate to the slave as well
    try:
        with TimeLimit(5):
            res = slaveConn.execute_command('RG.DUMPREGISTRATIONS')
            while len(res) > 0:
                res = slaveConn.execute_command('RG.DUMPREGISTRATIONS')
    except Exception:
        env.assertTrue(False, message='Failed waiting for registration to unregister on slave')
Esempio n. 6
0
def testRecordSerializationFailure():
    """A module importable only on the initiator must fail serialization on every other shard."""
    env = Env(moduleArgs='CreateVenv 1')
    if env.shardsCount < 2:  # TODO: RedisGears_IsClusterMode reports false for clusters with 1 shard
        env.skip()
    conn = getConnectionByEnv(env)
    script = ("GB('ShardsIDReader')."
              "map(lambda x: __import__('redisgraph'))."
              "collect().distinct().run()")
    res = env.cmd('RG.PYEXECUTE', script, 'REQUIREMENTS', 'redisgraph')
    errors = res[1]
    # the initiator will not raise error
    env.assertEqual(len(errors), env.shardsCount - 1)
Esempio n. 7
0
def testSpellCheckWithDuplications():
    """Suggestions merge index terms and dict terms, ordered by score descending."""
    env = Env()
    env.cmd('ft.dictadd', 'dict', 'name1', 'name4', 'name5')
    env.cmd('ft.create', 'idx', 'SCHEMA', 'name', 'TEXT', 'body', 'TEXT')
    for doc_id, name, body in [('doc1', 'name1', 'body1'),
                               ('doc2', 'name2', 'body2'),
                               ('doc3', 'name2', 'name2')]:
        env.cmd('ft.add', 'idx', doc_id, 1.0, 'FIELDS', 'name', name, 'body', body)
    # dict-only terms ('name4', 'name5') score 0; index terms score by frequency
    expected = [['TERM', 'name',
                 [['0.66666666666666663', 'name2'],
                  ['0.33333333333333331', 'name1'],
                  ['0', 'name4'], ['0', 'name5']]]]
    env.expect('ft.spellcheck', 'idx', 'name', 'TERMS', 'INCLUDE', 'dict').equal(expected)
def test_create_params():
    """TS.CREATE must reject non-numeric RETENTION/CHUNK_SIZE and duplicate keys."""
    with Env().getConnection() as r:
        # test string instead of value
        for bad_cmd in ('TS.CREATE invalid RETENTION retention',
                        'TS.CREATE invalid CHUNK_SIZE chunk_size'):
            with pytest.raises(redis.ResponseError) as excinfo:
                assert r.execute_command(bad_cmd)

        r.execute_command('TS.CREATE a')
        with pytest.raises(redis.ResponseError) as excinfo:
            assert r.execute_command('TS.CREATE a')  # filter exists
Esempio n. 9
0
def test_parallelism():
    """ONNX backend: verify INTRA/INTER_OP_PARALLELISM module args are applied.

    Runs a sanity inference on the first env, then restarts with different
    parallelism settings and checks the reported load-time config changes.
    """
    env = Env(moduleArgs='INTRA_OP_PARALLELISM 1 INTER_OP_PARALLELISM 1')
    if not TEST_ONNX:
        env.debugPrint("skipping {} since TEST_ONNX=0".format(
            sys._getframe().f_code.co_name),
                       force=True)
        return

    def load_time_config(connection):
        # Parse the module section of INFO MODULES into a {key: value} dict.
        return {
            k.split(":")[0]: k.split(":")[1]
            for k in connection.execute_command("INFO MODULES").decode().split("#")
            [3].split()[1:]
        }

    con = env.getConnection()
    model_pb = load_file_content('mnist.onnx')
    sample_raw = load_file_content('one.raw')

    ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'ONNX', DEVICE, 'BLOB',
                              model_pb)
    env.assertEqual(ret, b'OK')
    con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 1, 1, 28, 28, 'BLOB',
                        sample_raw)

    con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 1, 'a{1}',
                        'OUTPUTS', 1, 'b{1}')
    ensureSlaveSynced(con, env)
    values = con.execute_command('AI.TENSORGET', 'b{1}', 'VALUES')
    # the 'one.raw' sample should classify as digit 1
    argmax = max(range(len(values)), key=lambda i: values[i])
    env.assertEqual(argmax, 1)

    config = load_time_config(con)
    env.assertEqual(config["ai_inter_op_parallelism"], "1")
    env.assertEqual(config["ai_intra_op_parallelism"], "1")

    # Restart with different parallelism settings. Bug fix: get a fresh
    # connection so INFO is read from the new server, not the stale one.
    env = Env(moduleArgs='INTRA_OP_PARALLELISM 2 INTER_OP_PARALLELISM 2')
    con = env.getConnection()
    config = load_time_config(con)
    env.assertEqual(config["ai_inter_op_parallelism"], "2")
    env.assertEqual(config["ai_intra_op_parallelism"], "2")
Esempio n. 10
0
def testSpellCheckWrongArity():
    """ft.spellcheck called without a query term must raise an arity error."""
    env = Env()
    env.cmd('ft.dictadd', 'dict', 'name')
    env.cmd('ft.create', 'idx', 'SCHEMA', 'name', 'TEXT', 'body', 'TEXT')
    env.cmd('ft.add', 'idx', 'doc1', 1.0, 'FIELDS', 'name', 'name1', 'body',
            'body1')
    env.cmd('ft.add', 'idx', 'doc2', 1.0, 'FIELDS', 'name', 'name2', 'body',
            'body2')
    env.cmd('ft.add', 'idx', 'doc3', 1.0, 'FIELDS', 'name', 'name2', 'body',
            'name2')
    env.expect('ft.spellcheck', 'idx').raiseError()
    # NOTE(review): identical to the line above — presumably one of these was
    # meant to use a different wrong argument count (e.g. no index at all);
    # confirm the intended second case.
    env.expect('ft.spellcheck', 'idx').raiseError()
Esempio n. 11
0
def test_valid_labels():
    """Label values that are empty or contain '(' ')' ',' must be rejected."""
    with Env().getClusterConnectionIfNeeded() as r:
        # empty label value at creation time
        with pytest.raises(redis.ResponseError) as excinfo:
            r.execute_command('TS.CREATE', 'tester', 'LABELS', 'name', '')
        # invalid label values at TS.ADD time
        for bad_location in ('', 'list)', 'li(st', 'lis,t'):
            with pytest.raises(redis.ResponseError) as excinfo:
                r.execute_command('TS.ADD', 'tester2', '*', 1, 'LABELS',
                                  'name', 'myName', 'location', bad_location)
Esempio n. 12
0
def test_different_chunk_size():
    """CHUNK_SIZE must be a positive, properly aligned value within the allowed range."""
    Env().skipOnCluster()
    with Env().getConnection() as r:
        # zero, negative, unaligned/too-small, and too-large sizes all fail
        for invalid_size in ('0', '-1000', '100', '40000000'):
            with pytest.raises(redis.ResponseError) as excinfo:
                r.execute_command('TS.add', 'tester', "1636545188", "123",
                                  'LABELS', 'id', 'abc1231232', 'CHUNK_SIZE',
                                  invalid_size)

        # a sane chunk size is accepted
        r.execute_command('TS.add', 'tester2', "1636545188", "123", 'LABELS',
                          'id', 'abc1231232', 'CHUNK_SIZE', '40000')
Esempio n. 13
0
def test_downsampling_rules(self):
    """
    Test downsampling rules - avg,min,max,count,sum with 4 keys each.
    Downsample in resolution of:
    1sec (should be the same length as the original series),
    3sec (number of samples is divisible by 10),
    10s (number of samples is not divisible by 10),
    1000sec (series should be empty since there are not enough samples)
    Insert some data and check that the length, the values and the info of the downsample series are as expected.
    """
    with Env().getConnection() as r:
        assert r.execute_command('TS.CREATE', 'tester')
        rules = ['avg', 'sum', 'count', 'max', 'min']
        resolutions = [1, 3, 10, 1000]
        # one destination series + one compaction rule per (rule, resolution) pair
        for rule in rules:
            for resolution in resolutions:
                assert r.execute_command(
                    'TS.CREATE', 'tester_{}_{}'.format(rule, resolution))
                assert r.execute_command(
                    'TS.CREATERULE', 'tester',
                    'tester_{}_{}'.format(rule, resolution), 'AGGREGATION',
                    rule, resolution)

        start_ts = 0
        samples_count = 501
        end_ts = start_ts + samples_count
        values = list(range(samples_count))
        _insert_data(r, 'tester', start_ts, samples_count, values)
        # a far-future sample closes all open buckets so the compactions flush
        r.execute_command('TS.ADD', 'tester', 3000, 7.77)

        for rule in rules:
            for resolution in resolutions:
                actual_result = r.execute_command(
                    'TS.RANGE', 'tester_{}_{}'.format(rule, resolution),
                    start_ts, end_ts)
                # one downsampled point per (partial) bucket
                assert len(actual_result) == math.ceil(samples_count /
                                                       float(resolution))
                expected_result = calc_rule(rule, values, resolution)
                assert _get_series_value(actual_result) == expected_result
                # last time stamp should be the beginning of the last bucket
                assert _get_ts_info(r, 'tester_{}_{}'.format(rule, resolution)).last_time_stamp == \
                       (samples_count - 1) - (samples_count - 1) % resolution

        # test for results after empty buckets
        r.execute_command('TS.ADD', 'tester', 6000, 0)
        for rule in rules:
            for resolution in resolutions:
                actual_result = r.execute_command(
                    'TS.RANGE', 'tester_{}_{}'.format(rule, resolution), 3000,
                    6000)
                # only the bucket containing ts=3000 has data in this window
                assert len(actual_result) == 1
                assert _get_series_value(actual_result) == [7.77] or \
                       _get_series_value(actual_result) == [1]
Esempio n. 14
0
def delete_graph(graph_id):
    """Attempt to delete the graph named *graph_id*.

    Returns True when deletion succeeds, False when it fails for any reason.
    """
    env = Env(decodeResponses=True)
    conn = env.getConnection()
    graph = Graph(graph_id, conn)

    # Try to delete graph.
    try:
        graph.delete()
        return True
    except Exception:
        # Graph deletion failed. (Was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; narrowed to Exception.)
        return False
Esempio n. 15
0
def test_extensive_ts_madd():
    """Bulk-load a real dataset via TS.MADD into two keys and verify a full range read."""
    Env().skipOnCluster()
    with Env(decodeResponses=True).getConnection() as r:
        r.execute_command("ts.create", 'test_key1')
        r.execute_command("ts.create", 'test_key2')
        with open("lemire_canada.txt","r") as file:
            samples = [float(line.strip()) for line in file.readlines()]
        # one MADD per sample, same timestamp/value written to both keys
        for ts, value in enumerate(samples, start=1):
            res = r.execute_command("ts.madd", 'test_key1', ts, value, 'test_key2', ts, value)
            assert res == [ts, ts]
        returned_floats = r.execute_command('ts.range', 'test_key1', "-", "+")
        assert len(returned_floats) == len(samples)
        # every datapoint comes back with its timestamp and exact value
        for ts, datapoint in enumerate(returned_floats, start=1):
            assert ts == datapoint[0]
            assert samples[ts - 1] == float(datapoint[1])
Esempio n. 16
0
 def test_is_tcp(self):
     """Sanity-check a TCP (non-cluster) OSS env: up while running, down after stop."""
     self.env = Env(useSlaves=True,
                    env='oss',
                    logDir=self.test_dir,
                    redisBinaryPath=REDIS_BINARY,
                    redisEnterpriseBinaryPath=REDIS_ENTERPRISE_BINARY,
                    dmcBinaryPath=DMC_PROXY_BINARY)
     # an OSS env with slaves is still not a cluster, and listens on TCP
     assert self.env.isCluster() == False
     assert self.env.isUp() == True
     assert self.env.isTcp() == True
     self.env.stop()
     assert self.env.isUp() == False
Esempio n. 17
0
def test_rdb_aggregation_context():
    """
    Check that the aggregation context of the rules is saved in rdb. Write data with not a full bucket,
    then save it and restore, add more data to the bucket and check the rules results considered the previous data
    that was in that bucket in their calculation. Check on avg and min, since all the other rules use the same
    context as min.
    """
    start_ts = 3
    samples_count = 4  # 1 full bucket and another one with 1 value
    key_name = 'tester{abc}'  # hash tag keeps all related keys on one cluster shard
    with Env().getClusterConnectionIfNeeded() as r:
        assert r.execute_command('TS.CREATE', key_name)
        assert r.execute_command('TS.CREATE', '{}_agg_avg_3'.format(key_name))
        assert r.execute_command('TS.CREATE', '{}_agg_min_3'.format(key_name))
        assert r.execute_command('TS.CREATE', '{}_agg_sum_3'.format(key_name))
        assert r.execute_command('TS.CREATE', '{}_agg_std_3'.format(key_name))
        assert r.execute_command('TS.CREATERULE', key_name, '{}_agg_avg_3'.format(key_name), 'AGGREGATION', 'AVG', 3)
        assert r.execute_command('TS.CREATERULE', key_name, '{}_agg_min_3'.format(key_name), 'AGGREGATION', 'MIN', 3)
        assert r.execute_command('TS.CREATERULE', key_name, '{}_agg_sum_3'.format(key_name), 'AGGREGATION', 'SUM', 3)
        assert r.execute_command('TS.CREATERULE', key_name, '{}_agg_std_3'.format(key_name), 'AGGREGATION', 'STD.S', 3)
        _insert_data(r, key_name, start_ts, samples_count, list(range(samples_count)))
        # DUMP every series so the partially-filled bucket round-trips through RESTORE
        data_tester = r.execute_command('dump', key_name)
        data_avg_tester = r.execute_command('dump', '{}_agg_avg_3'.format(key_name))
        data_min_tester = r.execute_command('dump', '{}_agg_min_3'.format(key_name))
        data_sum_tester = r.execute_command('dump', '{}_agg_sum_3'.format(key_name))
        data_std_tester = r.execute_command('dump', '{}_agg_std_3'.format(key_name))
        r.execute_command('DEL',
                          key_name,
                          '{}_agg_avg_3'.format(key_name),
                          '{}_agg_min_3'.format(key_name),
                          '{}_agg_sum_3'.format(key_name),
                          '{}_agg_std_3'.format(key_name))
        r.execute_command('RESTORE', key_name, 0, data_tester)
        r.execute_command('RESTORE', '{}_agg_avg_3'.format(key_name), 0, data_avg_tester)
        r.execute_command('RESTORE', '{}_agg_min_3'.format(key_name), 0, data_min_tester)
        r.execute_command('RESTORE', '{}_agg_sum_3'.format(key_name), 0, data_sum_tester)
        r.execute_command('RESTORE', '{}_agg_std_3'.format(key_name), 0, data_std_tester)
        assert r.execute_command('TS.ADD', key_name, start_ts + samples_count, samples_count)
        assert r.execute_command('TS.ADD', key_name, start_ts + samples_count + 10, 0)  # closes the last time_bucket
        # if the aggregation context wasn't saved, the results were considering only the new value added
        expected_result_avg = [[start_ts, b'1'], [start_ts + 3, b'3.5']]
        expected_result_min = [[start_ts, b'0'], [start_ts + 3, b'3']]
        expected_result_sum = [[start_ts, b'3'], [start_ts + 3, b'7']]
        expected_result_std = [[start_ts, b'1'], [start_ts + 3, b'0.7071']]
        actual_result_avg = r.execute_command('TS.range', '{}_agg_avg_3'.format(key_name), start_ts, start_ts + samples_count)
        assert actual_result_avg == expected_result_avg
        actual_result_min = r.execute_command('TS.range', '{}_agg_min_3'.format(key_name), start_ts, start_ts + samples_count)
        assert actual_result_min == expected_result_min
        actual_result_sum = r.execute_command('TS.range', '{}_agg_sum_3'.format(key_name), start_ts, start_ts + samples_count)
        assert actual_result_sum == expected_result_sum
        actual_result_std = r.execute_command('TS.range', '{}_agg_std_3'.format(key_name), start_ts, start_ts + samples_count)
        assert actual_result_std[0] == expected_result_std[0]
        # STD is a float; compare the second point within the allowed tolerance
        assert abs(float(actual_result_std[1][1]) - float(expected_result_std[1][1])) < ALLOWED_ERROR
Esempio n. 18
0
def test_partial_madd():
    """TS.MADD mixing '*' and explicit timestamps: each key receives its own timestamp."""
    Env().skipOnCluster()
    skip_on_rlec()
    with Env().getConnection() as r:
        for key in ('test_key1', 'test_key2', 'test_key3'):
            r.execute_command("ts.create", key)

        now = int(time.time() * 1000)
        # '*' resolves to the server clock; the other keys use explicit timestamps
        res = r.execute_command("ts.madd", 'test_key1', "*", 10, 'test_key2',
                                2000, 20, 'test_key3', 3000, 30)
        assert now <= res[0]
        assert res[1] == 2000
        assert res[2] == 3000

        res = r.execute_command("ts.madd", 'test_key1', now + 1000, 10,
                                'test_key2', 1000, 20, 'test_key3', 3001, 30)
        assert (res[0], res[1], res[2]) == (now + 1000, 1000, 3001)
        # each series ends up with exactly two samples
        for key in ('test_key1', 'test_key2', 'test_key3'):
            assert len(r.execute_command('ts.range', key, "-", "+")) == 2
def test_errors():
    """Wrong arity, wrong key type and malformed values must all raise ResponseError."""
    with Env().getConnection() as r:
        # test wrong arity
        for command in ('TS.CREATE', 'TS.ALTER', 'TS.ADD', 'TS.MADD',
                        'TS.INCRBY', 'TS.DECRBY', 'TS.CREATERULE',
                        'TS.DELETERULE', 'TS.QUERYINDEX', 'TS.GET', 'TS.MGET',
                        'TS.RANGE', 'TS.MRANGE', 'TS.INFO'):
            with pytest.raises(redis.ResponseError) as excinfo:
                assert r.execute_command(command)

        # different type key
        r.execute_command('SET foo bar')
        bad_commands = (
            'TS.GET foo * 5',             # too many args
            'TS.GET foo',                 # wrong type
            'TS.GET bar',                 # does not exist
            'TS.INFO foo',                # wrong type
            'TS.INFO bar',                # does not exist
            'TS.RANGE foo 0 -1',
            'TS.ALTER foo',
            'TS.ADD values timestamp 5',  # string
            'TS.ADD values * value',      # string
        )
        for command in bad_commands:
            with pytest.raises(redis.ResponseError) as excinfo:
                assert r.execute_command(command)
Esempio n. 20
0
    def test_compact(self):
        """Cuckoo filter: CF.COMPACT reduces the filter's footprint after inserts."""
        self.env = Env()
        self.cmd('FLUSHALL')
        q = 100
        self.cmd('CF.RESERVE cf 8 MAXITERATIONS 50')

        # `range` instead of the Python-2-only `xrange` (NameError on Python 3).
        for x in range(q):
            self.cmd('cf.add cf', str(x))

        for x in range(q):
            self.assertEqual(1, self.cmd('cf.exists cf', str(x)))

        str1 = self.cmd('cf.debug cf')[49:52]
        self.assertGreaterEqual(str1, "130")  # In experiments was larger than 130
        self.assertEqual(self.cmd('cf.compact cf'), 'OK')
        str2 = self.cmd('cf.debug cf')[49:52]
        self.assertGreaterEqual(str1, str2)  # Expect to see reduction after compaction

        # CF.COMPACT on a missing key or with extra args must fail
        self.assertRaises(ResponseError, self.cmd, 'CF.COMPACT a')
        self.assertRaises(ResponseError, self.cmd, 'CF.COMPACT a b')
        self.env = Env(decodeResponses=True)
Esempio n. 21
0
def test_sddel():
    """rdict.sddel removes matching scores and cleans up the backing keys when empty."""
    env = Env()
    for score, value in (('0.5', '1'), ('1', '2'), ('2', '3')):
        env.cmd('rdict.sdset', 'myDict', score, value)
    # only '0.5' exists of the two requested, so one deletion is reported
    env.expect('rdict.sddel', 'myDict', '0.5', '0.7').equal(1)
    env.expect('rdict.sddel', 'myDict', '0.6').equal(0)
    env.expect('hlen', '{myDict}_sd1').equal(2)
    env.expect('zrange', '{myDict}_sd2', '0', '-1').equal(['1', '2'])
    env.expect('rdict.sddel', 'myDict', '1', '2').equal(2)
    # deleting the last entries removes the backing keys entirely
    env.expect('keys', '*').equal([])
    env.expect('rdict.sddel', 'myDict', '3').equal(0)
Esempio n. 22
0
def test_empty():
    """Freshly created series (compressed and uncompressed) contain no samples."""
    with Env().getClusterConnectionIfNeeded() as r:
        r.execute_command('ts.create', 'empty')
        info = _get_ts_info(r, 'empty')
        assert info.total_samples == 0
        assert [] == r.execute_command('TS.range', 'empty', 0, -1)
        assert [] == r.execute_command('TS.get', 'empty')

        r.execute_command('ts.create', 'empty_uncompressed', 'uncompressed')
        info = _get_ts_info(r, 'empty_uncompressed')
        assert info.total_samples == 0
        assert [] == r.execute_command('TS.range', 'empty_uncompressed', 0, -1)
        # fix copy-paste: query the uncompressed series, not 'empty' again
        assert [] == r.execute_command('TS.get', 'empty_uncompressed')
Esempio n. 23
0
def test_create_compaction_rule_with_wrong_aggregation():
    """TS.CREATERULE must reject unknown aggregation names ('MAXX', 'MA')."""
    with Env().getConnection() as r:
        assert r.execute_command('TS.CREATE', 'tester')
        assert r.execute_command('TS.CREATE', 'tester_agg_max_10')
        # both a misspelled and a truncated aggregator name are invalid
        for bad_aggregation in ('MAXX', 'MA'):
            with pytest.raises(redis.ResponseError) as excinfo:
                assert r.execute_command('TS.CREATERULE', 'tester',
                                         'tester_agg_max_10', 'AGGREGATION',
                                         bad_aggregation, 10)
Esempio n. 24
0
def test_lazy_del_src():
    """Deleting the source series detaches the compaction rule from the destination."""
    with Env().getClusterConnectionIfNeeded() as r:
        r.execute_command("ts.create", 'src{test}')
        r.execute_command("ts.create", 'dst{test}')
        r.execute_command("ts.createrule", 'src{test}', 'dst{test}', 'AGGREGATION', 'avg', 60000)

        # the rule is visible from both sides while the source exists
        assert _get_ts_info(r, 'dst{test}').sourceKey.decode() == 'src{test}'
        assert len(_get_ts_info(r, 'src{test}').rules) == 1
        assert _get_ts_info(r, 'src{test}').rules[0][0].decode() == 'dst{test}'
        r.execute_command('DEL', 'src{test}')

        # once the source is gone, the destination is orphaned
        assert _get_ts_info(r, 'dst{test}').sourceKey is None
        assert len(_get_ts_info(r, 'dst{test}').rules) == 0
Esempio n. 25
0
 def test_compare_env(self):
     """compareEnvs: identically configured envs compare equal; a differing useAof flag does not."""
     self.env = Env(env='oss',
                    logDir=self.test_dir,
                    redisBinaryPath=REDIS_BINARY,
                    redisEnterpriseBinaryPath=REDIS_ENTERPRISE_BINARY,
                    dmcBinaryPath=DMC_PROXY_BINARY)
     # same configuration -> envs compare equal
     env = Env(env='oss',
               logDir=self.test_dir,
               redisBinaryPath=REDIS_BINARY,
               redisEnterpriseBinaryPath=REDIS_ENTERPRISE_BINARY,
               dmcBinaryPath=DMC_PROXY_BINARY)
     assert self.env.compareEnvs(env) is True
     env.stop()
     # flipping useAof makes the comparison fail
     env = Env(env='oss',
               logDir=self.test_dir,
               redisBinaryPath=REDIS_BINARY,
               redisEnterpriseBinaryPath=REDIS_ENTERPRISE_BINARY,
               dmcBinaryPath=DMC_PROXY_BINARY,
               useAof=True)
     assert self.env.compareEnvs(env) is False
     env.stop()
     self.env.stop()
Esempio n. 26
0
def testSpellCheckNoneExistingDicts():
    """INCLUDE/EXCLUDE of a dictionary that was never created must raise an error."""
    env = Env()
    env.cmd('ft.create', 'idx', 'SCHEMA', 'name', 'TEXT', 'body', 'TEXT')
    for doc_id, name, body in (('doc1', 'name1', 'body1'),
                               ('doc2', 'name2', 'body2'),
                               ('doc3', 'name2', 'name2')):
        env.cmd('ft.add', 'idx', doc_id, 1.0, 'FIELDS', 'name', name,
                'body', body)
    # 'dict' was never created via ft.dictadd, so both modes must fail
    for mode in ('INCLUDE', 'EXCLUDE'):
        env.expect('ft.spellcheck', 'idx', 'name', 'TERMS', mode,
                   'dict').raiseError()
Esempio n. 27
0
def test_create_params():
    """TS.CREATE rejects non-numeric RETENTION/CHUNK_SIZE values and duplicate keys."""
    with Env().getClusterConnectionIfNeeded() as r:
        # test string instead of value
        for option, bad_value in (('RETENTION', 'retention'),
                                  ('CHUNK_SIZE', 'chunk_size')):
            with pytest.raises(redis.ResponseError) as excinfo:
                assert r.execute_command('TS.CREATE', 'invalid', option,
                                         bad_value)

        r.execute_command('TS.CREATE', 'a')
        with pytest.raises(redis.ResponseError) as excinfo:
            assert r.execute_command('TS.CREATE', 'a')  # filter exists
def test_empty():
    """Freshly created series (compressed and uncompressed) contain no samples."""
    with Env().getConnection() as r:
        r.execute_command('ts.create empty')
        info = _get_ts_info(r, 'empty')
        assert info.total_samples == 0
        assert [] == r.execute_command('TS.range empty 0 -1')
        assert [] == r.execute_command('TS.get empty')

        r.execute_command('ts.create empty_uncompressed uncompressed')
        info = _get_ts_info(r, 'empty_uncompressed')
        assert info.total_samples == 0
        assert [] == r.execute_command('TS.range empty_uncompressed 0 -1')
        # fix copy-paste: query the uncompressed series, not 'empty' again
        assert [] == r.execute_command('TS.get empty_uncompressed')
Esempio n. 29
0
def testDocIndexedInTwoIndexes():
    """A document indexed by two indexes survives dropping one index with DD."""
    env = Env(moduleArgs='MAXDOCTABLESIZE 50')
    env.skipOnCluster()
    env.expect('FT.CREATE idx1 SCHEMA t TEXT').ok()
    env.expect('FT.CREATE idx2 SCHEMA t TEXT').ok()

    for i in range(1000):
        # plain int literals: `1L` / `[0L]` are Python-2-only syntax
        # (and 1 == 1L there, so the assertion is unchanged)
        env.expect('HSET', 'doc%d' % i, 't', 'foo').equal(1)

    # dropping idx2 with DD deletes its docs, emptying idx1's result set too
    env.expect('FT.DROPINDEX idx2 DD').ok()
    env.expect('FT.SEARCH idx1 foo').equal([0])

    env.expect('FT.DROPINDEX idx1 DD').ok()
Esempio n. 30
0
def testGCIntegrationWithRedisFork(env):
    """Fork-GC must coexist with Redis' own BGSAVE fork without breaking either."""
    if env.env == 'existing-env':
        env.skip()
    elif env.isCluster():
        raise unittest.SkipTest()
    # fresh env with fork GC and a delayed child exit so the two forks overlap
    env = Env(moduleArgs='GC_POLICY FORK')
    env.expect('FT.CONFIG', 'SET', 'FORKGC_SLEEP_BEFORE_EXIT', '4').ok()
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'title', 'TEXT', 'SORTABLE').ok()
    env.expect('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'title', 'hello world').ok()
    env.expect('bgsave').equal('Background saving started')
    env.cmd('FT.DEBUG', 'GC_FORCEINVOKE', 'idx')
    env.expect('bgsave').equal('Background saving started')
    env.cmd('FT.CONFIG', 'SET', 'FORKGC_SLEEP_BEFORE_EXIT', '0')