Пример #1
0
def test_perform_readahead_happy_path(m_process, m_from_cache, m_release_leader, m_become_leader):
    """ Leader election succeeds and all ten stale kqueries are processed. """
    redis_cli = MockRedis()
    redis_cli.smembers = lambda _: set(['tscached:kquery:superspecial'])
    m_become_leader.return_value = True

    # every kquery last saw data 30 minutes ago, so all are due for readahead
    stale_ts = int(datetime.datetime.now().strftime('%s')) - 1800
    kqueries = []
    for ndx in xrange(10):
        kquery = KQuery(redis_cli)
        kquery.cached_data = {'last_add_data': stale_ts,
                              'redis_key': 'tscached:kquery:' + str(ndx)}
        kqueries.append(kquery)
    m_from_cache.return_value = kqueries
    m_process.return_value = {'sample_size': 666}, 'warm_append'

    assert perform_readahead({}, redis_cli) is None
    assert m_become_leader.call_count == 1
    assert m_release_leader.call_count == 1
    assert m_from_cache.call_count == 1
    assert m_from_cache.call_args_list[0][0] == (['tscached:kquery:superspecial'], redis_cli)
    assert m_process.call_count == 10
    expected_ktr = {'start_relative': {'unit': 'minutes', 'value': '24194605'}}
    for ndx in xrange(10):
        assert m_process.call_args_list[ndx][0] == ({}, redis_cli, kqueries[ndx], expected_ktr)
Пример #2
0
def test_become_leader_catch_rediserror(m_redlock):
    """ A RedisError during the shadow-server lookup must yield False, not raise. """
    m_redlock.return_value.acquire.return_value = False
    redis_cli = MockRedis()

    def _raise_redis_error(_):
        raise redis.exceptions.RedisError
    redis_cli.get = _raise_redis_error
    assert become_leader({'shadow': {}}, redis_cli) is False
Пример #3
0
def test_merge_at_beginning_too_much_overlap():
    """ trying to merge so much duplicate data we give up and return just the cached data """
    cached = MTS(MockRedis())
    cached.key_basis = lambda: 'some-key-goes-here'
    incoming = MTS(MockRedis())

    # both series carry identical data: 100% overlap
    cached.result = {'values': copy.deepcopy(INITIAL_MTS_DATA)}
    incoming.result = {'values': copy.deepcopy(INITIAL_MTS_DATA)}
    cached.merge_at_beginning(incoming)
    assert cached.result['values'] == INITIAL_MTS_DATA
Пример #4
0
def test_merge_at_beginning_replaces_when_existing_data_is_short():
    """ if we can't iterate over the cached data, and it's out of order, we replace it. """
    cached = MTS(MockRedis())
    cached.key_basis = lambda: 'some-key-goes-here'
    incoming = MTS(MockRedis())

    incoming.result = {'values': copy.deepcopy(INITIAL_MTS_DATA)}
    # too short to iterate, and out of order relative to the incoming data
    cached.result = {'values': [[795, 1000], [797, 1100]]}
    cached.merge_at_beginning(incoming)
    assert cached.result['values'] == INITIAL_MTS_DATA
Пример #5
0
def test_merge_at_beginning_no_overlap():
    """ common case, no overlap """
    cached = MTS(MockRedis())
    cached.key_basis = lambda: 'some-key-goes-here'
    incoming = MTS(MockRedis())

    cached.result = {'values': copy.deepcopy(INITIAL_MTS_DATA)}
    incoming.result = {'values': [[788, 9]]}
    cached.merge_at_beginning(incoming)
    # the single new point is prepended untouched
    assert cached.result['values'] == [[788, 9]] + INITIAL_MTS_DATA
Пример #6
0
def test_merge_at_end_one_overlap():
    """ single overlapping point - make sure the new_mts version is favored """
    cached = MTS(MockRedis())
    cached.key_basis = lambda: 'some-key-goes-here'
    incoming = MTS(MockRedis())

    cached.result = {'values': copy.deepcopy(INITIAL_MTS_DATA)}
    # timestamp 799 collides with cached data; its value must come from incoming
    incoming.result = {'values': [[799, 9001], [800, 21], [801, 22]]}
    cached.merge_at_end(incoming)
    assert cached.result['values'][-3:] == [[799, 9001], [800, 21], [801, 22]]
Пример #7
0
def test_merge_at_end_no_overlap():
    """ common case, data doesn't overlap """
    cached = MTS(MockRedis())
    cached.key_basis = lambda: 'some-key-goes-here'
    incoming = MTS(MockRedis())

    cached.result = {'values': copy.deepcopy(INITIAL_MTS_DATA)}
    incoming.result = {'values': [[800, 21], [801, 22]]}
    cached.merge_at_end(incoming)
    # new points are simply appended
    assert cached.result['values'] == INITIAL_MTS_DATA + [[800, 21], [801, 22]]
Пример #8
0
def test_merge_at_beginning_two_overlap():
    """ two overlapping points - make sure the new_mts versions are favored

        Bug fix: the docstring said "single overlapping point" (copy-pasted from
        the one-overlap test) but this case overlaps two points (789, 790), as
        shown by the INITIAL_MTS_DATA[2:] slice in the expected result.
    """
    mts = MTS(MockRedis())
    mts.key_basis = lambda: 'some-key-goes-here'
    new_mts = MTS(MockRedis())

    mts.result = {'values': copy.deepcopy(INITIAL_MTS_DATA)}
    new_mts.result = {'values': [[788, 9], [789, 9001], [790, 10001]]}
    mts.merge_at_beginning(new_mts)
    # the first two cached points are replaced by the incoming versions
    assert mts.result['values'] == [[788, 9], [789, 9001], [790, 10001]] + INITIAL_MTS_DATA[2:]
Пример #9
0
def test_perform_readahead_redis_error(m_process, m_from_cache, m_release_leader, m_become_leader):
    """ A RedisError fetching the readahead set aborts early but still releases leadership. """
    redis_cli = MockRedis()

    def _broken_smembers(_):
        raise redis.exceptions.RedisError("OOPS!")
    redis_cli.smembers = _broken_smembers
    m_become_leader.return_value = True

    assert perform_readahead({}, redis_cli) is None
    assert m_become_leader.call_count == 1
    assert m_release_leader.call_count == 1
    # nothing downstream of the failed smembers call should run
    assert m_from_cache.call_count == 0
    assert m_process.call_count == 0
Пример #10
0
def test_set_cached(m_create_key):
    """ set_cached JSON-encodes the value and writes it under the derived key with a TTL. """
    m_create_key.return_value = 'some-redis-key'
    redis_cli = MockRedis()
    cache = DataCache(redis_cli, 'sometype')
    cache.expiry = 9001

    cache.set_cached('value-to-set')
    assert redis_cli.set_call_count == 1
    assert redis_cli.set_parms == [['some-redis-key', '"value-to-set"', {'ex': 9001}]]

    # even when redis reports failure the write attempt is still made
    redis_cli.success_flag = False
    cache.set_cached('some-other-value')
    assert redis_cli.set_call_count == 2
    assert redis_cli.set_parms[1] == ['some-redis-key', '"some-other-value"', {'ex': 9001}]
Пример #11
0
def test_hot(m_from_cache):
    """ hot path: each cached MTS contributes its samples to the aggregate response """
    redis_cli = MockRedis()

    def _fake_build_response(_b, response_kquery, _c=True):
        response_kquery['sample_size'] += 100
        response_kquery['results'].append({'hello': 'goodbye'})
        return response_kquery

    mts_list = []
    for _ in xrange(3):
        series = MTS(redis_cli)
        series.build_response = _fake_build_response
        mts_list.append(series)

    m_from_cache.return_value = mts_list

    kq = KQuery(redis_cli)
    kq.cached_data = {'mts_keys': ['kquery:mts:1', 'kquery:mts:2', 'kquery:mts:3']}
    kairos_time_range = {'start_relative': {'unit': 'hours', 'value': '1'}}

    out = cache_calls.hot(redis_cli, kq, kairos_time_range)
    # three series x 100 samples each
    assert out['sample_size'] == 300
    assert len(out['results']) == 3
Пример #12
0
def test_become_leader_acquire_fail(m_redlock):
    """ when the redlock cannot be acquired, become_leader reads but never writes """
    m_redlock.return_value.acquire.return_value = False
    redis_cli = MockRedis()
    assert become_leader({'shadow': {}}, redis_cli) is False
    assert redis_cli.set_call_count == 0
    assert redis_cli.get_call_count == 1
    assert redis_cli.get_parms[0][0] == 'tscached:shadow_server'
Пример #13
0
def test_get_cached(m_create_key):
    """ get_cached performs exactly one GET against the derived redis key """
    m_create_key.return_value = 'some-redis-key'
    redis_cli = MockRedis()
    cache = DataCache(redis_cli, 'sometype')
    assert cache.get_cached() == {'hello': 'goodbye'}
    assert redis_cli.get_call_count == 1
    assert redis_cli.get_parms == [['some-redis-key']]
Пример #14
0
def test_proxy_to_kairos_chunked_happy(m_query_kairos):
    """ Each chunked time range becomes one kairos query with absolute millisecond bounds.

        Bug fix: the second element of the mocked 'queries' list was a set
        literal {'name', 'second'} where a dict {'name': 'second'} was clearly
        intended (compare the first element).
    """
    m_query_kairos.return_value = {
        'queries': [{'name': 'first'}, {'name': 'second'}]
    }

    kq = KQuery(MockRedis())
    kq.query = {'hello': 'goodbye'}
    then = datetime.datetime.fromtimestamp(1234567890)
    diff = datetime.timedelta(minutes=30)
    time_ranges = [(then - diff, then), (then - diff - diff, then - diff)]
    results = kq.proxy_to_kairos_chunked('localhost', 8080, time_ranges)

    assert len(results) == 2
    assert m_query_kairos.call_count == 2
    expected_query = {'cache_time': 0, 'metrics': [{'hello': 'goodbye'}]}

    # first chunk: (then - 30m, then)
    expected_query['start_absolute'] = int((then - diff).strftime('%s')) * 1000
    expected_query['end_absolute'] = int(then.strftime('%s')) * 1000
    assert m_query_kairos.call_args_list[0] == (('localhost', 8080, expected_query),
                                                {'propagate': False})

    # second chunk: (then - 60m, then - 30m)
    expected_query['start_absolute'] = int((then - diff - diff).strftime('%s')) * 1000
    expected_query['end_absolute'] = int((then - diff).strftime('%s')) * 1000
    assert m_query_kairos.call_args_list[1] == (('localhost', 8080, expected_query),
                                                {'propagate': False})
Пример #15
0
def test_build_response_yes_trim_efficient_not_ok(m_conforms, m_efficient,
                                                  m_robust):
    """ non-conforming series: trimming must route through robust_trim, never efficient_trim """
    m_conforms.return_value = False
    m_robust.return_value = [[1234567890000, 22], [1234567900000, 23]]

    response_kquery = {'results': [], 'sample_size': 0}
    mts = MTS(MockRedis())
    mts.result = {'name': 'myMetric',
                  'values': [[1234567890000, 12], [1234567900000, 13]]}

    ktr = {'start_absolute': '1234567880000'}
    # build twice to verify the response accumulates
    result = mts.build_response(ktr, response_kquery, trim=True)
    result = mts.build_response(ktr, response_kquery, trim=True)
    assert len(result) == 2
    assert result['sample_size'] == 4
    expected_series = {'name': 'myMetric',
                       'values': [[1234567890000, 22], [1234567900000, 23]]}
    assert result['results'][0] == expected_series
    assert result['results'][1] == result['results'][0]
    assert m_conforms.call_count == 2
    assert m_robust.call_count == 2
    assert m_efficient.call_count == 0
    trim_start = datetime.datetime.fromtimestamp(1234567880)
    for call_ndx in xrange(2):
        assert m_robust.call_args_list[call_ndx][0] == (trim_start, None)
Пример #16
0
def test_upsert():
    """ upsert writes once to redis and stamps timing metadata on the query.

        Bug fix: the original asserted kq.query['last_add_data'] == time.time()
        exactly, which is flaky whenever the clock ticks between upsert() and
        the assertion; compare with a small tolerance instead.
    """
    class FakeMTS():
        def get_key(self):
            return 'rick-and-morty'

    redis_cli = MockRedis()
    kq = KQuery(redis_cli)
    kq.query = {'hello': 'some_query'}
    kq.add_mts(FakeMTS())
    kq.upsert(datetime.datetime.fromtimestamp(1234567890), None)
    assert redis_cli.set_call_count == 1
    assert redis_cli.get_call_count == 0
    assert kq.query['mts_keys'] == ['rick-and-morty']
    # no end time given: last_add_data falls back to "now"
    assert abs(kq.query['last_add_data'] - time.time()) < 2
    assert kq.query['earliest_data'] == 1234567890
    assert sorted(kq.query.keys()) == [
        'earliest_data', 'hello', 'last_add_data', 'mts_keys'
    ]

    kq.upsert(datetime.datetime.fromtimestamp(1234567890),
              datetime.datetime.fromtimestamp(1234569890))
    assert redis_cli.set_call_count == 2
    assert redis_cli.get_call_count == 0
    assert kq.query['last_add_data'] == 1234569890
    assert kq.query['earliest_data'] == 1234567890
Пример #17
0
def test__init__(m_create_key):
    """ constructor stores the redis client and cache type verbatim """
    m_create_key.return_value = 'some-redis-key'
    redis_cli = MockRedis()

    cache = DataCache(redis_cli, 'sometype')

    assert cache.redis_client == redis_cli
    assert cache.cache_type == 'sometype'
Пример #18
0
def test_robust_trim_no_end():
    """ with no end bound, robust_trim drops everything before the start time """
    mts = MTS(MockRedis())
    # 1000 points at one-second intervals, timestamps in milliseconds
    mts.result = {'values': [[(1234567890 + offset) * 1000, 0]
                             for offset in xrange(1000)]}

    trimmed = mts.robust_trim(datetime.datetime.fromtimestamp(1234567990))
    # the first 100 seconds of points are trimmed away
    assert len(list(trimmed)) == 900
Пример #19
0
def test_key_basis_no_unset_keys():
    """ should not include keys that aren't set """
    mts = MTS(MockRedis())
    cardinality = copy.deepcopy(MTS_CARDINALITY)
    del cardinality['group_by']
    mts.result = cardinality
    mts.query_mask = cardinality
    assert mts.key_basis() == cardinality
    assert 'group_by' not in mts.key_basis().keys()
Пример #20
0
def test_key_basis_removes_bad_data():
    """ should remove data not in tags, group_by, aggregators, name. see below for query masking.

        (removed an accidentally duplicated deepcopy line)
    """
    mts = MTS(MockRedis())
    cardinality_with_bad_data = copy.deepcopy(MTS_CARDINALITY)
    cardinality_with_bad_data['something-irrelevant'] = 'whatever'

    mts.query_mask = MTS_CARDINALITY
    mts.result = cardinality_with_bad_data
    # the irrelevant key must not survive into the key basis
    assert mts.key_basis() == MTS_CARDINALITY
Пример #21
0
def test_key_basis_does_query_masking():
    """ we only set ecosystem in KQuery, so must remove hostname list when calculating hash.
        otherwise, if the hostname list ever changes (and it will!) the merge will not happen correctly.
    """
    mts = MTS(MockRedis())
    mts.result = MTS_CARDINALITY
    mts.query_mask = {'tags': {'ecosystem': ['dev']}}
    masked = mts.key_basis()
    assert 'ecosystem' in masked['tags']
    assert 'hostname' not in masked['tags']
Пример #22
0
def test_build_response_no_trim():
    """ without trimming, each call appends the full series untouched """
    response_kquery = {'results': [], 'sample_size': 0}
    mts = MTS(MockRedis())
    mts.result = {'name': 'myMetric',
                  'values': [[1234567890000, 12], [1234567900000, 13]]}

    # call twice to verify accumulation
    for _ in xrange(2):
        result = mts.build_response({}, response_kquery, trim=False)
    assert len(result) == 2
    assert result['sample_size'] == 4
    assert result['results'] == [mts.result, mts.result]
Пример #23
0
def test_perform_readahead_backend_error(m_process, m_from_cache, m_release_leader, m_become_leader):
    """ A BackendQueryFailure on the first kquery aborts processing but still releases the lock. """
    redis_cli = MockRedis()
    redis_cli.smembers = lambda _: set(['tscached:kquery:superspecial'])
    m_become_leader.return_value = True

    stale_ts = int(datetime.datetime.now().strftime('%s')) - 1800
    kqueries = []
    for ndx in xrange(10):
        kquery = KQuery(redis_cli)
        kquery.cached_data = {'last_add_data': stale_ts,
                              'redis_key': 'tscached:kquery:' + str(ndx)}
        kqueries.append(kquery)
    m_from_cache.return_value = kqueries
    m_process.side_effect = BackendQueryFailure('OOPS!')

    assert perform_readahead({}, redis_cli) is None
    assert m_become_leader.call_count == 1
    assert m_release_leader.call_count == 1
    assert m_from_cache.call_count == 1
    # processing stops after the first failure
    assert m_process.call_count == 1
Пример #24
0
def test_proxy_to_kairos_chunked_raises_except(m_query_kairos):
    """ an error payload from kairos should surface as BackendQueryFailure """
    m_query_kairos.return_value = {'error': 'some error message',
                                   'status_code': 500}

    kq = KQuery(MockRedis())
    kq.query = {'hello': 'goodbye'}
    anchor = datetime.datetime.fromtimestamp(1234567890)
    half_hour = datetime.timedelta(minutes=30)
    time_ranges = [(anchor - half_hour, anchor),
                   (anchor - half_hour - half_hour, anchor - half_hour)]
    with pytest.raises(BackendQueryFailure):
        kq.proxy_to_kairos_chunked('localhost', 8080, time_ranges)
Пример #25
0
def test_ttl_expire_no():
    """ Use default expiries; verify that 120 secs of data doesn't get TTL'd. """
    now = datetime.datetime.now()
    data = []
    # 12 points, 10 seconds apart, newest first; timestamps in milliseconds
    for offset in xrange(12):
        point_dt = now - datetime.timedelta(seconds=10 * offset)
        data.append([int(point_dt.strftime('%s')) * 1000, offset])
    data.reverse()

    mts = MTS(MockRedis())
    mts.result = {'values': data}
    assert mts.ttl_expire() is False
Пример #26
0
def test_upsert():
    """ upsert serializes the cardinality blob under its redis key, exactly once """
    redis_cli = MockRedis()
    series = MTS(redis_cli)
    series.result = MTS_CARDINALITY
    series.redis_key = 'hello-key'
    series.upsert()

    assert redis_cli.set_call_count == 1
    assert redis_cli.get_call_count == 0
    expected_parms = ['hello-key', json.dumps(MTS_CARDINALITY), {'ex': 10800}]
    assert redis_cli.set_parms == [expected_parms]
Пример #27
0
def test_from_cache():
    """ from_cache pipelines one GET per key and hydrates an MTS for each """
    redis_cli = MockRedis()
    keys = ['key1', 'key2', 'key3']
    hydrated = list(MTS.from_cache(keys, redis_cli))
    assert redis_cli.derived_pipeline.pipe_get_call_count == 3
    assert redis_cli.derived_pipeline.execute_count == 1
    for ndx, mts in enumerate(hydrated):
        assert isinstance(mts, MTS)
        assert mts.result == {'hello': 'goodbye'}
        assert mts.expiry == 10800
        assert mts.redis_key == keys[ndx]
    # everything went through the pipeline, not plain get/set
    assert redis_cli.set_call_count == 0 and redis_cli.get_call_count == 0
Пример #28
0
def test__init__etc():
    """ Test __init__, key_basis, add_mts. """
    kq = KQuery(MockRedis())
    kq.query = {'wubbalubba': 'dubdub'}
    assert kq.related_mts == set()
    # duplicates are absorbed by the set
    for mts_key in ('hello', 'goodbye', 'hello'):
        kq.add_mts(mts_key)

    assert kq.related_mts == set(['hello', 'goodbye'])
    assert kq.key_basis() == {'wubbalubba': 'dubdub'}
Пример #29
0
def test_ttl_expire_yes():
    """ With a short 60/90-sec expiry, old points DO get TTL'd; the cutoff time is returned.

        Bug fix: the docstring was copy-pasted from test_ttl_expire_no and
        claimed the data "doesn't get TTL'd", which is the opposite of what
        this test asserts.
    """
    data = []
    for i in xrange(12):
        then_dt = datetime.datetime.now() - datetime.timedelta(seconds=(10 *
                                                                        i))
        then_ts = int(then_dt.strftime('%s')) * 1000
        data.append([then_ts, i])
    data.reverse()

    mts = MTS(MockRedis())
    mts.result = {'values': data, 'tags': {'no': 'yes'}, 'name': 'whatever'}
    mts.expiry = 60
    mts.gc_expiry = 90
    # expiry cutoff lands mid-series: ttl_expire returns the time of the cut
    assert mts.ttl_expire() == datetime.datetime.fromtimestamp(data[5][0] /
                                                               1000)
Пример #30
0
def test_from_request_replace_align_sampling():
    """ align_sampling on an aggregator is rewritten to align_start_time """
    redis_cli = MockRedis()
    agg_in = {'name': 'sum', 'align_sampling': True,
              'sampling': {'value': '1', 'unit': 'minutes'}}
    agg_out = {'name': 'sum', 'align_start_time': True,
               'sampling': {'value': '1', 'unit': 'minutes'}}
    example_request = {'metrics': [{'hello': 'some query',
                                    'aggregators': [agg_in]}],
                       'start_relative': {'value': '1', 'unit': 'hours'}}
    request_out = {'metrics': [{'hello': 'some query',
                                'aggregators': [agg_out]}],
                   'start_relative': {'value': '1', 'unit': 'hours'}}

    generator = KQuery.from_request(example_request, redis_cli)
    assert isinstance(generator, GeneratorType)
    results = list(generator)
    assert len(results) == 1
    assert isinstance(results[0], KQuery)
    assert results[0].query == request_out['metrics'][0]
    # from_request must not touch redis at all
    assert redis_cli.set_call_count == 0 and redis_cli.get_call_count == 0