Example #1
def test_hot(m_from_cache):
    redis_cli = MockRedis()

    def _fake_build_response(_b, response_kquery, _c=True):
        response_kquery['sample_size'] += 100
        response_kquery['results'].append({'hello': 'goodbye'})
        return response_kquery

    mts_list = []
    for i in xrange(3):
        mts = MTS(redis_cli)
        mts.build_response = _fake_build_response
        mts_list.append(mts)

    m_from_cache.return_value = mts_list

    kq = KQuery(redis_cli)
    kq.cached_data = {
        'mts_keys': ['kquery:mts:1', 'kquery:mts:2', 'kquery:mts:3']
    }
    kairos_time_range = {'start_relative': {'unit': 'hours', 'value': '1'}}

    out = cache_calls.hot(redis_cli, kq, kairos_time_range)
    assert out['sample_size'] == 300
    assert len(out['results']) == 3
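
These tests lean on a MockRedis test double that isn't shown on this page. A minimal sketch follows; the attribute names (set_call_count, get_call_count, set_parms, derived_pipeline and its counters) are taken from the assertions in these examples, and everything else is assumed:

import json


class MockPipeline(object):
    """ Pipeline stub: counts queued GETs, returns canned JSON on execute. """

    def __init__(self):
        self.pipe_get_call_count = 0
        self.execute_count = 0

    def get(self, key):
        self.pipe_get_call_count += 1

    def execute(self):
        self.execute_count += 1
        return [json.dumps({'hello': 'goodbye'})] * self.pipe_get_call_count


class MockRedis(object):
    """ Sketch of the MockRedis double these tests assume. """

    def __init__(self):
        self.set_call_count = 0
        self.get_call_count = 0
        self.set_parms = []
        self.derived_pipeline = MockPipeline()

    def set(self, key, value, **kwargs):
        self.set_call_count += 1
        self.set_parms.append([key, value, kwargs])
        return True

    def get(self, key):
        self.get_call_count += 1
        return json.dumps({'hello': 'goodbye'})

    def pipeline(self):
        return self.derived_pipeline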
Example #2
def test_build_response_yes_trim_efficient_not_ok(m_conforms, m_efficient,
                                                  m_robust):
    m_conforms.return_value = False
    m_robust.return_value = [[1234567890000, 22], [1234567900000, 23]]

    response_kquery = {'results': [], 'sample_size': 0}
    mts = MTS(MockRedis())
    mts.result = {'name': 'myMetric'}
    mts.result['values'] = [[1234567890000, 12], [1234567900000, 13]]

    ktr = {'start_absolute': '1234567880000'}
    result = mts.build_response(ktr, response_kquery, trim=True)
    result = mts.build_response(ktr, response_kquery, trim=True)
    assert len(result) == 2
    assert result['sample_size'] == 4
    assert result['results'][0] == {
        'name': 'myMetric',
        'values': [[1234567890000, 22], [1234567900000, 23]]
    }
    assert result['results'][1] == result['results'][0]
    assert m_conforms.call_count == 2
    assert m_robust.call_count == 2
    assert m_efficient.call_count == 0
    assert m_robust.call_args_list[0][0] == (
        datetime.datetime.fromtimestamp(1234567880), None)
    assert m_robust.call_args_list[1][0] == (
        datetime.datetime.fromtimestamp(1234567880), None)
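
The m_conforms / m_efficient / m_robust arguments imply this test is wrapped in mock.patch decorators. A plausible reconstruction, noting that the patch targets are guesses (only robust_trim is confirmed elsewhere on this page) and that decorators apply bottom-up, so the bottom-most patch feeds the first argument:

import mock

@mock.patch('tscached.mts.MTS.robust_trim')
@mock.patch('tscached.mts.MTS.efficient_trim')
@mock.patch('tscached.mts.MTS.conforms_to_efficient_constraints')
def test_build_response_yes_trim_efficient_not_ok(m_conforms, m_efficient, m_robust):
    pass  # body as in Example #2 above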
Example #3
def test_robust_trim_no_end():
    mts = MTS(MockRedis())
    data = []
    for i in xrange(1000):
        data.append([(1234567890 + i) * 1000, 0])
    mts.result = {'values': data}

    gen = mts.robust_trim(datetime.datetime.fromtimestamp(1234567990))
    assert len(list(gen)) == 900
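
A sketch of what robust_trim would need to do to satisfy this test: a linear scan over [ts_ms, value] pairs, keeping those at or after start. This is an inference from the assertions, not the actual implementation; the end parameter defaulting to None matches the (start, None) call recorded in Example #2.

def robust_trim(self, start, end=None):
    # Yield [ts_ms, value] pairs at/after `start` (and at/before `end`, if given).
    # `start` and `end` are datetimes; stored timestamps are in milliseconds.
    start_ms = int(start.strftime('%s')) * 1000
    end_ms = int(end.strftime('%s')) * 1000 if end else None
    for pair in self.result['values']:
        if pair[0] >= start_ms and (end_ms is None or pair[0] <= end_ms):
            yield pair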
Example #4
def test_key_basis_no_unset_keys():
    """ should not include keys that aren't set """
    mts = MTS(MockRedis())
    mts_cardinality = copy.deepcopy(MTS_CARDINALITY)
    del mts_cardinality['group_by']
    mts.result = mts_cardinality
    mts.query_mask = mts_cardinality
    assert mts.key_basis() == mts_cardinality
    assert 'group_by' not in mts.key_basis().keys()
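
The MTS_CARDINALITY fixture never appears on this page. One plausible shape, inferred from the key_basis() tests (name, ecosystem/hostname tags, group_by, aggregators); the concrete values are invented for illustration:

MTS_CARDINALITY = {
    'name': 'some.metric.name',
    'tags': {'ecosystem': ['dev'], 'hostname': ['dev1', 'dev2']},
    'group_by': {'name': 'tag', 'tags': ['habitat']},
    'aggregators': [{'name': 'sum', 'align_sampling': True}],
}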
Example #5
def test_key_basis_removes_bad_data():
    """ should remove data not in tags, group_by, aggregators, name. see below for query masking."""
    mts = MTS(MockRedis())
    cardinality_with_bad_data = copy.deepcopy(MTS_CARDINALITY)
    cardinality_with_bad_data['something-irrelevant'] = 'whatever'

    mts.query_mask = MTS_CARDINALITY
    mts.result = cardinality_with_bad_data
    assert mts.key_basis() == MTS_CARDINALITY
Example #6
def test_key_basis_does_query_masking():
    """ we only set ecosystem in KQuery, so must remove hostname list when calculating hash.
        otherwise, if the hostname list ever changes (and it will!) the merge will not happen correctly.
    """
    mts = MTS(MockRedis())
    mts.query_mask = {'tags': {'ecosystem': ['dev']}}
    mts.result = MTS_CARDINALITY
    basis = mts.key_basis()
    assert 'ecosystem' in basis['tags']
    assert 'hostname' not in basis['tags']
Example #7
def test_upsert():
    redis_cli = MockRedis()
    mts = MTS(redis_cli)
    mts.result = MTS_CARDINALITY
    mts.redis_key = 'hello-key'
    mts.upsert()

    assert redis_cli.set_call_count == 1
    assert redis_cli.get_call_count == 0
    assert redis_cli.set_parms == [['hello-key', json.dumps(MTS_CARDINALITY), {'ex': 10800}]]
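
The assertions pin upsert() down almost completely: one SET of the JSON-encoded result under redis_key, with a TTL passed as ex (10800 seconds, i.e. 3 hours, appears to be the default expiry). A sketch, assuming the client is stored as self.redis_client:

import json

def upsert(self):
    # One SET: JSON-encoded result, keyed by redis_key, expiring after self.expiry.
    self.redis_client.set(self.redis_key, json.dumps(self.result), ex=self.expiry)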
Example #8
def test_build_response_no_trim():
    response_kquery = {'results': [], 'sample_size': 0}
    mts = MTS(MockRedis())
    mts.result = {'name': 'myMetric'}
    mts.result['values'] = [[1234567890000, 12], [1234567900000, 13]]

    result = mts.build_response({}, response_kquery, trim=False)
    result = mts.build_response({}, response_kquery, trim=False)
    assert len(result) == 2
    assert result['sample_size'] == 4
    assert result['results'] == [mts.result, mts.result]
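
From this test, the trim=False path of build_response simply accumulates: it appends the raw result and bumps sample_size by the point count. A sketch of that branch only (the trim=True path is exercised in Example #2):

def build_response(self, kairos_time_range, response_kquery, trim=True):
    # trim=False path only: append the raw MTS result and count its points.
    if not trim:
        response_kquery['sample_size'] += len(self.result['values'])
        response_kquery['results'].append(self.result)
    return response_kquery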
Example #9
def test_ttl_expire_no():
    """ Use default expiries; verify that 120 secs of data doesn't get TTL'd. """
    data = []
    for i in xrange(12):
        then_dt = datetime.datetime.now() - datetime.timedelta(seconds=(10 * i))
        then_ts = int(then_dt.strftime('%s')) * 1000
        data.append([then_ts, i])
    data.reverse()

    mts = MTS(MockRedis())
    mts.result = {'values': data}
    assert mts.ttl_expire() is False
Example #10
def test_merge_at_end_no_overlap():
    """ common case, data doesn't overlap """
    mts = MTS(MockRedis())
    mts.key_basis = lambda: 'some-key-goes-here'
    new_mts = MTS(MockRedis())

    mts.result = {'values': copy.deepcopy(INITIAL_MTS_DATA)}
    new_mts.result = {'values': [[800, 21], [801, 22]]}
    mts.merge_at_end(new_mts)
    assert mts.result['values'] == INITIAL_MTS_DATA + [[800, 21], [801, 22]]
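
INITIAL_MTS_DATA is another unshown fixture. Any list of [timestamp, value] pairs running from ts 789 through 799 is consistent with all the merge tests on this page; for instance (values arbitrary):

INITIAL_MTS_DATA = [[789 + i, 10 + i] for i in range(11)]  # ts 789..799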
Example #11
def test_merge_at_end_one_overlap():
    """ single overlapping point - make sure the new_mts version is favored """
    mts = MTS(MockRedis())
    mts.key_basis = lambda: 'some-key-goes-here'
    new_mts = MTS(MockRedis())

    mts.result = {'values': copy.deepcopy(INITIAL_MTS_DATA)}
    new_mts.result = {'values': [[799, 9001], [800, 21], [801, 22]]}
    mts.merge_at_end(new_mts)
    assert mts.result['values'][-3:] == [[799, 9001], [800, 21], [801, 22]]
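
Together, the two merge_at_end tests imply: straight append when the ranges don't overlap, and the new series wins where they do. A sketch consistent with both (the real method likely also guards against pathological overlap, as merge_at_beginning does):

def merge_at_end(self, new_mts):
    # Drop cached trailing points that overlap the new data, then append it.
    new_values = new_mts.result['values']
    if not new_values:
        return
    values = self.result['values']
    while values and values[-1][0] >= new_values[0][0]:
        values.pop()  # the new_mts version of an overlapping point is favored
    self.result['values'] = values + new_values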
Example #12
def test_merge_at_beginning_replaces_when_existing_data_is_short():
    """ if we can't iterate over the cached data, and it's out of order, we replace it. """
    mts = MTS(MockRedis())
    mts.key_basis = lambda: 'some-key-goes-here'
    new_mts = MTS(MockRedis())

    new_mts.result = {'values': copy.deepcopy(INITIAL_MTS_DATA)}
    mts.result = {'values': [[795, 1000], [797, 1100]]}
    mts.merge_at_beginning(new_mts)
    assert mts.result['values'] == INITIAL_MTS_DATA
Example #13
def test_merge_at_beginning_no_overlap():
    """ common case, no overlap """
    mts = MTS(MockRedis())
    mts.key_basis = lambda: 'some-key-goes-here'
    new_mts = MTS(MockRedis())

    mts.result = {'values': copy.deepcopy(INITIAL_MTS_DATA)}
    new_mts.result = {'values': [[788, 9]]}
    mts.merge_at_beginning(new_mts)
    assert mts.result['values'] == [[788, 9]] + INITIAL_MTS_DATA
Example #14
def test_merge_at_beginning_too_much_overlap():
    """ trying to merge so much duplicate data we give up and return just the cached data """
    mts = MTS(MockRedis())
    mts.key_basis = lambda: 'some-key-goes-here'
    new_mts = MTS(MockRedis())

    mts.result = {'values': copy.deepcopy(INITIAL_MTS_DATA)}
    new_mts.result = {'values': copy.deepcopy(INITIAL_MTS_DATA)}
    mts.merge_at_beginning(new_mts)
    assert mts.result['values'] == INITIAL_MTS_DATA
Example #15
def test_ttl_expire_yes():
    """ Use default expiries; verify that 120 secs of data doesn't get TTL'd. """
    data = []
    for i in xrange(12):
        then_dt = datetime.datetime.now() - datetime.timedelta(seconds=(10 * i))
        then_ts = int(then_dt.strftime('%s')) * 1000
        data.append([then_ts, i])
    data.reverse()

    mts = MTS(MockRedis())
    mts.result = {'values': data, 'tags': {'no': 'yes'}, 'name': 'whatever'}
    mts.expiry = 60
    mts.gc_expiry = 90
    assert mts.ttl_expire() == datetime.datetime.fromtimestamp(data[5][0] / 1000)
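
Reading the two ttl_expire tests together: with default expiries, 120 seconds of data is untouched and False comes back; with expiry=60 / gc_expiry=90, points older than expiry are dropped and the datetime of the new earliest point is returned. A sketch consistent with both; the trigger condition on gc_expiry is an inference, not the confirmed implementation:

import datetime

def ttl_expire(self):
    # If the eldest point exceeds gc_expiry, drop everything older than
    # expiry and report the new earliest timestamp; otherwise return False.
    values = self.result['values']
    now = datetime.datetime.now()
    eldest = datetime.datetime.fromtimestamp(values[0][0] / 1000)
    if eldest < now - datetime.timedelta(seconds=self.gc_expiry):
        cutoff_ms = int((now - datetime.timedelta(seconds=self.expiry)).strftime('%s')) * 1000
        self.result['values'] = [v for v in values if v[0] >= cutoff_ms]
        return datetime.datetime.fromtimestamp(self.result['values'][0][0] / 1000)
    return False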
Example #16
def test_merge_at_beginning_two_overlap():
    """ single overlapping point - make sure the new_mts version is favored """
    mts = MTS(MockRedis())
    mts.key_basis = lambda: 'some-key-goes-here'
    new_mts = MTS(MockRedis())

    mts.result = {'values': copy.deepcopy(INITIAL_MTS_DATA)}
    new_mts.result = {'values': [[788, 9], [789, 9001], [790, 10001]]}
    mts.merge_at_beginning(new_mts)
    assert mts.result['values'] == [[788, 9], [789, 9001], [790, 10001]] + INITIAL_MTS_DATA[2:]
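
The four merge_at_beginning tests encode the expected behaviors: prepend on no overlap, favor the new points on a small overlap, replace outright when the cached side is too short to iterate, and give up (keep the cache) when the duplication covers the whole new series. A sketch that satisfies all four, using simple overlap counting where the real method presumably reasons about resolution:

def merge_at_beginning(self, new_mts):
    new_values = new_mts.result['values']
    if not new_values:
        return
    values = self.result['values']
    # Cached leading points that fall inside the new data's time range:
    overlap = [v for v in values if v[0] <= new_values[-1][0]]
    if len(values) < 3 and len(overlap) == len(values):
        self.result['values'] = new_values  # cached side too short: replace it
    elif len(overlap) >= len(new_values):
        return  # too much duplication: keep the cached data as-is
    else:
        self.result['values'] = new_values + values[len(overlap):]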
Example #17
def hot(redis_client, kquery, kairos_time_range):
    """ Hot / Hit """
    logging.info("KQuery is HOT")
    response_kquery = {'results': [], 'sample_size': 0}
    for mts in MTS.from_cache(kquery.cached_data.get('mts_keys', []), redis_client):
        response_kquery = mts.build_response(kairos_time_range, response_kquery)

    # Handle a fully empty set of MTS: hand back the expected query with no values.
    if len(response_kquery['results']) == 0:
        kquery.query['values'] = []
        response_kquery['results'].append(kquery.query)
    return response_kquery
Example #18
def test_from_cache():
    redis_cli = MockRedis()
    keys = ['key1', 'key2', 'key3']
    ret_vals = list(MTS.from_cache(keys, redis_cli))
    assert redis_cli.derived_pipeline.pipe_get_call_count == 3
    assert redis_cli.derived_pipeline.execute_count == 1
    ctr = 0
    for mts in ret_vals:
        assert isinstance(mts, MTS)
        assert mts.result == {'hello': 'goodbye'}
        assert mts.expiry == 10800
        assert mts.redis_key == keys[ctr]
        ctr += 1
    assert redis_cli.set_call_count == 0 and redis_cli.get_call_count == 0
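
The pipeline counters in this test outline from_cache: queue one GET per key, execute once, and hydrate one MTS per payload (expiry == 10800 suggests that is the constructor default). A sketch:

import json

@classmethod
def from_cache(cls, redis_keys, redis_client):
    # One pipelined GET per key, a single round trip, one MTS per payload.
    pipeline = redis_client.pipeline()
    for key in redis_keys:
        pipeline.get(key)
    for key, payload in zip(redis_keys, pipeline.execute()):
        mts = cls(redis_client)
        mts.redis_key = key
        mts.result = json.loads(payload)
        yield mts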
Example #19
def test_from_result():
    """ Test from_result """
    redis_cli = MockRedis()
    results = {'results': [{'wubba-lubba': 'dub-dub'}, {'thats-the-way': 'the-news-goes'}]}
    kq = KQuery(redis_cli)
    kq.query = 'wat'
    ret_vals = MTS.from_result(results, redis_cli, kq)
    assert isinstance(ret_vals, GeneratorType)
    ctr = 0
    for mts in ret_vals:
        assert isinstance(mts, MTS)
        assert mts.result == results['results'][ctr]
        assert mts.expiry == 10800
        assert mts.cache_type == 'mts'
        assert mts.query_mask == 'wat'
        ctr += 1
    assert redis_cli.set_call_count == 0 and redis_cli.get_call_count == 0
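
Likewise, from_result is pinned down as a generator over result['results'] that stamps each MTS with the originating query as its mask (cache_type == 'mts' and expiry == 10800 again look like class defaults). A sketch:

@classmethod
def from_result(cls, result, redis_client, kquery):
    # One MTS per returned series, masked by the KQuery that produced it.
    for entry in result['results']:
        mts = cls(redis_client)
        mts.result = entry
        mts.query_mask = kquery.query
        yield mts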
Example #20
def cold(config, redis_client, kquery, kairos_time_range):
    """ Cold / Miss, with chunking.
        :param config: dict, 'tscached' level from config file.
        :param redis_client: redis.StrictRedis
        :param kquery: kquery.KQuery object
        :param kairos_time_range: dict, time range from HTTP request payload
        :return: dict, with keys sample_size (int) and results (list of dicts).
    """
    chunked_ranges = get_chunked_time_ranges(config, kairos_time_range)
    results = kquery.proxy_to_kairos_chunked(
        config['kairosdb']['kairosdb_host'],
        config['kairosdb']['kairosdb_port'], chunked_ranges)
    logging.info('KQuery is COLD - using %d chunks' % len(results))

    # Merge everything together as they come out - in chunked order - from the result.
    mts_lookup = {}
    ndx = len(results) - 1  # Results come out newest to eldest, so count backwards.
    while ndx >= 0:
        for mts in MTS.from_result(results[ndx]['queries'][0], redis_client,
                                   kquery):

            # Almost certainly a null result. Empty data should not be included in mts_lookup.
            if not mts.result or len(mts.result['values']) == 0:
                logging.debug('cache_calls.cold: got an empty chunked mts response')
                continue

            if not mts_lookup.get(mts.get_key()):
                mts_lookup[mts.get_key()] = mts
            else:
                # So, we could use merge_at_end, but it throws away beginning/ending values because of
                # partial windowing. But since we force align_start_time, we don't have that worry here.
                mts_lookup[mts.get_key()].result['values'] += mts.result['values']
        ndx -= 1

    # Accumulate the full KQuery response as the Redis operations are being queued up.
    response_kquery = {'results': [], 'sample_size': 0}
    pipeline = redis_client.pipeline()
    for mts in mts_lookup.values():
        if len(mts.result['values']) == 0:
            continue
        kquery.add_mts(mts)
        pipeline.set(mts.get_key(), json.dumps(mts.result), ex=mts.expiry)
        logging.debug('Cold: Writing %d points to MTS: %s' %
                      (len(mts.result['values']), mts.get_key()))
        response_kquery = mts.build_response(kairos_time_range,
                                             response_kquery,
                                             trim=False)

    # Handle a fully empty set of MTS. Bail out before we upsert.
    if len(mts_lookup) == 0:
        kquery.query['values'] = []
        s = {'name': kquery.query['name'], 'tags': {}, 'values': []}
        response_kquery['results'].append(s)
        logging.info('Received probable incorrect query; no results. Not caching!')
        return response_kquery

    # Execute the MTS Redis pipeline, then set the KQuery to its full new value.
    try:
        result = pipeline.execute()
        success_count = len(filter(lambda x: x is True, result))
        logging.info("MTS write pipeline: %d of %d successful" %
                     (success_count, len(result)))

        start_time = chunked_ranges[-1][0]
        end_time = chunked_ranges[0][1]
        kquery.upsert(start_time, end_time)
    except redis.exceptions.RedisError as e:
        # We want to eat this Redis exception, because in a catastrophe this becomes a straight proxy.
        logging.error('RedisError: ' + e.message)

    return response_kquery
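
The indexing at the end of cold() implies get_chunked_time_ranges returns (start, end) datetime pairs ordered newest chunk first; the concrete datetimes below are invented for illustration:

import datetime

chunked_ranges = [
    (datetime.datetime(2016, 5, 1, 11, 0), datetime.datetime(2016, 5, 1, 12, 0)),  # newest
    (datetime.datetime(2016, 5, 1, 10, 0), datetime.datetime(2016, 5, 1, 11, 0)),
    (datetime.datetime(2016, 5, 1, 9, 0), datetime.datetime(2016, 5, 1, 10, 0)),   # eldest
]
start_time = chunked_ranges[-1][0]  # eldest chunk's start
end_time = chunked_ranges[0][1]     # newest chunk's end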
Example #21
def warm(config, redis_client, kquery, kairos_time_range, range_needed):
    """ Warm / Stale
        config: nested dict loaded from the 'tscached' section of a yaml file.
        redis_client: redis.StrictRedis
        kquery: KQuery, generated from the client's request. get_cached was already called.
        kairos_time_range: dict, contents some subset of '{start,end}_{relative,absolute}'
        range_needed: describes kairos data needed to make cache complete for this request.
                      3-tuple (datetime start, datetime end, const<str>[FETCH_BEFORE, FETCH_AFTER])
    """
    logging.info('KQuery is WARM')

    expected_resolution = config['data'].get('expected_resolution', 10000)

    time_dict = {
        'start_absolute':
        int(range_needed[0].strftime('%s')) * 1000 - expected_resolution,
        'end_absolute': int(range_needed[1].strftime('%s')) * 1000,
    }

    new_kairos_result = kquery.proxy_to_kairos(
        config['kairosdb']['kairosdb_host'],
        config['kairosdb']['kairosdb_port'], time_dict)

    response_kquery = {'results': [], 'sample_size': 0}

    # Initial KQuery, and each MTS, can be slightly different on start/end. We need to get the min/max.
    start_times = [
        datetime.datetime.fromtimestamp(
            float(kquery.cached_data.get('earliest_data')))
    ]
    end_times = [
        datetime.datetime.fromtimestamp(
            float(kquery.cached_data.get('last_add_data')))
    ]

    cached_mts = {}  # redis key to MTS
    # pull in cached MTS, put them in a lookup table
    # TODO expected_resolution should be passed in

    for mts in MTS.from_cache(kquery.cached_data.get('mts_keys', []),
                              redis_client):
        kquery.add_mts(mts)  # we want to write these back eventually
        cached_mts[mts.get_key()] = mts

    # loop over newly returned MTS. if they already existed, merge/write. if not, just write.
    pipeline = redis_client.pipeline()
    sign = False  # flipped True below once any new data is merged or written
    for mts in MTS.from_result(new_kairos_result['queries'][0], redis_client,
                               kquery):
        old_mts = cached_mts.get(mts.get_key())
        if not old_mts:  # This MTS just started reporting and isn't yet in the cache (cold behavior).
            if len(mts.result['values']) > 0:
                sign = True
                kquery.add_mts(mts)
                pipeline.set(mts.get_key(),
                             json.dumps(mts.result),
                             ex=mts.expiry)
                response_kquery = mts.build_response(kairos_time_range,
                                                     response_kquery,
                                                     trim=False)
        else:
            if range_needed[2] == FETCH_AFTER:
                end_times.append(range_needed[1])
                old_mts.merge_at_end(mts)

                # This seems the only case where too-old data should be removed.
                expiry = old_mts.ttl_expire()
                if expiry:
                    start_times.append(expiry)

            elif range_needed[2] == FETCH_BEFORE:
                start_times.append(range_needed[0])
                old_mts.merge_at_beginning(mts)
            else:
                logging.error(
                    "WARM is not equipped for this range_needed attrib: %s" %
                    range_needed[2])
                return response_kquery
            sign = True
            if len(old_mts.result['values']) > 0:
                pipeline.set(old_mts.get_key(),
                             json.dumps(old_mts.result),
                             ex=old_mts.expiry)
            response_kquery = old_mts.build_response(kairos_time_range,
                                                     response_kquery)
    if not sign:
        for mts in cached_mts.itervalues():
            response_kquery = mts.build_response(kairos_time_range,
                                                 response_kquery)
    try:
        result = pipeline.execute()
        success_count = len(filter(lambda x: x is True, result))
        logging.info("MTS write pipeline: %d of %d successful" %
                     (success_count, len(result)))

        kquery.upsert(min(start_times), max(end_times))
    except redis.exceptions.RedisError as e:
        # Sneaky edge case where Redis fails after reading but before writing. Still return data!
        logging.error('RedisError: ' + e.message)
    return response_kquery
Example #22
def cold(config, redis_client, kquery, kairos_time_range):
    """ Cold / Miss, with chunking.
        :param config: dict, 'tscached' level from config file.
        :param redis_client: redis.StrictRedis
        :param kquery: kquery.KQuery object
        :param kairos_time_range: dict, time range from HTTP request payload
        :return: dict, with keys sample_size (int) and results (list of dicts).
    """
    chunked_ranges = get_chunked_time_ranges(config, kairos_time_range)
    results = kquery.proxy_to_kairos_chunked(config['kairosdb']['host'], config['kairosdb']['port'],
                                             chunked_ranges)
    logging.info('KQuery is COLD - using %d chunks' % len(results))

    # Merge everything together as they come out - in chunked order - from the result.
    mts_lookup = {}
    ndx = len(results) - 1  # Results come out newest to eldest, so count backwards.
    while ndx >= 0:
        for mts in MTS.from_result(results[ndx]['queries'][0], redis_client, kquery):

            # Almost certainly a null result. Empty data should not be included in mts_lookup.
            if not mts.result or len(mts.result['values']) == 0:
                logging.debug('cache_calls.cold: got an empty chunked mts response')
                continue

            if not mts_lookup.get(mts.get_key()):
                mts_lookup[mts.get_key()] = mts
            else:
                # So, we could use merge_at_end, but it throws away beginning/ending values because of
                # partial windowing. But since we force align_start_time, we don't have that worry here.
                mts_lookup[mts.get_key()].result['values'] += mts.result['values']
        ndx -= 1

    # Accumulate the full KQuery response as the Redis operations are being queued up.
    response_kquery = {'results': [], 'sample_size': 0}
    pipeline = redis_client.pipeline()
    for mts in mts_lookup.values():
        kquery.add_mts(mts)
        pipeline.set(mts.get_key(), json.dumps(mts.result), ex=mts.expiry)
        logging.debug('Cold: Writing %d points to MTS: %s' % (len(mts.result['values']), mts.get_key()))
        response_kquery = mts.build_response(kairos_time_range, response_kquery, trim=False)

    # Handle a fully empty set of MTS. Bail out before we upsert.
    if len(mts_lookup) == 0:
        kquery.query['values'] = []
        response_kquery['results'].append(kquery.query)
        logging.info('Received probable incorrect query; no results. Not caching!')
        return response_kquery

    # Execute the MTS Redis pipeline, then set the KQuery to its full new value.
    try:
        result = pipeline.execute()
        success_count = len(filter(lambda x: x is True, result))
        logging.info("MTS write pipeline: %d of %d successful" % (success_count, len(result)))

        start_time = chunked_ranges[-1][0]
        end_time = chunked_ranges[0][1]
        kquery.upsert(start_time, end_time)
    except redis.exceptions.RedisError as e:
        # We want to eat this Redis exception, because in a catastrophe this becomes a straight proxy.
        logging.error('RedisError: ' + e.message)

    return response_kquery
Example #23
def test_key_basis_simple():
    """ simple case - requesting one specific MTS, since mask is perfectly equivalent."""
    mts = MTS(MockRedis())
    mts.query_mask = MTS_CARDINALITY
    mts.result = MTS_CARDINALITY
    assert mts.key_basis() == MTS_CARDINALITY
Example #24
def warm(config, redis_client, kquery, kairos_time_range, range_needed):
    """ Warm / Stale
        config: nested dict loaded from the 'tscached' section of a yaml file.
        redis_client: redis.StrictRedis
        kquery: KQuery, generated from the client's request. get_cached was already called.
        kairos_time_range: dict, contents some subset of '{start,end}_{relative,absolute}'
        range_needed: describes kairos data needed to make cache complete for this request.
                      3-tuple (datetime start, datetime end, const<str>[FETCH_BEFORE, FETCH_AFTER])
    """
    logging.info('KQuery is WARM')

    expected_resolution = config['data'].get('expected_resolution', 10000)

    time_dict = {
        'start_absolute': int(range_needed[0].strftime('%s')) * 1000 - expected_resolution,
        'end_absolute': int(range_needed[1].strftime('%s')) * 1000,
    }

    new_kairos_result = kquery.proxy_to_kairos(config['kairosdb']['host'], config['kairosdb']['port'],
                                               time_dict)

    response_kquery = {'results': [], 'sample_size': 0}

    # Initial KQuery, and each MTS, can be slightly different on start/end. We need to get the min/max.
    start_times = [datetime.datetime.fromtimestamp(float(kquery.cached_data.get('earliest_data')))]
    end_times = [datetime.datetime.fromtimestamp(float(kquery.cached_data.get('last_add_data')))]

    cached_mts = {}  # redis key to MTS
    # pull in cached MTS, put them in a lookup table
    # TODO expected_resolution should be passed in
    for mts in MTS.from_cache(kquery.cached_data.get('mts_keys', []), redis_client):
        kquery.add_mts(mts)  # we want to write these back eventually
        cached_mts[mts.get_key()] = mts

    # loop over newly returned MTS. if they already existed, merge/write. if not, just write.
    pipeline = redis_client.pipeline()
    for mts in MTS.from_result(new_kairos_result['queries'][0], redis_client, kquery):
        old_mts = cached_mts.get(mts.get_key())

        if not old_mts:  # This MTS just started reporting and isn't yet in the cache (cold behavior).
            kquery.add_mts(mts)
            pipeline.set(mts.get_key(), json.dumps(mts.result), ex=mts.expiry)
            response_kquery = mts.build_response(kairos_time_range, response_kquery, trim=False)
        else:
            if range_needed[2] == FETCH_AFTER:
                end_times.append(range_needed[1])
                old_mts.merge_at_end(mts)

                # This seems the only case where too-old data should be removed.
                expiry = old_mts.ttl_expire()
                if expiry:
                    start_times.append(expiry)

            elif range_needed[2] == FETCH_BEFORE:
                start_times.append(range_needed[0])
                old_mts.merge_at_beginning(mts)
            else:
                logging.error("WARM is not equipped for this range_needed attrib: %s" % range_needed[2])
                return response_kquery

            pipeline.set(old_mts.get_key(), json.dumps(old_mts.result), ex=old_mts.expiry)
            response_kquery = old_mts.build_response(kairos_time_range, response_kquery)
    try:
        result = pipeline.execute()
        success_count = len(filter(lambda x: x is True, result))
        logging.info("MTS write pipeline: %d of %d successful" % (success_count, len(result)))

        kquery.upsert(min(start_times), max(end_times))
    except redis.exceptions.RedisError as e:
        # Sneaky edge case where Redis fails after reading but before writing. Still return data!
        logging.error('RedisError: ' + e.message)
    return response_kquery