Example #1
def test_get_chunked_time_ranges_last_12h():
    kairos_timing = {'start_relative': {'unit': 'hours', 'value': '12'}}
    now = datetime.datetime.now()
    results = get_chunked_time_ranges({'chunking': {'chunk_length': 1800}}, kairos_timing)
    assert len(results) == 6
    for i in xrange(len(results)):
        offset = datetime.timedelta(minutes=(i * 120))
        assert results[i][1] == now - offset
        assert results[i][0] == now - offset - datetime.timedelta(minutes=120, seconds=-1)
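
The assertions above encode a boundary convention: ranges come back newest-first, and each chunk's start sits one second after the next-older chunk's end, so adjacent chunks never overlap. Note the test configures chunk_length 1800 yet expects 2-hour chunks, so the effective chunk width presumably also scales with the requested window. A minimal sketch of the boundary arithmetic, using a hypothetical helper (not tscached's implementation) that takes the chunk width explicitly:

import datetime


def chunked_ranges_sketch(window, chunk, now=None):
    """ Build (start, end) tuples newest-first; each start is offset one second
        past the next-older chunk's end so adjacent chunks never overlap.
        Hypothetical helper for illustration only.
    """
    now = now or datetime.datetime.now()
    ranges = []
    end = now
    covered = datetime.timedelta(0)
    while covered < window:
        start = end - chunk + datetime.timedelta(seconds=1)
        ranges.append((start, end))
        end = start - datetime.timedelta(seconds=1)
        covered += chunk
    return ranges


ranges = chunked_ranges_sketch(datetime.timedelta(hours=12), datetime.timedelta(minutes=120))
assert len(ranges) == 6
assert ranges[1][1] == ranges[0][0] - datetime.timedelta(seconds=1)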
Example #2
def test_get_chunked_time_ranges_last_1h_clock_drift():
    """ Most tests have time frozen, so the clock doesn't drift during execution.
        This one purposely does not. It could flake if it takes over 2s to execute.
    """
    kairos_timing = {'start_relative': {'unit': 'hours', 'value': '1'}}
    now = datetime.datetime.now()
    results = get_chunked_time_ranges({'chunking': {'chunk_length': 1800}}, kairos_timing)
    assert len(results) == 2
    for i in xrange(len(results) - 1):
        offset = datetime.timedelta(minutes=(i * 30))
        end_diff = results[i][1] - (now - offset)
        assert end_diff < datetime.timedelta(seconds=2)
        begin_diff = results[i][0] - (now - offset - datetime.timedelta(minutes=30))
        assert begin_diff < datetime.timedelta(seconds=2)
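
The docstring notes that the other timing tests freeze the clock. A minimal sketch of how that pinning could look, assuming the freezegun library (the frozen timestamp is arbitrary):

import datetime

from freezegun import freeze_time


@freeze_time('2016-01-01 12:00:00')
def test_clock_is_pinned():
    # With the clock frozen, now() never drifts between calls, which is what
    # makes exact-equality assertions like those in Example #1 safe.
    first = datetime.datetime.now()
    second = datetime.datetime.now()
    assert first == second == datetime.datetime(2016, 1, 1, 12, 0, 0)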
Example #3
def cold(config, redis_client, kquery, kairos_time_range):
    """ Cold / Miss, with chunking.
        :param config: dict, 'tscached' level from config file.
        :param redis_client: redis.StrictRedis
        :param kquery: kquery.KQuery object
        :param kairos_time_range: dict, time range from HTTP request payload
        :return: dict, with keys sample_size (int) and results (list of dicts).
    """
    chunked_ranges = get_chunked_time_ranges(config, kairos_time_range)
    results = kquery.proxy_to_kairos_chunked(config['kairosdb']['kairosdb_host'],
                                             config['kairosdb']['kairosdb_port'],
                                             chunked_ranges)
    logging.info('KQuery is COLD - using %d chunks' % len(results))

    # Merge everything together as they come out - in chunked order - from the result.
    mts_lookup = {}
    ndx = len(results) - 1  # Results come out newest to oldest, so count backwards.
    while ndx >= 0:
        for mts in MTS.from_result(results[ndx]['queries'][0], redis_client, kquery):

            # Almost certainly a null result. Empty data should not be included in mts_lookup.
            if not mts.result or len(mts.result['values']) == 0:
                logging.debug('cache_calls.cold: got an empty chunked mts response')
                continue

            if not mts_lookup.get(mts.get_key()):
                mts_lookup[mts.get_key()] = mts
            else:
                # So, we could use merge_at_end, but it throws away beginning/ending values because of
                # partial windowing. But since we force align_start_time, we don't have that worry here.
                mts_lookup[mts.get_key()].result['values'] += mts.result['values']
        ndx -= 1

    # Accumulate the full KQuery response as the Redis operations are being queued up.
    response_kquery = {'results': [], 'sample_size': 0}
    pipeline = redis_client.pipeline()
    for mts in mts_lookup.values():
        if len(mts.result['values']) == 0:
            continue
        kquery.add_mts(mts)
        pipeline.set(mts.get_key(), json.dumps(mts.result), ex=mts.expiry)
        logging.debug('Cold: Writing %d points to MTS: %s' % (len(mts.result['values']), mts.get_key()))
        response_kquery = mts.build_response(kairos_time_range, response_kquery, trim=False)

    # Handle a fully empty set of MTS. Bail out before we upsert.
    if len(mts_lookup) == 0:
        kquery.query['values'] = []
        s = {'name': kquery.query['name'], 'tags': {}, 'values': []}
        response_kquery['results'].append(s)
        logging.info('Received probable incorrect query; no results. Not caching!')
        return response_kquery

    # Execute the MTS Redis pipeline, then set the KQuery to its full new value.
    try:
        result = pipeline.execute()
        success_count = len(filter(lambda x: x is True, result))
        logging.info("MTS write pipeline: %d of %d successful" %
                     (success_count, len(result)))

        start_time = chunked_ranges[-1][0]
        end_time = chunked_ranges[0][1]
        kquery.upsert(start_time, end_time)
    except redis.exceptions.RedisError as e:
        # We want to eat this Redis exception, because in a catastrophe this becomes a straight proxy.
        logging.error('RedisError: ' + e.message)

    return response_kquery
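
The backwards ndx walk plus list concatenation is the heart of the merge: chunks arrive newest-first, so iterating in reverse appends values in chronological order. A stripped-down sketch of the same pattern, with plain dicts standing in for MTS objects and hypothetical series keys playing the role of mts.get_key():

chunks = [  # newest chunk first, as the chunked proxy call is assumed to return them
    {'cpu.user': [[300, 1.0]]},
    {'cpu.user': [[200, 0.5]], 'cpu.idle': [[200, 9.0]]},
    {'cpu.user': [[100, 0.2]]},
]
merged = {}
for chunk in reversed(chunks):  # oldest first, so values land in time order
    for key, values in chunk.items():
        merged.setdefault(key, []).extend(values)
assert merged['cpu.user'] == [[100, 0.2], [200, 0.5], [300, 1.0]]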
Example #4
def test_get_chunked_time_ranges_last_15m():
    kairos_timing = {'start_relative': {'unit': 'minutes', 'value': '15'}}
    now = datetime.datetime.now()
    then = now - datetime.timedelta(minutes=15)
    assert get_chunked_time_ranges({'chunking': {}}, kairos_timing) == [(then, now)]
Example #5
def cold(config, redis_client, kquery, kairos_time_range):
    """ Cold / Miss, with chunking.
        :param config: dict, 'tscached' level from config file.
        :param redis_client: redis.StrictRedis
        :param kquery: kquery.KQuery object
        :param kairos_time_range: dict, time range from HTTP request payload
        :return: dict, with keys sample_size (int) and results (list of dicts).
    """
    chunked_ranges = get_chunked_time_ranges(config, kairos_time_range)
    results = kquery.proxy_to_kairos_chunked(config['kairosdb']['host'], config['kairosdb']['port'],
                                             chunked_ranges)
    logging.info('KQuery is COLD - using %d chunks' % len(results))

    # Merge everything together as they come out - in chunked order - from the result.
    mts_lookup = {}
    ndx = len(results) - 1  # Results come out newest to oldest, so count backwards.
    while ndx >= 0:
        for mts in MTS.from_result(results[ndx]['queries'][0], redis_client, kquery):

            # Almost certainly a null result. Empty data should not be included in mts_lookup.
            if not mts.result or len(mts.result['values']) == 0:
                logging.debug('cache_calls.cold: got an empty chunked mts response')
                continue

            if not mts_lookup.get(mts.get_key()):
                mts_lookup[mts.get_key()] = mts
            else:
                # So, we could use merge_at_end, but it throws away beginning/ending values because of
                # partial windowing. But since we force align_start_time, we don't have that worry here.
                mts_lookup[mts.get_key()].result['values'] += mts.result['values']
        ndx -= 1

    # Accumulate the full KQuery response as the Redis operations are being queued up.
    response_kquery = {'results': [], 'sample_size': 0}
    pipeline = redis_client.pipeline()
    for mts in mts_lookup.values():
        kquery.add_mts(mts)
        pipeline.set(mts.get_key(), json.dumps(mts.result), ex=mts.expiry)
        logging.debug('Cold: Writing %d points to MTS: %s' % (len(mts.result['values']), mts.get_key()))
        response_kquery = mts.build_response(kairos_time_range, response_kquery, trim=False)

    # Handle a fully empty set of MTS. Bail out before we upsert.
    if len(mts_lookup) == 0:
        kquery.query['values'] = []
        response_kquery['results'].append(kquery.query)
        logging.info('Received probable incorrect query; no results. Not caching!')
        return response_kquery

    # Execute the MTS Redis pipeline, then set the KQuery to its full new value.
    try:
        result = pipeline.execute()
        success_count = len(filter(lambda x: x is True, result))
        logging.info("MTS write pipeline: %d of %d successful" % (success_count, len(result)))

        start_time = chunked_ranges[-1][0]
        end_time = chunked_ranges[0][1]
        kquery.upsert(start_time, end_time)
    except redis.exceptions.RedisError as e:
        # We want to eat this Redis exception, because in a catastrophe this becomes a straight proxy.
        logging.error('RedisError: ' + e.message)

    return response_kquery
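
The try block above queues one TTL-bearing SET per series and commits them in a single round trip, then counts the per-command results. A standalone sketch of that redis-py pipelining pattern, with hypothetical keys, payloads, and TTL; note that len(filter(...)) in the code above works on Python 2 only, since Python 3's filter returns an iterator:

import json

import redis

client = redis.StrictRedis(host='localhost', port=6379)
pipeline = client.pipeline()
for key, payload in (('mts:example:a', {'values': [[1, 2]]}),
                     ('mts:example:b', {'values': [[3, 4]]})):
    pipeline.set(key, json.dumps(payload), ex=300)  # ex is the TTL in seconds

results = pipeline.execute()  # one network round trip for all queued SETs
success_count = sum(1 for r in results if r is True)  # works on Python 2 and 3
print('%d of %d successful' % (success_count, len(results)))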