示例#1
0
def test_hdr_interop():
    """Decode two hdrhistogram_c-encoded samples and cross-check them."""
    # Rebuild both the plain and the corrected histogram from the
    # C-library-encoded blobs.
    plain = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    corrected = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    plain.decode_and_add(ENCODE_SAMPLES_HDRHISTOGRAM_C[0])
    corrected.decode_and_add(ENCODE_SAMPLES_HDRHISTOGRAM_C[1])

    # Percentiles plus min/max values must line up between the two.
    check_percentiles(plain, corrected)
示例#2
0
    def log_stats(self):
        """Emit one JSON stats record: counter rates plus latency/ttl stats."""
        snapshot = self.stats.copy()

        # Per-key rates: delta against the previous snapshot divided by the
        # interval; on the very first pass (no previous snapshot) emit the
        # raw counter values instead.
        rates = OrderedDict()
        previous = self.stats_snapshot_previous
        for key in snapshot.keys():
            if previous:
                delta = float(snapshot[key]) - float(previous[key])
                rates[key + '_rate'] = delta / float(self.stats_interval)
            else:
                rates[key + '_rate'] = snapshot[key]
        self.stats_snapshot_previous = snapshot

        # Snapshot and reset the per-interval latency histogram.
        ih = copy.copy(self.interval_histogram)
        self.interval_histogram = HdrHistogram(1, 10000000, 3)

        latency = OrderedDict()
        latency['latency_max'] = ih.get_max_value()
        latency['latency_min'] = ih.get_min_value()
        latency['latency_mean'] = ih.get_mean_value()
        for pct in (99.90, 99.00):
            latency['latency_{0:.2f}'.format(pct)] = \
                ih.get_value_at_percentile(pct)

        # Snapshot and reset the per-interval ttl histogram.
        th = copy.copy(self.ttl_histogram)
        self.ttl_histogram = HdrHistogram(1, 10000000, 3)
        ttl = OrderedDict()
        ttl['ttl_max'] = th.get_max_value()
        ttl['ttl_min'] = th.get_min_value()
        ttl['ttl_mean'] = th.get_mean_value()
        for pct in (99.90, 99.00):
            ttl['ttl_{0:.2f}'.format(pct)] = th.get_value_at_percentile(pct)

        # Message-lag histogram reporting, currently disabled:
        #ml = copy.copy(self.msglag_histogram)
        #self.msglag_histogram = HdrHistogram(1, 10000000, 3)
        #mlag = OrderedDict()
        #mlag['message_lag_max'] = ml.get_max_value()
        #mlag['message_lag_min'] = ml.get_min_value()
        #mlag['message_lag_mean'] = ml.get_mean_value()
        #mlag['message_lag_stddev'] = ml.get_stddev()
        #for x in [99.99, 99.95, 99.00, 95.00, 90.00]:
        #    mlag['message_lag_%.2f' % x] = ml.get_value_at_percentile(x)

        # Assemble the record in a stable order: raw counters, rates,
        # latency, ttl, timestamp.
        data = OrderedDict()
        data.update(snapshot)
        data.update(rates)
        data.update(latency)
        data.update(ttl)
        #data.update(mlag)
        data.update({'timestamp': format_time(datetime.utcnow())})

        self._stats_logger.info("{data}", data=json.dumps(data))
示例#3
0
def test_invalid_significant_figures():
    """Out-of-range significant-figure counts must raise ValueError."""
    for bad_digits in (-1, 6):
        try:
            HdrHistogram(LOWEST, HIGHEST, bad_digits)
        except ValueError:
            continue
        # Constructor accepted an invalid digit count: fail the test.
        assert False
示例#4
0
 def __init__(self, num_histograms, cores, flow_config, opts):
     """Set up per-flow measurement state: histograms and counters.

     Args:
         num_histograms: number of per-slot histograms to allocate.
         cores: core assignment, stored as-is.
         flow_config: per-flow configuration; its length fixes the flow count.
         opts: options object; reads opts.print_values and opts.output_file.
     """
     # Range 1..1,000,000 with 2 significant figures -- units assumed to be
     # usec; TODO confirm against the recording call sites.
     self.histograms = [
         HdrHistogram(1, 1000 * 1000, 2) for i in range(num_histograms)
     ]
     self.global_histogram = HdrHistogram(1, 1000 * 1000, 2)
     self.cores = cores
     self.flow_config = flow_config
     # One violation counter and one drop counter per configured flow.
     self.violations = [0 for i in range(len(flow_config))]
     self.dropped = [0 for i in range(len(flow_config))]
     self.print_values = opts.print_values
     if self.print_values:
         # One per-flow output file, e.g. "<output_file>_flow0";
         # handles stay open for the object's lifetime.
         self.print_files = [
             open(opts.output_file + '_flow' + str(flow), 'w+')
             for flow in range(len(flow_config))
         ]
 def generate_histogram(cls, jitter_measurements, offset):
     """Build a histogram from jitter samples shifted by *offset*.

     Samples where (m + offset) < 1 are silently skipped: values below 1
     are outside the histogram's recordable range.
     NOTE(review): first parameter is `cls`; presumably decorated with
     @classmethod outside this view -- confirm.
     """
     histogram = HdrHistogram(cls.HISTOGRAM_MIN, cls.HISTOGRAM_MAX,
                              cls.SIG_FIGS)
     for m in jitter_measurements:
         if (m + offset) >= 1:
             histogram.record_value((m + offset))
     return histogram
示例#6
0
    def encode_bins(self, p_output):
        """Convert fio JSON clat bin tables into HdrHistogram-encoded blobs.

        For each of the read/write jobs, replaces the raw 'clat' bin table
        with an encoded histogram under 'hist' and drops the 'bins' and
        'percentile' keys.

        :param p_output: fio JSON output, as a string
        :return: the transformed JSON, dumped back to a string
        """
        p_output = json.loads(p_output)
        p_output['jobs'][0].pop('trim')
        test_list = ['read', 'write']

        for test in test_list:
            # Up to 5 hours expressed in msec, 3 significant digits.
            histogram = HdrHistogram(1, 5 * 3600 * 1000, 3)
            clat = p_output['jobs'][0][test]['clat']['bins']
            total_buckets = clat['FIO_IO_U_PLAT_NR']
            grp_msb_bits = clat['FIO_IO_U_PLAT_BITS']
            buckets_per_grp = clat['FIO_IO_U_PLAT_VAL']

            # BUG FIX: xrange and true division are Python-2-only here;
            # range and // keep bucket/group indices integral on Python 3
            # and behave identically on Python 2.
            for bucket in range(total_buckets):
                if clat[str(bucket)]:
                    grp = bucket // buckets_per_grp
                    subbucket = bucket % buckets_per_grp
                    if grp == 0:
                        val = subbucket - 1
                    else:
                        # Reconstruct the representative latency value from
                        # fio's grouped log-linear binning scheme.
                        base = 2**(grp_msb_bits + grp - 1)
                        val = int(base + (base / buckets_per_grp) *
                                  (subbucket - 0.5))
                    histogram.record_value(val, clat[str(bucket)])

            p_output['jobs'][0][test]['clat']['hist'] = histogram.encode()
            p_output['jobs'][0][test]['clat'].pop('bins')
            p_output['jobs'][0][test]['clat'].pop('percentile')

        return json.dumps(p_output)
示例#7
0
def test_basic():
    """Verify the derived bucket geometry of a default-range histogram."""
    hist = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    expected = {
        'bucket_count': 22,
        'sub_bucket_count': 2048,
        'counts_len': 23552,
        'unit_magnitude': 0,
        'sub_bucket_half_count_magnitude': 10,
    }
    for attr, value in expected.items():
        assert getattr(hist, attr) == value
示例#8
0
def test_mean_stddev():
    """Mean and standard deviation over the canonical value list."""
    hist = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    # Record every sample from the shared fixture list.
    for sample in VALUES_LIST:
        hist.record_value(sample)
    assert hist.get_mean_value() == 2000.5
    assert hist.get_stddev() == 1000.5
示例#9
0
def test_tagged_v2_log_add():
    """Drain a tagged V2 log, adding every interval into one histogram."""
    accumulated = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    reader = HistogramLogReader(TAGGED_V2_LOG, accumulated)
    # add_next_interval_histogram() returns a falsy value at end of log.
    while reader.add_next_interval_histogram():
        pass
示例#10
0
def test_jHiccup_v2_log():
    """Replay the jHiccup V2 log once per checklist and verify its targets.

    Each checklist supplies kwargs for get_next_interval_histogram() plus a
    'target' dict mapping Python expressions to expected values.  The
    expressions are eval'ed against this function's locals (for example
    histogram_count / total_count), so the local names below must not be
    renamed.
    """
    accumulated_histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    for checklist in JHICCUP_CHECKLISTS:
        accumulated_histogram.reset()
        log_reader = HistogramLogReader(JHICCUP_V2_LOG_NAME,
                                        accumulated_histogram)

        histogram_count = 0
        total_count = 0
        # pop() mutates the checklist: the remaining keys become the kwargs
        # of get_next_interval_histogram().
        target_numbers = checklist.pop('target')
        while 1:
            decoded_histogram = log_reader.get_next_interval_histogram(
                **checklist)
            if not decoded_histogram:
                break
            histogram_count += 1
            total_count += decoded_histogram.get_total_count()
            accumulated_histogram.add(decoded_histogram)
            # These logs use 8 byte counters
            assert decoded_histogram.get_word_size() == 8
            # These logs use the default 1.0 conversion ratio
            assert decoded_histogram.get_int_to_double_conversion_ratio(
            ) == 1.0
        # eval() over trusted, in-repo test data only; never reuse this
        # pattern with external input.
        for statement in target_numbers:
            assert eval(statement) == target_numbers[statement]

        log_reader.close()
示例#11
0
def test_hist_codec_partial():
    """Encode a filled histogram and verify decode_and_add() round-trips it.

    Counters up to `half_count` must arrive intact (multiplier=1) and
    everything above must stay zero in the receiving histogram.
    """
    histogram = HdrHistogram(LOWEST, WRK2_MAX_LATENCY, SIGNIFICANT)

    partial_histogram = HdrHistogram(LOWEST, WRK2_MAX_LATENCY, SIGNIFICANT)

    # put some known numbers in the first half buckets
    # NOTE(review): despite the name and the comment above, this is the FULL
    # counts_len, not half of it -- confirm whether `counts_len // 2` was
    # intended (the second check below then covers an empty range).
    half_count = partial_histogram.counts_len
    fill_hist_counts(partial_histogram, half_count)
    encoded = partial_histogram.encode()
    histogram.decode_and_add(encoded)

    # now verify that the partial counters are identical to the original
    check_hist_counts(histogram, half_count, multiplier=1)
    check_hist_counts(histogram,
                      histogram.counts_len,
                      start=half_count + 1,
                      multiplier=0)
示例#12
0
def test_scaled_highest_equiv_value():
    """get_highest_equivalent_value() rounds up to the bucket ceiling."""
    histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    cases = [(8180, 8183), (8191, 8191), (8193, 8199),
             (9995, 9999), (10007, 10007), (10008, 10015)]
    for value, ceiling in cases:
        assert histogram.get_highest_equivalent_value(value) == ceiling
示例#13
0
def test_scaled_highest_equiv_value():
    """Bucket-ceiling lookups for a handful of scaled values."""
    histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    inputs = (8180, 8191, 8193, 9995, 10007, 10008)
    ceilings = (8183, 8191, 8199, 9999, 10007, 10015)
    for value, expected in zip(inputs, ceilings):
        assert histogram.get_highest_equivalent_value(value) == expected
示例#14
0
    def stats_setup(self):
        """Initialize stats state: histograms, log file, and periodic logging.

        Creates the interval/ttl/msglag histograms, opens a timestamped log
        file under self.stats_dir, and starts a LoopingCall that invokes
        log_stats() every self.stats_interval seconds.
        """
        # No previous snapshot yet: the first log_stats() pass emits raw
        # counters instead of rates.
        self.stats_snapshot_previous = None
        self.interval_histogram = HdrHistogram(1, 10000000, 3)
        self.ttl_histogram = HdrHistogram(1, 10000000, 3)
        self.msglag_histogram = HdrHistogram(1, 10000000, 3)

        # Log file name identifies host, sender comp ID and market depth,
        # suffixed with a UTC timestamp.
        namespace = "stats_{0!s}_{1!s}_depth_{2!s}".format(
            platform.node(), self.SenderCompID, self.config['market_depth'])
        filename = os.path.join(
            self.stats_dir, namespace +
            datetime.strftime(datetime.utcnow(), "_%Y%m%d%H%M%S") + '.log')

        # Append-mode file observer; the empty namespace keeps the emitted
        # log lines unprefixed.
        self._stats_logger = Logger(observer=passThroughFileLogObserver(
            io.open(filename, "a")),
                                    namespace='')
        self.stats_loop = task.LoopingCall(self.log_stats)
        self.stats_loop.start(self.stats_interval)
def test_basic():
    """Histogram geometry; bucket/counts sizes depend on platform bitness."""
    histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    if BITNESS == 64:
        expected_bucket_count, expected_counts_len = 22, 23552
    else:
        expected_bucket_count, expected_counts_len = 21, 22528
    assert histogram.bucket_count == expected_bucket_count
    assert histogram.sub_bucket_count == 2048
    assert histogram.counts_len == expected_counts_len
    assert histogram.unit_magnitude == 0
    assert histogram.sub_bucket_half_count_magnitude == 10
示例#16
0
def record_latency(name):
    """Time the wrapped block and record the elapsed time (ms) under *name*.

    Generator body for a context manager (the decorator is assumed to live
    above this view -- confirm). Records even when the block raises.
    """
    try:
        start = time.time()
        yield
    finally:
        elapsed_ms = (time.time() - start) * 1000  # ms
        if name not in hists:
            # Lazily allocate: 1ms-30sec, 2 sig figs
            hists[name] = HdrHistogram(1, 30 * 1000, 2)
        hists[name].record_value(elapsed_ms)
示例#17
0
def test_tagged_v2_log():
    """'A'-tagged and untagged intervals must accumulate to equal histograms."""
    histogram_count = 0
    total_count = 0
    untagged = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    tagged = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    reader = HistogramLogReader(TAGGED_V2_LOG, untagged)
    while True:
        interval = reader.get_next_interval_histogram()
        if not interval:
            break
        histogram_count += 1
        total_count += interval.get_total_count()
        # Intervals carry either tag 'A' or no tag at all.
        if interval.get_tag() == 'A':
            tagged.add(interval)
        else:
            assert interval.get_tag() is None
            untagged.add(interval)

    assert untagged.equals(tagged)
    assert total_count == 32290
示例#18
0
def dumpHdrhLog(file, data):
    """Record *data* into a histogram and dump its percentile distribution.

    :param file: output file path
    :param data: iterable of latency samples to record
    """
    histogram = HdrHistogram(
        MIN_LATEMCY_USECS, MAX_LATENCY_USECS, LATENCY_SIGNIFICANT_DIGITS
    )

    for d in data:
        histogram.record_value(d)

    # BUG FIX: the original passed open(file, "wb") inline and never closed
    # it, leaking the handle; the context manager guarantees the close.
    with open(file, "wb") as out:
        histogram.output_percentile_distribution(
            out, USEC_PER_SEC, TICKS_PER_HALF_DISTANCE
        )
示例#19
0
    def __init__(self, total, concurrency):
        """Track progress, errors and latency for a batch of operations.

        :param total: total number of operations expected
        :param concurrency: maximum number of in-flight operations
        """
        self._total = total
        self._concurrency = concurrency
        self._active = 0
        self._done = 0
        self._error = 0
        self._condition = threading.Condition()
        # Range 1..300,000,000,000 with 4 significant figures -- units
        # assumed to be nsec (300 sec); TODO confirm against record sites.
        self._histogram = HdrHistogram(1, 300000000000, 4)

        # Sentinels chosen so the first recorded sample always wins the
        # min/max comparison.
        self._minLatency = sys.maxsize
        # BUG FIX: int('-inf') raises ValueError at construction time;
        # float('-inf') is the intended "smaller than everything" sentinel.
        self._maxLatency = float('-inf')
示例#20
0
def check_cod_perf():
    """Rough encode throughput: encode a 30%-filled histogram 1000 times."""
    histogram = HdrHistogram(LOWEST, WRK2_MAX_LATENCY, 2)
    # Fill the slice covering 20%..50% of the counts array.
    start_index = (20 * histogram.counts_len) // 100
    end_index = start_index + (30 * histogram.counts_len) // 100
    fill_hist_counts(histogram, end_index, start_index)

    # Time 1000 consecutive encodes and print the wall-clock delta.
    begin = datetime.datetime.now()
    for _ in range(1000):
        histogram.encode()
    print(datetime.datetime.now() - begin)
示例#21
0
def test_large_numbers():
    """Percentile lookups stay equivalent for values in the tens of millions."""
    histogram = HdrHistogram(20000000, 100000000, 5)
    for sample in (100000000, 20000000, 30000000):
        histogram.record_value(sample)
    # (expected value, percentile) pairs around the recorded samples.
    expectations = [
        (20000000, 50.0),
        (30000000, 83.33),
        (100000000, 83.34),
        (100000000, 99.0),
    ]
    for value, percentile in expectations:
        assert histogram.values_are_equivalent(
            value, histogram.get_value_at_percentile(percentile))
示例#22
0
def test_get_value_at_percentile():
    """Percentile lookups around the 50% boundary with tiny populations."""
    histogram = HdrHistogram(LOWEST, 3600000000, 3)
    histogram.record_value(1)
    histogram.record_value(2)
    assert histogram.get_value_at_percentile(50.0) == 1
    assert histogram.get_value_at_percentile(50.00000000000001) == 1
    # assert histogram.get_value_at_percentile(50.0000000000001) == 2
    # Three more 2s shift the distribution: 1 appears once out of five.
    for _ in range(3):
        histogram.record_value(2)
    # val = histogram.get_value_at_percentile(25)
    # assert histogram.get_value_at_percentile(25) == 2
    assert histogram.get_value_at_percentile(30) == 2
示例#23
0
def check_hist_encode(word_size, digits, expected_compressed_length,
                      fill_start_percent, fill_count_percent):
    """Encode a (possibly partially filled) histogram; verify the blob size."""
    histogram = HdrHistogram(LOWEST,
                             WRK2_MAX_LATENCY,
                             digits,
                             word_size=word_size)
    if fill_count_percent:
        # Fill the [start%, start% + count%) slice of the counts array.
        start_index = (fill_start_percent * histogram.counts_len) // 100
        end_index = start_index + \
            (fill_count_percent * histogram.counts_len) // 100
        fill_hist_counts(histogram, end_index, start_index)
    assert len(histogram.encode()) == expected_compressed_length
示例#24
0
def check_dec_perf():
    """Rough decode throughput: decode-and-add a blob into itself 1000 times."""
    histogram = HdrHistogram(LOWEST, WRK2_MAX_LATENCY, 2)
    # Fill the slice covering 20%..50% of the counts array, then encode once.
    start_index = (20 * histogram.counts_len) // 100
    end_index = start_index + (30 * histogram.counts_len) // 100
    fill_hist_counts(histogram, end_index, start_index)
    blob = histogram.encode()

    # Time 1000 decode-and-add passes and print the wall-clock delta.
    begin = datetime.datetime.now()
    for _ in range(1000):
        histogram.decode_and_add(blob)
    print(datetime.datetime.now() - begin)
示例#25
0
def test_highest_equivalent_value():
    """Ceilings for KiB-scaled values: each bucket tops out at x*1024+1023."""
    histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    cases = [(8180, 8183), (8191, 8191), (8193, 8199),
             (9995, 9999), (10007, 10007), (10008, 10015)]
    for value, ceiling in cases:
        actual = histogram.get_highest_equivalent_value(value * 1024)
        assert actual == ceiling * 1024 + 1023
示例#26
0
def check_hist_codec_b64(word_size, b64_wrap):
    """Round-trip encode/decode: zeros stay zero, filled counters double."""
    histogram = HdrHistogram(LOWEST,
                             WRK2_MAX_LATENCY,
                             SIGNIFICANT,
                             b64_wrap=b64_wrap,
                             word_size=word_size)
    # An all-zero histogram must survive an encode/decode round trip
    # with every counter still zero.
    histogram.decode_and_add(histogram.encode())
    check_hist_counts(histogram, histogram.counts_len, multiplier=0)
    # After filling, decoding a copy of itself back in doubles each counter.
    fill_hist_counts(histogram, histogram.counts_len)
    histogram.decode_and_add(histogram.encode())
    check_hist_counts(histogram, histogram.counts_len, multiplier=2)
示例#27
0
def FinalizeReport(seedReports, unique_node_file, depth, threads, debug):
    """Aggregate per-seed iteration reports into a human-readable summary.

    :param seedReports: mapping of seed -> list of iteration dicts, each
        with 'avgN', 'totalTime' (ms) and 'threadId'
    :param unique_node_file: unused here; kept for interface compatibility
    :param depth: traversal depth k, echoed into the debug output
    :param threads: number of worker threads (sizes the runtime table)
    :param debug: when True, append one line per iteration to the output
    :return: (report text, latency histogram with values in usec)
    """
    output = ''
    avgKNSize = 0
    threadsTotalRuntime = [0] * threads
    runs = 0
    # Latency recorded as usec (ms * 1000), 4 significant figures.
    histogram = HdrHistogram(1, 1 * 1000 * 1000, 4)

    for seed in seedReports:
        report = seedReports[seed]
        for iterationReport in report:
            avgNeighbor = iterationReport['avgN']
            execTime = iterationReport['totalTime']
            threadId = iterationReport['threadId']
            threadsTotalRuntime[threadId] += execTime
            histogram.record_value(execTime * 1000)
            if debug is True:
                output += "seed=%s, k=%d, avgNeighbor=%d, execTime=%f[ms]\r\n" % (
                    seed, depth, avgNeighbor, execTime)
                output += "**************************************************************\r\n"

            avgKNSize += avgNeighbor
            runs += 1

    # ROBUSTNESS: with no iterations at all the original crashed on
    # division by zero; return what we have instead.
    if runs == 0:
        return output, histogram

    avgKNSize /= runs

    # We're interested in how much time it took to compute a single query on
    # average. Total runtime equals max(threadsTotalRuntime) since the
    # slowest thread bounds the wall clock.
    totalRuntime = max(threadsTotalRuntime)

    output += "**************************************************************\r\n"
    output += "Summary : avgKNSize=%f, avgQueryTime=%f[ms], totalRuntime=%f[ms]\r\n" % (
        avgKNSize, histogram.get_mean_value() / 1000.0, totalRuntime)
    # BUG FIX: the second column is computed at the 90th percentile but was
    # labeled "q99"; the label now matches the computed percentile.
    output += "Latency by percentile : q50=%f[ms], q90=%f[ms], q99.99=%f[ms], q99.9999=%f[ms], \r\n" % (
        histogram.get_value_at_percentile(50.0) / 1000.0,
        histogram.get_value_at_percentile(90.0) / 1000.0,
        histogram.get_value_at_percentile(99.99) / 1000.0,
        histogram.get_value_at_percentile(99.9999) / 1000.0)

    output += "**************************************************************\r\n"

    return output, histogram
示例#28
0
    def consolidate_results(results):
        """Merge per-client wrk2 results into one aggregate dict.

        Sums the scalar counters across clients and, when present, merges
        the HDR-encoded latency blobs into one histogram reported as a
        sorted [percentile, value] list.

        :param results: list of {'results': {...}} dicts, one per client
        :return: consolidated result dict; 'tool' is always 'wrk2'
        """
        err_flag = False
        all_res = {'tool': 'wrk2'}
        total_count = len(results)
        if not total_count:
            return all_res

        # Straight sums of the scalar counters.
        for key in [
                'http_rps', 'http_total_req', 'http_sock_err',
                'http_sock_timeout', 'http_throughput_kbytes'
        ]:
            all_res[key] = 0
            for item in results:
                all_res[key] += item['results'].get(key, 0)
            all_res[key] = int(all_res[key])

        if 'latency_stats' in results[0]['results']:
            all_res['latency_stats'] = []
            # 1 usec up to 24 hours, 2 significant figures.
            histogram = HdrHistogram(1, 24 * 3600 * 1000 * 1000, 2)
            for item in results:
                if 'latency_stats' in item['results']:
                    histogram.decode_and_add(item['results']['latency_stats'])
                else:
                    err_flag = True
            perc_list = [50, 75, 90, 99, 99.9, 99.99, 99.999]
            latency_dict = histogram.get_percentile_to_value_dict(perc_list)
            # BUG FIX: dict.iteritems() is Python-2-only; items() works on
            # both Python 2 and Python 3.
            for key, value in latency_dict.items():
                all_res['latency_stats'].append([key, value])
            all_res['latency_stats'].sort()

        if err_flag:
            LOG.warning(
                'Unable to find latency_stats from the result dictionary, this '
                'may indicate that the test application on VM exited abnormally.'
            )

        return all_res
示例#29
0
def record_a_latency(name, start, url=None, elapsedmin=10.0):
    """Record one latency sample under *name*; track the slowest URLs.

    Samples slower than *elapsedmin* seconds are remembered per-URL in a
    ValueSortedDict capped at ten entries.
    """
    if isinstance(url, URL):
        url = url.url
    elapsed = time.time() - start

    entry = latencies.get(name, {})
    entry['count'] = entry.get('count', 0) + 1
    entry['time'] = entry.get('time', 0.0) + elapsed
    if 'hist' not in entry:
        entry['hist'] = HdrHistogram(1, 30 * 1000, 2)  # 1ms-30sec, 2 sig figs
    entry['hist'].record_value(elapsed * 1000)  # ms

    if elapsed > elapsedmin:
        if 'list' not in entry:
            entry['list'] = ValueSortedDict()
        slow = entry['list']
        url = url or 'none'
        # Evict everything past the ninth entry before inserting; the dict
        # sorts by value (-elapsed), so the slowest URLs sort first.
        size = len(slow)
        if size > 9:
            for stale in itertools.islice(slow, 9, size):
                del slow[stale]
        slow[url] = -elapsed

    latencies[name] = entry
示例#30
0
    def consolidate_results(results):
        """Merge per-client fio results into one aggregate dict.

        Sums the scalar counters across clients and merges any HDR-encoded
        read/write latency histograms into sorted [percentile, value] lists.

        :param results: list of {'results': {...}} dicts, one per client
        :return: consolidated result dict
        """
        total_count = len(results)
        if not total_count:
            return {'tool': 'fio'}

        # Straight sums of the scalar counters; only non-zero totals are kept.
        all_res = {}
        for key in [
                'read_iops', 'read_bw', 'write_iops', 'write_bw',
                'read_runtime_ms', 'write_runtime_ms', 'read_KB', 'write_KB'
        ]:
            total = 0
            for item in results:
                total += item['results'].get(key, 0)
            if total:
                all_res[key] = int(total)
        all_res['tool'] = results[0]['results']['tool']

        clat_list = []
        # perc_list = [1, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 99.5, 99.9, 99.95, 99.99]
        perc_list = [50, 75, 90, 99, 99.9, 99.99, 99.999]
        if 'read_hist' in results[0]['results']:
            clat_list.append('read_hist')
        if 'write_hist' in results[0]['results']:
            clat_list.append('write_hist')

        for clat in clat_list:
            all_res[clat] = []
            # Up to 5 hours in msec, 3 significant digits (matches the
            # encode side of the pipeline).
            histogram = HdrHistogram(1, 5 * 3600 * 1000, 3)
            for item in results:
                histogram.decode_and_add(item['results'][clat])

            latency_dict = histogram.get_percentile_to_value_dict(perc_list)
            # BUG FIX: dict.iteritems() is Python-2-only; items() works on
            # both Python 2 and Python 3.
            for key, value in latency_dict.items():
                all_res[clat].append([key, value])
            all_res[clat].sort()

        return all_res