Example No. 1
    def encode_bins(self, p_output):
        p_output = json.loads(p_output)
        p_output['jobs'][0].pop('trim')
        test_list = ['read', 'write']

        for test in test_list:
            histogram = HdrHistogram(1, 5 * 3600 * 1000, 3)
            clat = p_output['jobs'][0][test]['clat']['bins']
            total_buckets = clat['FIO_IO_U_PLAT_NR']
            grp_msb_bits = clat['FIO_IO_U_PLAT_BITS']
            buckets_per_grp = clat['FIO_IO_U_PLAT_VAL']

            for bucket in range(total_buckets):
                if clat[str(bucket)]:
                    # fio bins are grouped logarithmically; map each bin index
                    # back to a representative latency value before recording
                    grp = bucket // buckets_per_grp
                    subbucket = bucket % buckets_per_grp
                    if grp == 0:
                        val = subbucket - 1
                    else:
                        base = 2 ** (grp_msb_bits + grp - 1)
                        val = int(base + (base // buckets_per_grp) * (subbucket - 0.5))
                    histogram.record_value(val, clat[str(bucket)])

            p_output['jobs'][0][test]['clat']['hist'] = histogram.encode()
            p_output['jobs'][0][test]['clat'].pop('bins')
            p_output['jobs'][0][test]['clat'].pop('percentile')

        return json.dumps(p_output)
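For reference, a minimal round-trip sketch (the recorded value is made up) of how an encoded histogram like the one stored in clat['hist'] above can be decoded and queried with the hdrh package:

from hdrh.histogram import HdrHistogram

# record, encode to a base64-wrapped compressed payload, decode, query
h = HdrHistogram(1, 5 * 3600 * 1000, 3)
h.record_value(1250)
encoded = h.encode()
decoded = HdrHistogram.decode(encoded)
print(decoded.get_value_at_percentile(99.9))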
Example No. 2
    def generate_histogram(cls, jitter_measurements, offset):
        histogram = HdrHistogram(cls.HISTOGRAM_MIN, cls.HISTOGRAM_MAX,
                                 cls.SIG_FIGS)
        for m in jitter_measurements:
            if (m + offset) >= 1:
                histogram.record_value(m + offset)
        return histogram
Example No. 4
    def consolidate_results(results):
        err_flag = False
        all_res = {'tool': 'wrk2'}
        total_count = len(results)
        if not total_count:
            return all_res

        for key in ['http_rps', 'http_total_req', 'http_sock_err',
                    'http_sock_timeout', 'http_throughput_kbytes']:
            all_res[key] = 0
            for item in results:
                all_res[key] += item['results'].get(key, 0)
            all_res[key] = int(all_res[key])

        if 'latency_stats' in results[0]['results']:
            # for item in results:
            #     print item['results']['latency_stats']
            all_res['latency_stats'] = []
            histogram = HdrHistogram(1, 24 * 3600 * 1000 * 1000, 2)
            for item in results:
                if 'latency_stats' in item['results']:
                    histogram.decode_and_add(item['results']['latency_stats'])
                else:
                    err_flag = True
            perc_list = [50, 75, 90, 99, 99.9, 99.99, 99.999]
            latency_dict = histogram.get_percentile_to_value_dict(perc_list)
            for key, value in latency_dict.items():
                all_res['latency_stats'].append([key, value])
            all_res['latency_stats'].sort()

        if err_flag:
            LOG.warning('Unable to find latency_stats from the result dictionary, this '
                        'may indicate that the test application on VM exited abnormally.')

        return all_res
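The aggregation above relies on decode_and_add to merge per-VM encoded histograms; a self-contained sketch of just that merge step (the worker latencies are made up):

from hdrh.histogram import HdrHistogram

# merge two workers' encoded histograms into one and read the percentiles
merged = HdrHistogram(1, 24 * 3600 * 1000 * 1000, 2)
for worker_latency_usec in (1000, 2000):
    w = HdrHistogram(1, 24 * 3600 * 1000 * 1000, 2)
    w.record_value(worker_latency_usec)
    merged.decode_and_add(w.encode())
print(merged.get_percentile_to_value_dict([50, 99]))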
Example No. 5
    def __init__(self, latency_list=None):
        """Create a latency instance.

        latency_list: aggregate all latency values from list if not None
        """
        self.min_usec = sys.maxsize
        self.max_usec = 0
        self.avg_usec = 0
        self.hdrh = None
        if latency_list:
            hdrh_list = []
            for lat in latency_list:
                if lat.available():
                    self.min_usec = min(self.min_usec, lat.min_usec)
                    self.max_usec = max(self.max_usec, lat.max_usec)
                    self.avg_usec += lat.avg_usec
                if lat.hdrh_available():
                    hdrh_list.append(HdrHistogram.decode(lat.hdrh))

            # aggregate histograms if any
            if hdrh_list:
                # merging in place avoids Python 2's bare reduce(), which is
                # functools.reduce on Python 3
                merged_hdrh = hdrh_list[0]
                for hdrh in hdrh_list[1:]:
                    merged_hdrh.add(hdrh)
                self.hdrh = merged_hdrh.encode().decode('utf-8')

            # round to nearest usec
            self.avg_usec = int(round(float(self.avg_usec) / len(latency_list)))
Example No. 6
    def consolidate_results(results):
        all_res = {'tool': 'wrk2'}
        total_count = len(results)
        if not total_count:
            return all_res

        for key in ['http_rps', 'http_total_req', 'http_sock_err',
                    'http_sock_timeout', 'http_throughput_kbytes']:
            all_res[key] = 0
            for item in results:
                if key in item['results']:
                    all_res[key] += item['results'][key]
            all_res[key] = int(all_res[key])

        if 'latency_stats' in results[0]['results']:
            # for item in results:
            #     print item['results']['latency_stats']
            all_res['latency_stats'] = []
            histogram = HdrHistogram(1, 24 * 3600 * 1000 * 1000, 2)
            for item in results:
                histogram.decode_and_add(item['results']['latency_stats'])
            perc_list = [50, 75, 90, 99, 99.9, 99.99, 99.999]
            latency_dict = histogram.get_percentile_to_value_dict(perc_list)
            for key, value in latency_dict.items():
                all_res['latency_stats'].append([key, value])
            all_res['latency_stats'].sort()

        return all_res
Example No. 7
def test_mean_stddev():
    # fill up a histogram with the values in the list
    histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    for value in VALUES_LIST:
        histogram.record_value(value)
    assert histogram.get_mean_value() == 2000.5
    assert histogram.get_stddev() == 1000.5
Example No. 8
def test_scaled_highest_equiv_value():
    histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    assert histogram.get_highest_equivalent_value(8180) == 8183
    assert histogram.get_highest_equivalent_value(8191) == 8191
    assert histogram.get_highest_equivalent_value(8193) == 8199
    assert histogram.get_highest_equivalent_value(9995) == 9999
    assert histogram.get_highest_equivalent_value(10007) == 10007
    assert histogram.get_highest_equivalent_value(10008) == 10015
def test_highest_equivalent_value():
    histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    assert 8183 * 1024 + 1023 == histogram.get_highest_equivalent_value(8180 * 1024)
    assert 8191 * 1024 + 1023 == histogram.get_highest_equivalent_value(8191 * 1024)
    assert 8199 * 1024 + 1023 == histogram.get_highest_equivalent_value(8193 * 1024)
    assert 9999 * 1024 + 1023 == histogram.get_highest_equivalent_value(9995 * 1024)
    assert 10007 * 1024 + 1023 == histogram.get_highest_equivalent_value(10007 * 1024)
    assert 10015 * 1024 + 1023 == histogram.get_highest_equivalent_value(10008 * 1024)
Example No. 12
class Histogram(object):
    def __init__(self, num_histograms, cores, flow_config, opts):
        self.histograms = [
            HdrHistogram(1, 1000 * 1000, 2) for i in range(num_histograms)
        ]
        self.global_histogram = HdrHistogram(1, 1000 * 1000, 2)
        self.cores = cores
        self.flow_config = flow_config
        self.violations = [0 for i in range(len(flow_config))]
        self.dropped = [0 for i in range(len(flow_config))]
        self.print_values = opts.print_values
        if self.print_values:
            self.print_files = [
                open(opts.output_file + '_flow' + str(flow), 'w+')
                for flow in range(len(flow_config))
            ]

    def record_value(self, flow, value):
        self.global_histogram.record_value(value)
        self.histograms[flow].record_value(value)
        if self.flow_config[flow].get('slo'):
            if value > self.flow_config[flow].get('slo'):
                self.violations[flow] += 1
        if self.print_values:
            self.print_files[flow].write(str(value) + '\n')

    def print_info(self):
        info = []
        for i in range(len(self.histograms)):
            # Add the dropped requests as max time
            max_value = self.histograms[i].get_max_value()
            for j in range(self.dropped[i]):
                self.histograms[i].record_value(max_value)

            # Get the total count of received requests
            total_count = self.histograms[i].get_total_count()

            # Get the 99th latency
            latency = self.histograms[i].get_value_at_percentile(99)

            # Prepare the json for output
            new_value = {
                'latency': latency,
                'per_core_through':
                    1.0 * (total_count - self.dropped[i]) / self.cores,
                'slo_success': 1.0 - (1.0 * self.violations[i] / total_count),
                'dropped_requests': self.dropped[i],
            }
            info.append(new_value)
        print(json.dumps(info))

    def drop_request(self, flow_id):
        self.dropped[flow_id] += 1
        self.violations[flow_id] += 1
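A minimal usage sketch for the class above; the flow configuration and opts object are hypothetical stand-ins for whatever the surrounding benchmark passes in (it also assumes the module's json import):

# hypothetical two-flow setup: flow 0 has a 500us SLO, flow 1 has none
class _Opts:
    print_values = False
    output_file = 'latency'

flows = [{'slo': 500}, {}]
hist = Histogram(len(flows), cores=4, flow_config=flows, opts=_Opts())
hist.record_value(0, 750)  # exceeds flow 0's SLO, counted as a violation
hist.record_value(1, 120)
hist.print_info()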
Example No. 13
def test_dump_histogram():
    samples = [
        # standard Hdr test histogram
        'HISTFAAAACF4nJNpmSzMwMDAzAABMJoRTM6Y1mD/ASLwN5oJAFuQBYU=',
        'HISTFAAAACh4nJNpmSzMwMDAyQABzFCaEUzOmNZg/wEisL2Kaasc00ImJgCC8Qbe'
    ]
    for hdrh in samples:
        HdrHistogram.dump(hdrh, output=open(os.devnull, 'wb'))
        HdrHistogram.dump(hdrh)
Example No. 14
    def log_stats(self):

        stats_snapshot = self.stats.copy()

        tmp_stats = OrderedDict()
        if self.stats_snapshot_previous:
            for k in stats_snapshot.keys():
                tmp_stats[k+'_rate'] = (float(stats_snapshot[k]) - float(self.stats_snapshot_previous[k])) / \
                    float(self.stats_interval)
        else:
            for k in stats_snapshot.keys():
                tmp_stats[k + '_rate'] = stats_snapshot[k]
        self.stats_snapshot_previous = stats_snapshot

        # copy and reset latency histogram
        ih = copy.copy(self.interval_histogram)
        self.interval_histogram = HdrHistogram(1, 10000000, 3)

        latency = OrderedDict()
        latency['latency_max'] = ih.get_max_value()
        latency['latency_min'] = ih.get_min_value()
        latency['latency_mean'] = ih.get_mean_value()
        for x in [99.90, 99.00]:
            latency['latency_{0:.2f}'.format(x)] = ih.get_value_at_percentile(
                x)

        # copy and reset ttl histogram
        th = copy.copy(self.ttl_histogram)
        self.ttl_histogram = HdrHistogram(1, 10000000, 3)
        ttl = OrderedDict()
        ttl['ttl_max'] = th.get_max_value()
        ttl['ttl_min'] = th.get_min_value()
        ttl['ttl_mean'] = th.get_mean_value()
        for x in [99.90, 99.00]:
            ttl['ttl_{0:.2f}'.format(x)] = th.get_value_at_percentile(x)

        # copy and reset message-lag histogram (currently disabled)
        #ml = copy.copy(self.msglag_histogram)
        #self.msglag_histogram = HdrHistogram(1, 10000000, 3)
        #mlag = OrderedDict()
        #mlag['message_lag_max'] = ml.get_max_value()
        #mlag['message_lag_min'] = ml.get_min_value()
        #mlag['message_lag_mean'] = ml.get_mean_value()
        #mlag['message_lag_stddev'] = ml.get_stddev()
        #for x in [99.99, 99.95, 99.00, 95.00, 90.00]:
        #    mlag['message_lag_%.2f' % x] = ml.get_value_at_percentile(x)

        data = OrderedDict()
        data.update(stats_snapshot)
        data.update(tmp_stats)
        data.update(latency)
        data.update(ttl)
        #data.update(mlag)
        data.update({'timestamp': format_time(datetime.utcnow())})

        self._stats_logger.info("{data}", data=json.dumps(data))
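The copy-and-swap of interval_histogram above is the usual way to take an interval snapshot without losing samples; the pattern in isolation (the recorded value is made up):

from hdrh.histogram import HdrHistogram

# record into a live histogram, then swap in a fresh one and report the snapshot
live = HdrHistogram(1, 10000000, 3)
live.record_value(250)
snapshot, live = live, HdrHistogram(1, 10000000, 3)
print(snapshot.get_max_value(), snapshot.get_mean_value())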
Example No. 15
def main():
    args = sys.argv[1:]
    if args:
        encoded_histograms = args
        for hdrh in encoded_histograms:
            print('\nDumping histogram: ' + hdrh + '\n')
            HdrHistogram.dump(hdrh)
    else:
        print('\nUsage: %s [<string encoded hdr histogram>]*\n' %
              (sys.argv[0]))
Example No. 16
def test_basic():
    histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    expected_bucket_count = 22 if BITNESS == 64 else 21
    expected_counts_len = 23552 if BITNESS == 64 else 22528
    assert histogram.bucket_count == expected_bucket_count
    assert histogram.sub_bucket_count == 2048
    assert histogram.counts_len == expected_counts_len
    assert histogram.unit_magnitude == 0
    assert histogram.sub_bucket_half_count_magnitude == 10
    assert histogram.get_count_at_sub_bucket(0, 0) == 0
    assert histogram.equals(histogram)
Example No. 17
def test_invalid_significant_figures():
    try:
        HdrHistogram(LOWEST, HIGHEST, -1)
        assert False
    except ValueError:
        pass
    try:
        HdrHistogram(LOWEST, HIGHEST, 6)
        assert False
    except ValueError:
        pass
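If pytest is available, the same checks read more idiomatically; a sketch reusing the module's LOWEST/HIGHEST constants:

import pytest

def test_invalid_significant_figures_pytest():
    # significant figures outside the supported range must raise ValueError
    with pytest.raises(ValueError):
        HdrHistogram(LOWEST, HIGHEST, -1)
    with pytest.raises(ValueError):
        HdrHistogram(LOWEST, HIGHEST, 6)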
Example No. 18
    def __init__(self, total, concurrency):
        self._total = total
        self._concurrency = concurrency
        self._active = 0
        self._done = 0
        self._error = 0
        self._condition = threading.Condition()
        self._histogram = HdrHistogram(1, 300000000000, 4)

        self._minLatency = sys.maxsize
        self._maxLatency = float('-inf')  # int('-inf') would raise ValueError
Example No. 19
def check_cod_perf():
    histogram = HdrHistogram(LOWEST, WRK2_MAX_LATENCY, 2)
    fill_start_index = (20 * histogram.counts_len) // 100
    fill_to_index = fill_start_index + (30 * histogram.counts_len) // 100
    fill_hist_counts(histogram, fill_to_index, fill_start_index)

    # encode 1000 times
    start = datetime.datetime.now()
    for _ in range(1000):
        histogram.encode()
    delta = datetime.datetime.now() - start
    print(delta)
Example No. 21
def check_dec_perf():
    histogram = HdrHistogram(LOWEST, WRK2_MAX_LATENCY, 2)
    fill_start_index = (20 * histogram.counts_len) // 100
    fill_to_index = fill_start_index + (30 * histogram.counts_len) // 100
    fill_hist_counts(histogram, fill_to_index, fill_start_index)
    b64 = histogram.encode()

    # decode and add to self 1000 times
    start = datetime.datetime.now()
    for _ in range(1000):
        histogram.decode_and_add(b64)
    delta = datetime.datetime.now() - start
    print(delta)
Example No. 23
def check_hist_encode(word_size, digits, expected_compressed_length,
                      fill_start_percent, fill_count_percent):
    histogram = HdrHistogram(LOWEST,
                             WRK2_MAX_LATENCY,
                             digits,
                             word_size=word_size)
    if fill_count_percent:
        fill_start_index = (fill_start_percent * histogram.counts_len) // 100
        fill_to_index = fill_start_index + (fill_count_percent *
                                            histogram.counts_len) // 100
        fill_hist_counts(histogram, fill_to_index, fill_start_index)
    b64 = histogram.encode()
    assert len(b64) == expected_compressed_length
def test_hist_codec_partial():
    histogram = HdrHistogram(LOWEST, WRK2_MAX_LATENCY, SIGNIFICANT)

    partial_histogram = HdrHistogram(LOWEST, WRK2_MAX_LATENCY, SIGNIFICANT)

    # put some known numbers in the first half buckets
    half_count = partial_histogram.counts_len
    fill_hist_counts(partial_histogram, half_count)
    encoded = partial_histogram.encode()
    histogram.decode_and_add(encoded)

    # now verify that the partial counters are identical to the original
    check_hist_counts(histogram, half_count, multiplier=1)
    check_hist_counts(histogram, histogram.counts_len, start=half_count + 1, multiplier=0)
Example No. 27
def test_tagged_v2_log_add():
    accumulated_histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    log_reader = HistogramLogReader(TAGGED_V2_LOG, accumulated_histogram)
    while 1:
        decoded_histogram = log_reader.add_next_interval_histogram()
        if not decoded_histogram:
            break
Example No. 28
def test_basic():
    histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    assert histogram.bucket_count == 22
    assert histogram.sub_bucket_count == 2048
    assert histogram.counts_len == 23552
    assert histogram.unit_magnitude == 0
    assert histogram.sub_bucket_half_count_magnitude == 10
Example No. 29
def test_hdr_interop():
    # decode and add the encoded histograms
    histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    corrected_histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    histogram.decode_and_add(ENCODE_SAMPLES_HDRHISTOGRAM_C[0])
    corrected_histogram.decode_and_add(ENCODE_SAMPLES_HDRHISTOGRAM_C[1])

    # check the percentiles. min, max values match
    check_percentiles(histogram, corrected_histogram)
Example No. 30
    def __call__(
        self,
        *,
        started_workunits: tuple[Workunit, ...],
        completed_workunits: tuple[Workunit, ...],
        finished: bool,
        context: StreamingWorkunitContext,
    ) -> None:
        if not self.enabled:
            return

        # Aggregate counters on completed workunits.
        for workunit in completed_workunits:
            if "counters" in workunit:
                for name, value in workunit["counters"].items():
                    self.counters[name] += value

        if not finished:
            return

        # Add any counters with a count of 0.
        for counter in context.run_tracker.counter_names:
            if counter not in self.counters:
                self.counters[counter] = 0

        # Log aggregated counters.
        counter_lines = "\n".join(
            f"  {name}: {count}"
            for name, count in sorted(self.counters.items()))
        logger.info(f"Counters:\n{counter_lines}")

        if not self.has_histogram_module:
            return
        from hdrh.histogram import HdrHistogram

        histograms = context.get_observation_histograms()["histograms"]
        if not histograms:
            logger.info("No observation histogram were recorded.")
            return

        logger.info("Observation histogram summaries:")
        for name, encoded_histogram in histograms.items():
            # Note: The Python library for HDR Histogram will only decode compressed histograms
            # that are further encoded with base64. See
            # https://github.com/HdrHistogram/HdrHistogram_py/issues/29.
            histogram = HdrHistogram.decode(
                base64.b64encode(encoded_histogram))
            percentile_to_vals = "\n".join(
                f"  p{percentile}: {value}" for percentile, value in histogram.
                get_percentile_to_value_dict([25, 50, 75, 90, 95, 99]).items())
            logger.info(f"Summary of `{name}` observation histogram:\n"
                        f"  min: {histogram.get_min_value()}\n"
                        f"  max: {histogram.get_max_value()}\n"
                        f"  mean: {histogram.get_mean_value():.3f}\n"
                        f"  std dev: {histogram.get_stddev():.3f}\n"
                        f"  total observations: {histogram.total_count}\n"
                        f"{percentile_to_vals}")
Example No. 31
def dump(args=None):
    """
    Dump a list of HDR histogram encodings

    args: list of strings, each representing an HDR histogram encoding
    """
    if not args:
        args = sys.argv[1:]
    res = 1
    if args:
        encoded_histograms = args
        for hdrh in encoded_histograms:
            print('\nDumping histogram: ' + hdrh + '\n')
            HdrHistogram.dump(hdrh)
        res = 0
    else:
        print('\nUsage: %s [<string encoded hdr histogram>]*\n' %
              (sys.argv[0]))
    return res
Example No. 32
import time
from contextlib import contextmanager

hists = {}  # module-level registry (defined here so the snippet is self-contained)

# the function yields and is used in a with-block, so it needs the
# contextlib.contextmanager decorator to work as a context manager
@contextmanager
def record_latency(name):
    try:
        start = time.time()
        yield
    finally:
        elapsed = time.time() - start
        if name not in hists:
            hists[name] = HdrHistogram(1, 30 * 1000, 2)  # 1 ms - 30 sec, 2 sig figs
        hists[name].record_value(elapsed * 1000)  # record in ms
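Usage is then just a with-block; the label is arbitrary:

with record_latency('db_query'):
    time.sleep(0.01)
print(hists['db_query'].get_value_at_percentile(50))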
Example No. 33
def dumpHdrhLog(file, data):
    histogram = HdrHistogram(
        MIN_LATENCY_USECS, MAX_LATENCY_USECS, LATENCY_SIGNIFICANT_DIGITS
    )

    for d in data:
        histogram.record_value(d)

    # write a percentile distribution report; the with-block closes the file
    with open(file, "wb") as out:
        histogram.output_percentile_distribution(
            out, USEC_PER_SEC, TICKS_PER_HALF_DISTANCE
        )
Example No. 36
def test_jHiccup_v2_log():
    accumulated_histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    for checklist in JHICCUP_CHECKLISTS:
        accumulated_histogram.reset()
        log_reader = HistogramLogReader(JHICCUP_V2_LOG_NAME,
                                        accumulated_histogram)

        histogram_count = 0
        total_count = 0
        target_numbers = checklist.pop('target')
        while 1:
            decoded_histogram = log_reader.get_next_interval_histogram(
                **checklist)
            if not decoded_histogram:
                break
            histogram_count += 1
            total_count += decoded_histogram.get_total_count()
            accumulated_histogram.add(decoded_histogram)
            # These logs use 8 byte counters
            assert decoded_histogram.get_word_size() == 8
            # These logs use the default 1.0 conversion ratio
            assert decoded_histogram.get_int_to_double_conversion_ratio() == 1.0
        for statement in target_numbers:
            assert eval(statement) == target_numbers[statement]

        log_reader.close()
Example No. 37
    def from_row(cls, row, prefix=""):
        if row is None:
            return None
        else:
            raw_histo = row[prefix + "jitterHistogram"]
            return cls(
                row[prefix + "measurementID"], row[prefix + "serviceSetID"],
                row[prefix + "minJitter"], row[prefix + "maxJitter"],
                row[prefix + "avgJitter"], row[prefix + "stdDevJitter"],
                HdrHistogram.decode(raw_histo, b64_wrap=False)
                if raw_histo else None, row[prefix + "jitterHistogramOffset"],
                row[prefix + "interval"],
                cls._json_loads(row[prefix + "extraJSONData"]))
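The b64_wrap=False decode above implies the histogram was stored as a raw compressed blob rather than a base64 string; a round-trip sketch under that assumption (the recorded value is made up):

from hdrh.histogram import HdrHistogram

h = HdrHistogram(1, 1000000, 3, b64_wrap=False)
h.record_value(42)
raw = h.encode()  # raw compressed payload, suitable for a binary DB column
same = HdrHistogram.decode(raw, b64_wrap=False)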
Example No. 40
    def consolidate_results(results):
        total_count = len(results)
        if not total_count:
            return {'tool': 'fio'}

        all_res = {}
        for key in [
                'read_iops', 'read_bw', 'write_iops', 'write_bw',
                'read_runtime_ms', 'write_runtime_ms', 'read_KB', 'write_KB'
        ]:
            total = 0
            for item in results:
                total += item['results'].get(key, 0)
            if total:
                all_res[key] = int(total)
        all_res['tool'] = results[0]['results']['tool']

        clat_list = []
        # perc_list = [1, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 99.5, 99.9, 99.95, 99.99]
        perc_list = [50, 75, 90, 99, 99.9, 99.99, 99.999]
        if 'read_hist' in results[0]['results']:
            clat_list.append('read_hist')
        if 'write_hist' in results[0]['results']:
            clat_list.append('write_hist')

        for clat in clat_list:
            all_res[clat] = []
            histogram = HdrHistogram(1, 5 * 3600 * 1000, 3)
            for item in results:
                histogram.decode_and_add(item['results'][clat])

            latency_dict = histogram.get_percentile_to_value_dict(perc_list)
            for key, value in latency_dict.items():
                all_res[clat].append([key, value])
            all_res[clat].sort()

        return all_res
Example No. 42
def test_tagged_v2_log():
    histogram_count = 0
    total_count = 0
    accumulated_histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    accumulated_histogram_tags = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    log_reader = HistogramLogReader(TAGGED_V2_LOG, accumulated_histogram)
    while 1:
        decoded_histogram = log_reader.get_next_interval_histogram()
        if not decoded_histogram:
            break
        histogram_count += 1
        total_count += decoded_histogram.get_total_count()
        if decoded_histogram.get_tag() == 'A':
            accumulated_histogram_tags.add(decoded_histogram)
        else:
            assert decoded_histogram.get_tag() is None
            accumulated_histogram.add(decoded_histogram)

    assert accumulated_histogram.equals(accumulated_histogram_tags)
    assert total_count == 32290
def check_hist_codec_b64(word_size, b64_wrap):
    histogram = HdrHistogram(LOWEST, WRK2_MAX_LATENCY, SIGNIFICANT,
                             b64_wrap=b64_wrap,
                             word_size=word_size)
    # encode with all zero counters
    encoded = histogram.encode()
    # add back same histogram
    histogram.decode_and_add(encoded)
    # counters should remain zero
    check_hist_counts(histogram, histogram.counts_len, multiplier=0)
    # fill up the histogram
    fill_hist_counts(histogram, histogram.counts_len)
    encoded = histogram.encode()
    histogram.decode_and_add(encoded)
    check_hist_counts(histogram, histogram.counts_len, multiplier=2)
def test_record_value():
    histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    histogram.record_value(TEST_VALUE_LEVEL)
    assert(histogram.get_count_at_value(TEST_VALUE_LEVEL) == 1)
    assert(histogram.get_total_count() == 1)
Example No. 45
    def _decode_next_interval_histogram(self,
                                        dest_histogram,
                                        range_start_time_sec=0.0,
                                        range_end_time_sec=sys.maxsize,
                                        absolute=False):
        '''Read the next interval histogram from the log, if the interval
        falls within an absolute or relative time range.

        Timestamps are assumed to appear in order in the log file, and as
        such this method will return None upon encountering a timestamp
        larger than range_end_time_sec.

        Relative time range:
            the range is assumed to be in seconds relative to
            the actual timestamp value found in each interval line in the log
        Absolute time range:
            Absolute timestamps are calculated by adding the timestamp found
            with the recorded interval to the [latest, optional] start time
            found in the log. The start time is indicated in the log with
            a "#[StartTime: " followed by the start time in seconds.

        Params:
            dest_histogram if None, create a new histogram, else add
                           the new interval histogram to it
            range_start_time_sec The absolute or relative start of the
                                 expected time range, in seconds.
            range_end_time_sec The absolute or relative end of the expected
                               time range, in seconds.
            absolute Defines if the passed range is absolute or relative

        Return:
            Returns a histogram object if an interval line was found with an
            associated start timestamp value that falls between
            range_start_time_sec and range_end_time_sec,
            or None if no such interval line is found.
            Upon encountering any unexpected format errors in reading the
            next interval from the file, this method will return None.

            The histogram returned will have its timestamp set to the
            absolute timestamp calculated from adding the interval's
            indicated timestamp value to the latest [optional] start time
            found in the log.

        Exceptions:
            ValueError if there is a syntax error in one of the float fields
        '''
        while 1:
            line = self.input_file.readline()
            if not line:
                return None
            if line[0] == '#':
                match_res = re_start_time.match(line)
                if match_res:
                    self.start_time_sec = float(match_res.group(1))
                    self.observed_start_time = True
                    continue
                match_res = re_base_time.match(line)
                if match_res:
                    self.base_time_sec = float(match_res.group(1))
                    self.observed_base_time = True
                    continue

            match_res = re_histogram_interval.match(line)
            if not match_res:
                # probably a legend line that starts with "\"StartTimestamp"
                continue
            # Decode: startTimestamp, intervalLength, maxTime, histogramPayload
            # Timestamp is expected to be in seconds
            log_time_stamp_in_sec = float(match_res.group(1))
            interval_length_sec = float(match_res.group(2))
            cpayload = match_res.group(4)

            if not self.observed_start_time:
                # No explicit start time noted. Use 1st observed time:
                self.start_time_sec = log_time_stamp_in_sec
                self.observed_start_time = True

            if not self.observed_base_time:
                # No explicit base time noted.
                # Deduce from 1st observed time (compared to start time):
                if log_time_stamp_in_sec < self.start_time_sec - (365 * 24 * 3600.0):
                    # Criteria Note: if log timestamp is more than a year in
                    # the past (compared to StartTime),
                    # we assume that timestamps in the log are not absolute
                    self.base_time_sec = self.start_time_sec
                else:
                    # Timestamps are absolute
                    self.base_time_sec = 0.0
                self.observed_base_time = True

            absolute_start_time_stamp_sec = \
                log_time_stamp_in_sec + self.base_time_sec
            offset_start_time_stamp_sec = \
                absolute_start_time_stamp_sec - self.start_time_sec

            # The interval length is expected to be in seconds
            absolute_end_time_stamp_sec = \
                absolute_start_time_stamp_sec + interval_length_sec

            if absolute:
                start_time_stamp_to_check_range_on = absolute_start_time_stamp_sec
            else:
                start_time_stamp_to_check_range_on = offset_start_time_stamp_sec

            if start_time_stamp_to_check_range_on < range_start_time_sec:
                continue

            if start_time_stamp_to_check_range_on > range_end_time_sec:
                return None
            if dest_histogram:
                # add the interval histogram to the destination histogram
                histogram = dest_histogram
                histogram.decode_and_add(cpayload)
            else:
                histogram = HdrHistogram.decode(cpayload)
                histogram.set_start_time_stamp(absolute_start_time_stamp_sec * 1000.0)
                histogram.set_end_time_stamp(absolute_end_time_stamp_sec * 1000.0)
            return histogram
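A usage sketch for the time-range logic documented above, via the public HistogramLogReader API (the log file name is hypothetical):

from hdrh.histogram import HdrHistogram
from hdrh.log import HistogramLogReader

target = HdrHistogram(1, 24 * 3600 * 1000 * 1000, 3)
reader = HistogramLogReader('jHiccup.hlog', target)
# relative range: seconds from the log's start time
interval = reader.get_next_interval_histogram(range_start_time_sec=5.0,
                                              range_end_time_sec=20.0,
                                              absolute=False)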
def test_large_numbers():
    # at most 5 significant digits are supported (more raises ValueError)
    histogram = HdrHistogram(20000000, 100000000, 5)
    histogram.record_value(100000000)
    histogram.record_value(20000000)
    histogram.record_value(30000000)
    assert(histogram.values_are_equivalent(20000000, histogram.get_value_at_percentile(50.0)))
    assert(histogram.values_are_equivalent(30000000, histogram.get_value_at_percentile(83.33)))
    assert(histogram.values_are_equivalent(100000000, histogram.get_value_at_percentile(83.34)))
    assert(histogram.values_are_equivalent(100000000, histogram.get_value_at_percentile(99.0)))
def test_empty_histogram():
    histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    assert(histogram.get_min_value() == 0)
    assert(histogram.get_max_value() == 0)
    assert(histogram.get_mean_value() == 0)
    assert(histogram.get_stddev() == 0)
def load_corrected_histogram():
    histogram = HdrHistogram(LOWEST, HIGHEST, SIGNIFICANT)
    # record this value with a count of 10,000
    histogram.record_corrected_value(1000, INTERVAL, 10000)
    histogram.record_corrected_value(100000000, INTERVAL)
    return histogram
def test_out_of_range_values():
    # significant figures must be within the supported 0-5 range
    histogram = HdrHistogram(1, 1000, 4)
    assert(histogram.record_value(32767))
    assert(histogram.record_value(32768) is False)