Example #1
    def testParsesHistogram(self):
        expected = {
            300: 5771,
            400: 118948,
            500: 7121,
            600: 639,
            700: 199,
            800: 90,
            900: 53,
            1000: 149,
            2000: 31,
            3000: 11,
            4000: 8,
            5000: 1,
            6000: 1,
            7000: 1,
            9000: 1
        }

        hist = netperf.ParseHistogram(self.netperf_output)
        self.assertEqual(hist, expected)
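
    # Illustrative sketch, not part of the original test: each key of the
    # parsed histogram is a latency bucket in microseconds (two significant
    # figures) and each value is the number of transactions whose response
    # time fell into that bucket, so summing the counts recovers the total
    # number of measured transactions.
    def testHistogramCountsSumToTransactionTotal(self):
        hist = netperf.ParseHistogram(self.netperf_output)
        # 133024 is simply the sum of the counts in the expected dict above.
        self.assertEqual(sum(hist.values()), 133024)
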
def ParseNetperfOutput(stdout, metadata, benchmark_name,
                       enable_latency_histograms):
  """Parses the stdout of a single netperf process.

  Args:
    stdout: the stdout of the netperf process
    metadata: metadata for any sample.Sample objects we create
    benchmark_name: the name of the netperf benchmark
    enable_latency_histograms: bool indicating if latency histograms are
      included in stdout

  Returns:
    A tuple containing (throughput_sample, latency_samples, latency_histogram)
  """
  # Don't modify the metadata dict that was passed in
  metadata = metadata.copy()

  # Extract stats from stdout
  # Sample output:
  #
  # "MIGRATED TCP REQUEST/RESPONSE TEST from 0.0.0.0 (0.0.0.0) port 20001
  # AF_INET to 104.154.50.86 () port 20001 AF_INET : +/-2.500% @ 99% conf.
  # : first burst 0",\n
  # Throughput,Throughput Units,Throughput Confidence Width (%),
  # Confidence Iterations Run,Stddev Latency Microseconds,
  # 50th Percentile Latency Microseconds,90th Percentile Latency Microseconds,
  # 99th Percentile Latency Microseconds,Minimum Latency Microseconds,
  # Maximum Latency Microseconds\n
  # 1405.50,Trans/s,2.522,4,783.80,683,735,841,600,900\n
  try:
    fp = io.StringIO(stdout)
    # "-o" flag above specifies CSV output, but there is one extra header line:
    banner = next(fp)
    assert banner.startswith('MIGRATED'), stdout
    r = csv.DictReader(fp)
    results = next(r)
    logging.info('Netperf Results: %s', results)
    assert 'Throughput' in results
  except (StopIteration, AssertionError):
    raise Exception('Netperf ERROR: Failed to parse stdout. STDOUT: %s' %
                    stdout)

  # Update the metadata with some additional info
  meta_keys = [('Confidence Iterations Run', 'confidence_iter'),
               ('Throughput Confidence Width (%)', 'confidence_width_percent')]
  metadata.update({meta_key: results[netperf_key]
                   for netperf_key, meta_key in meta_keys})

  # Create the throughput sample
  throughput = float(results['Throughput'])
  throughput_units = results['Throughput Units']
  if throughput_units == '10^6bits/s':
    # TCP_STREAM benchmark
    unit = MBPS
    metric = '%s_Throughput' % benchmark_name
  elif throughput_units == 'Trans/s':
    # *RR benchmarks
    unit = TRANSACTIONS_PER_SECOND
    metric = '%s_Transaction_Rate' % benchmark_name
  else:
    raise ValueError('Netperf output specifies unrecognized throughput units %s'
                     % throughput_units)
  throughput_sample = sample.Sample(metric, throughput, unit, metadata)

  latency_hist = None
  latency_samples = []
  if enable_latency_histograms:
    # Parse the latency histogram. {latency: count} where "latency" is the
    # latency in microseconds with only 2 significant figures and "count" is the
    # number of response times that fell in that latency range.
    latency_hist = netperf.ParseHistogram(stdout)
    hist_metadata = {'histogram': json.dumps(latency_hist)}
    hist_metadata.update(metadata)
    latency_samples.append(sample.Sample(
        '%s_Latency_Histogram' % benchmark_name, 0, 'us', hist_metadata))
  if unit != MBPS:
    for metric_key, metric_name in [
        ('50th Percentile Latency Microseconds', 'p50'),
        ('90th Percentile Latency Microseconds', 'p90'),
        ('99th Percentile Latency Microseconds', 'p99'),
        ('Minimum Latency Microseconds', 'min'),
        ('Maximum Latency Microseconds', 'max'),
        ('Stddev Latency Microseconds', 'stddev')]:
      if metric_key in results:
        latency_samples.append(
            sample.Sample('%s_Latency_%s' % (benchmark_name, metric_name),
                          float(results[metric_key]), 'us', metadata))

  return (throughput_sample, latency_samples, latency_hist)
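
# A minimal usage sketch, not from the source: feeding ParseNetperfOutput the
# sample output documented in the comment inside the function above. The empty
# metadata dict and the benchmark name 'TCP_RR' are hypothetical values.
sample_stdout = (
    'MIGRATED TCP REQUEST/RESPONSE TEST from 0.0.0.0 (0.0.0.0) port 20001 '
    'AF_INET to 104.154.50.86 () port 20001 AF_INET : +/-2.500% @ 99% conf. '
    ': first burst 0\n'
    'Throughput,Throughput Units,Throughput Confidence Width (%),'
    'Confidence Iterations Run,Stddev Latency Microseconds,'
    '50th Percentile Latency Microseconds,90th Percentile Latency Microseconds,'
    '99th Percentile Latency Microseconds,Minimum Latency Microseconds,'
    'Maximum Latency Microseconds\n'
    '1405.50,Trans/s,2.522,4,783.80,683,735,841,600,900\n')

throughput_sample, latency_samples, latency_hist = ParseNetperfOutput(
    sample_stdout, metadata={}, benchmark_name='TCP_RR',
    enable_latency_histograms=False)
# throughput_sample carries TCP_RR_Transaction_Rate = 1405.5 in
# TRANSACTIONS_PER_SECOND units; latency_samples holds the
# p50/p90/p99/min/max/stddev latencies in 'us'; latency_hist is None because
# histograms were not requested.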
Example #3
def ParseNetperfOutput(stdout,
                       metadata,
                       benchmark_name,
                       enable_latency_histograms,
                       instance=None):
    """Parses the stdout of a single netperf process.

  Args:
    stdout: the stdout of the netperf process
    metadata: metadata for any sample.Sample objects we create
    benchmark_name: the name of the netperf benchmark
    enable_latency_histograms: bool indicating if latency histograms are
        included in stdout
    instance: on which machines the test has been executed

  Returns:
    A tuple containing (throughput_sample, latency_samples, latency_histogram)
  """
    # Don't modify the metadata dict that was passed in
    metadata = metadata.copy()

    # Extract stats from stdout
    # Sample output:
    #
    # "MIGRATED TCP REQUEST/RESPONSE TEST from 0.0.0.0 (0.0.0.0) port 20001
    # AF_INET to 104.154.50.86 () port 20001 AF_INET : +/-2.500% @ 99% conf.
    # : first burst 0",\n
    # Throughput,Throughput Units,Throughput Confidence Width (%),
    # Confidence Iterations Run,Stddev Latency Microseconds,
    # 50th Percentile Latency Microseconds,90th Percentile Latency Microseconds,
    # 99th Percentile Latency Microseconds,Minimum Latency Microseconds,
    # Maximum Latency Microseconds\n
    # 1405.50,Trans/s,2.522,4,783.80,683,735,841,600,900\n
    try:
        fp = six.StringIO(stdout)
        # "-o" flag above specifies CSV output, but there is one extra header line:
        banner = next(fp)
        assert banner.startswith('MIGRATED'), stdout
        r = csv.DictReader(fp)
        results = next(r)
        logging.info('Netperf Results: %s', results)
        assert 'Throughput' in results
    except (StopIteration, AssertionError):
        # The output returned by netperf was unparseable, usually due to a broken
        # connection or other error.  Raise KnownIntermittentError to signal that
        # the benchmark can be retried.  Do not retry automatically, as an
        # immediate retry on these VMs may be adversely affected (e.g. burstable
        # credits partially used).
        message = 'Netperf ERROR: Failed to parse stdout. STDOUT: %s' % stdout
        logging.error(message)
        raise errors.Benchmarks.KnownIntermittentError(message)

    # Update the metadata with some additional info
    meta_keys = [('Confidence Iterations Run', 'confidence_iter'),
                 ('Throughput Confidence Width (%)',
                  'confidence_width_percent')]
    if 'TCP' in benchmark_name:
        meta_keys.extend([
            ('Local Transport Retransmissions', 'netperf_retransmissions'),
            ('Remote Transport Retransmissions', 'netserver_retransmissions'),
        ])

    metadata.update({
        meta_key: results[netperf_key]
        for netperf_key, meta_key in meta_keys
    })

    # Create the throughput sample
    throughput = float(results['Throughput'])
    throughput_units = results['Throughput Units']
    if throughput_units == '10^6bits/s':
        # TCP_STREAM benchmark
        unit = MBPS
        metric = '%s_Throughput' % benchmark_name
    elif throughput_units == 'Trans/s':
        # *RR benchmarks
        unit = TRANSACTIONS_PER_SECOND
        metric = '%s_Transaction_Rate' % benchmark_name
    else:
        raise ValueError(
            'Netperf output specifies unrecognized throughput units %s' %
            throughput_units)
    throughput_sample = sample.Sample(metric,
                                      throughput,
                                      unit,
                                      metadata,
                                      instance=instance)

    latency_hist = None
    latency_samples = []
    if enable_latency_histograms:
        # Parse the latency histogram. {latency: count} where "latency" is the
        # latency in microseconds with only 2 significant figures and "count" is the
        # number of response times that fell in that latency range.
        latency_hist = netperf.ParseHistogram(stdout)
        hist_metadata = {'histogram': json.dumps(latency_hist)}
        hist_metadata.update(metadata)
        latency_samples.append(
            sample.Sample('%s_Latency_Histogram' % benchmark_name,
                          0,
                          'us',
                          hist_metadata,
                          instance=instance))
    if unit != MBPS:
        for metric_key, metric_name in [
            ('50th Percentile Latency Microseconds', 'p50'),
            ('90th Percentile Latency Microseconds', 'p90'),
            ('99th Percentile Latency Microseconds', 'p99'),
            ('Minimum Latency Microseconds', 'min'),
            ('Maximum Latency Microseconds', 'max'),
            ('Stddev Latency Microseconds', 'stddev')
        ]:
            if metric_key in results:
                latency_samples.append(
                    sample.Sample('%s_Latency_%s' %
                                  (benchmark_name, metric_name),
                                  float(results[metric_key]),
                                  'us',
                                  metadata,
                                  instance=instance))

    return (throughput_sample, latency_samples, latency_hist)
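
# A hypothetical caller-side helper, not from the source: the
# KnownIntermittentError raised above lets a caller distinguish a flaky,
# retryable netperf run from a hard failure without retrying immediately on
# the same (possibly degraded) VMs.
def _ParseOrSkip(stdout, metadata, benchmark_name):
    """Hypothetical helper: parse netperf output, tolerating intermittent failures."""
    try:
        return ParseNetperfOutput(stdout, metadata, benchmark_name,
                                  enable_latency_histograms=False)
    except errors.Benchmarks.KnownIntermittentError:
        # The output was unparseable (e.g. a broken connection); report no
        # result so the run can be re-scheduled rather than failed outright.
        logging.warning('Intermittent netperf failure for %s', benchmark_name)
        return None
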
def RunNetperf(vm, benchmark_name, server_ip):
    """Spawns netperf on a remove VM, parses results.

  Args:
    vm: The VM that the netperf TCP_RR benchmark will be run upon.
    benchmark_name: The netperf benchmark to run, see the documentation.
    server_ip: A machine that is running netserver.

  Returns:
    A sample.Sample object with the result.
  """
    # Flags:
    # -o specifies keys to include in CSV output.
    # -j keeps additional latency numbers
    # -v sets the verbosity level so that netperf will print out histograms
    # -I specifies the confidence % and width - here 99% confidence that the true
    #    value is within +/- 2.5% of the reported value
    # -i specifies the maximum and minimum number of iterations.
    confidence = ('-I 99,5 -i {0},3'.format(FLAGS.netperf_max_iter)
                  if FLAGS.netperf_max_iter else '')
    verbosity = '-v2 ' if FLAGS.netperf_enable_histograms else ''
    netperf_cmd = (
        '{netperf_path} -p {command_port} -j {verbosity}'
        '-t {benchmark_name} -H {server_ip} -l {length} {confidence} '
        ' -- '
        '-P {data_port} '
        '-o THROUGHPUT,THROUGHPUT_UNITS,P50_LATENCY,P90_LATENCY,'
        'P99_LATENCY,STDDEV_LATENCY,'
        'MIN_LATENCY,MAX_LATENCY,'
        'CONFIDENCE_ITERATION,THROUGHPUT_CONFID').format(
            netperf_path=netperf.NETPERF_PATH,
            benchmark_name=benchmark_name,
            server_ip=server_ip,
            command_port=COMMAND_PORT,
            data_port=DATA_PORT,
            length=FLAGS.netperf_test_length,
            confidence=confidence,
            verbosity=verbosity)
    stdout, _ = vm.RemoteCommand(netperf_cmd,
                                 timeout=2 * FLAGS.netperf_test_length *
                                 (FLAGS.netperf_max_iter or 1))

    fp = io.StringIO(stdout)
    # "-o" flag above specifies CSV output, but there is one extra header line:
    banner = next(fp)
    assert banner.startswith('MIGRATED'), stdout
    r = csv.DictReader(fp)
    row = next(r)
    logging.info('Netperf Results: %s', row)
    assert 'Throughput' in row, row

    value = float(row['Throughput'])
    unit = {
        'Trans/s': TRANSACTIONS_PER_SECOND,
        '10^6bits/s': MBPS
    }[row['Throughput Units']]
    if unit == MBPS:
        metric = '%s_Throughput' % benchmark_name
    else:
        metric = '%s_Transaction_Rate' % benchmark_name

    meta_keys = [('Confidence Iterations Run', 'confidence_iter'),
                 ('Throughput Confidence Width (%)',
                  'confidence_width_percent')]
    metadata = {meta_key: row[np_key] for np_key, meta_key in meta_keys}
    metadata.update(netperf_test_length=FLAGS.netperf_test_length,
                    max_iter=FLAGS.netperf_max_iter or 1)

    samples = [sample.Sample(metric, value, unit, metadata)]

    # No tail latency for throughput.
    if unit == MBPS:
        return samples

    if FLAGS.netperf_enable_histograms:
        # Generate a sample containing the entire histogram of
        # latencies.
        hist = netperf.ParseHistogram(stdout)
        hist_metadata = {'histogram': json.dumps(hist)}
        hist_metadata.update(metadata)
        samples.append(
            sample.Sample('%s_Latency_Histogram' % benchmark_name, 0, 'us',
                          hist_metadata))

    for metric_key, metric_name in [
        ('50th Percentile Latency Microseconds', 'p50'),
        ('90th Percentile Latency Microseconds', 'p90'),
        ('99th Percentile Latency Microseconds', 'p99'),
        ('Minimum Latency Microseconds', 'min'),
        ('Maximum Latency Microseconds', 'max'),
        ('Stddev Latency Microseconds', 'stddev')
    ]:
        samples.append(
            sample.Sample('%s_Latency_%s' % (benchmark_name, metric_name),
                          float(row[metric_key]), 'us', metadata))
    return samples
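
# A minimal usage sketch, not from the source, assuming a vm object exposing
# RemoteCommand() as in PerfKitBenchmarker and a netserver already listening
# at server_ip.
def _CollectTcpRrSamples(vm, server_ip):
    """Hypothetical wrapper: run the TCP_RR benchmark and log each sample."""
    samples = RunNetperf(vm, 'TCP_RR', server_ip)
    for s in samples:
        # For TCP_RR this yields a TCP_RR_Transaction_Rate sample plus the
        # p50/p90/p99/min/max/stddev latency samples in microseconds (and, if
        # FLAGS.netperf_enable_histograms is set, a TCP_RR_Latency_Histogram
        # sample whose metadata carries the full histogram).
        logging.info('Netperf result sample: %s', s)
    return samples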