Example #1
def _ParseContextSwitching(lines, title, metadata, results):
  """Parse the context switching test results.

  Context switching - times in microseconds - smaller is better.

  Args:
    lines: The lines following context switching title size=* ovr=*.
    title: The context switching subset title.
    metadata: A dictionary of metadata.
    results: A list of samples to be published.

  Context switching test results:
    "size=0k ovr=0.93
      2 11.57
      4 14.81
      8 14.69
      16 12.86
      24 12.50
      32 12.63
      64 13.30
      96 11.45
  """
  size = regex_util.ExtractGroup('"size=([0-9]*)', title)
  ovr = regex_util.ExtractGroup('"size=.* ovr=([0-9]*\\.[0-9]*)', title)

  metadata_clone = metadata.copy()
  metadata_clone['memory_size'] = '%sk' % size
  metadata_clone['ovr'] = ovr
  for line in lines:
    metric_value = line.split()
    current_metadata = metadata_clone.copy()
    current_metadata['num_of_processes'] = int(metric_value[0])
    results.append(
        sample.Sample('context_switching_time', float(metric_value[1]),
                      'microseconds', current_metadata))
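
For context, the title parsing above can be reproduced standalone with the standard re module, using the sample title from the docstring. This is a minimal sketch of what the two ExtractGroup calls return here, not PerfKitBenchmarker's regex_util implementation:

import re

title = '"size=0k ovr=0.93'
size = re.search('"size=([0-9]*)', title).group(1)                 # -> '0'
ovr = re.search('"size=.* ovr=([0-9]*\\.[0-9]*)', title).group(1)  # -> '0.93'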
Example #2
def ParseResults(result, metadata):
  """Parse mcperf result into samples.

  Sample Output:
  #type       avg     std     min      p5     p10     p50     p67
  read      106.0    67.7    37.2    80.0    84.3   101.7   108.8
  update      0.0     0.0     0.0     0.0     0.0     0.0     0.0
  op_q       10.0     0.0     1.0     9.4     9.4     9.7     9.8

  Total QPS = 754451.6 (45267112 / 60.0s)

  Total connections = 8
  Misses = 0 (0.0%)
  Skipped TXs = 0 (0.0%)

  RX 11180976417 bytes :  177.7 MB/s
  TX          0 bytes :    0.0 MB/s
  CPU Usage Stats (avg/min/max): 31.85%,30.31%,32.77%

  Args:
    result: Text output of running mcperf benchmark.
    metadata: metadata associated with the results.

  Returns:
    List of sample.Sample objects and actual qps.
  """
  samples = []
  if FLAGS.mcperf_ratio < 1.0:
    # N/A for write-only workloads.
    misses = regex_util.ExtractGroup(MISS_REGEX, result)
    metadata['miss_rate'] = float(misses)

  latency_stats = regex_util.ExtractGroup(LATENCY_HEADER_REGEX, result).split()
  # Parse latency.
  for metric in ('read', 'update', 'op_q'):
    latency_regex = metric + LATENCY_REGEX
    latency_values = regex_util.ExtractGroup(latency_regex, result).split()
    for idx, stat in enumerate(latency_stats):
      if idx == len(latency_values):
        logging.warning('Mcperf does not report %s latency for %s.',
                        stat, metric)
        break
      samples.append(
          sample.Sample(metric + '_' + stat, float(latency_values[idx]),
                        'usec', metadata))
  # Parse bandwidth.
  for metric in ('TX', 'RX'):
    bw_regex = metric + BANDWIDTH_REGEX
    bw = regex_util.ExtractGroup(bw_regex, result)
    samples.append(sample.Sample(metric, float(bw), 'MB/s', metadata))

  qps = regex_util.ExtractFloat(QPS_REGEX, result)
  samples.append(sample.Sample('qps', qps, 'ops/s', metadata))
  return samples, qps
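
The module-level constants (MISS_REGEX, QPS_REGEX, and friends) are not shown in this excerpt. Against the sample output in the docstring, hypothetical patterns (assumptions, not the module's actual definitions) would behave like:

import re

# Hypothetical patterns standing in for the module's MISS_REGEX / QPS_REGEX,
# which are not shown in this excerpt; applied to the sample output above.
output = 'Total QPS = 754451.6 (45267112 / 60.0s)\nMisses = 0 (0.0%)'
qps = float(re.search(r'Total QPS = (\d+\.\d+)', output).group(1))  # 754451.6
miss_rate = float(
    re.search(r'Misses = \d+ \((\d+\.\d+)%\)', output).group(1))    # 0.0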
Example #3
def ParseResults(result, metadata):
  """Parse mutilate result into samples.

  Sample Output:
  #type       avg     min     1st     5th    10th    90th    95th    99th
  read       52.4    41.0    43.1    45.2    48.1    55.8    56.6    71.5
  update      0.0     0.0     0.0     0.0     0.0     0.0     0.0     0.0
  op_q        1.5     1.0     1.0     1.1     1.1     1.9     2.0     2.0

  Total QPS = 18416.6 (92083 / 5.0s)

  Misses = 0 (0.0%)

  RX   22744501 bytes :    4.3 MB/s
  TX    3315024 bytes :    0.6 MB/s

  Args:
    result: Text output of running mutilate benchmark.
    metadata: metadata associated with the results.

  Returns:
    List of sample.Sample objects and actual qps.
  """
  samples = []
  if FLAGS.mutilate_ratio < 1.0:
    # N/A for write only workloads.
    misses = regex_util.ExtractGroup(MISS_REGEX, result)
    metadata['miss_rate'] = float(misses)

  latency_stats = regex_util.ExtractGroup(LATENCY_HEADER_REGEX, result).split()
  # parse latency
  for metric in ('read', 'update', 'op_q'):
    latency_regex = metric + LATENCY_REGEX
    latency_values = regex_util.ExtractGroup(latency_regex, result).split()
    for idx, stat in enumerate(latency_stats):
      if idx == len(latency_values):
        logging.warning(
            'Mutilate does not report %s latency for %s.', stat, metric)
        break
      samples.append(
          sample.Sample(metric + '_' + stat,
                        float(latency_values[idx]),
                        'usec', metadata))
  # parse bandwidth
  for metric in ('TX', 'RX'):
    bw_regex = metric + BANDWIDTH_REGEX
    bw = regex_util.ExtractGroup(bw_regex, result)
    samples.append(
        sample.Sample(metric, float(bw), 'MB/s', metadata))

  qps = regex_util.ExtractFloat(QPS_REGEX, result)
  samples.append(sample.Sample('qps', qps, 'ops/s', metadata))
  return samples, qps
Example #4
def Run(benchmark_spec):
  """Run openssl-speed on the target vm.

  Sample output:
  OpenSSL 1.1.1k  25 Mar 2021
  built on: Thu Mar 25 20:49:34 2021 UTC
  options:bn(64,64) rc4(16x,int) des(int) aes(partial) blowfish(ptr)
  compiler: gcc -fPIC -pthread -m64 -Wa ...
  evp 730303.56k 2506149.08k 4473725.34k 5640335.56k 6048576.31k 6107063.91k

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  vms = benchmark_spec.vms
  vm = vms[0]
  stdout, _ = vm.RemoteCommand('openssl version')
  version = regex_util.ExtractGroup(r'OpenSSL\s+([\w\.]+)\s+', stdout)
  parallelism = _OPENSSL_SPEED_MULTI.value or vm.NumCpusForBenchmark()
  raw_result, _ = vm.RemoteCommand(
      'openssl speed -elapsed '
      f'-seconds {_OPENSSL_SPEED_DURATION.value} '
      f'-evp {_OPENSSL_SPEED_ALGORITHM.value} '
      f'-multi {parallelism}')

  return ParseOpenSSLOutput(raw_result, version, parallelism)
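
The version extraction can be checked in isolation against the first line of the sample output; a quick sketch using re in place of regex_util:

import re

banner = 'OpenSSL 1.1.1k  25 Mar 2021'
version = re.search(r'OpenSSL\s+([\w\.]+)\s+', banner).group(1)  # -> '1.1.1k'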
Example #5
def GetLastRunUri():
  """Returns the last run_uri used (or None if it can't be determined)."""
  if RunningOnWindows():
    cmd = ['powershell', '-Command',
           'gci %s | sort LastWriteTime | select -last 1' % TEMP_DIR]
  else:
    cmd = ['bash', '-c', 'ls -1t %s | head -1' % TEMP_DIR]
  stdout, _, _ = IssueCommand(cmd)
  try:
    return regex_util.ExtractGroup(r'run_([^\s]*)', stdout)
  except regex_util.NoMatchError:
    return None
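
For illustration, the run_uri extraction behaves like this on a hypothetical directory listing (the entry name is made up for the example):

import re

stdout = 'run_d183b282\n'  # hypothetical newest entry in the temp dir
m = re.search(r'run_([^\s]*)', stdout)
run_uri = m.group(1) if m else None  # -> 'd183b282'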
Example #6
def CollectResultFile(vm, results):
  """Collect result file on vm.

  Args:
    vm: The target vm.
    results: A dictionary of lists. Each list contains results of a field
      defined in RESULTS_METRICS collected from each loader machine.
  """
  result_path = _ResultFilePath(vm)
  vm.PullFile(vm_util.GetTempDir(), result_path)
  resp, _ = vm.RemoteCommand('tail -n 20 ' + result_path)
  for metric in RESULTS_METRICS:
    value = regex_util.ExtractGroup(r'%s[\t ]+: ([\d\.:]+)' % metric, resp)
    if metric == RESULTS_METRICS[-1]:  # Total operation time
      value = value.split(':')
      results[metric].append(
          int(value[0]) * 3600 + int(value[1]) * 60 + int(value[2]))
    else:
      results[metric].append(float(value))
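
The last metric in RESULTS_METRICS ('Total operation time') is reported as HH:MM:SS and folded into seconds; a worked instance with a made-up value:

value = '01:02:03'.split(':')  # hypothetical 'Total operation time' value
seconds = int(value[0]) * 3600 + int(value[1]) * 60 + int(value[2])  # 3723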
Example #7
def _AddProcessorMetricSamples(lmbench_output, processor_metric_list, metadata,
                               results):
  """Parse results for "Processor, Processes - times in microseconds - smaller is better."

  Args:
    lmbench_output: A string containing the test results of lmbench.
    processor_metric_list: A tuple of metrics.
    metadata: A dictionary of metadata.
    results: A list of samples to be published.

  Processor test output:
    Simple syscall: 0.2345 microseconds
    Simple read: 0.3515 microseconds
    Simple write: 0.3082 microseconds
    Simple stat: 0.6888 microseconds
    Simple fstat: 0.3669 microseconds
    Simple open/close: 1.5541 microseconds
    Select on 10 fd's: 0.4464 microseconds
    Select on 100 fd's: 1.0422 microseconds
    Select on 250 fd's: 2.0069 microseconds
    Select on 500 fd's: 3.7366 microseconds
    Select on 10 tcp fd's: 0.5690 microseconds
    Select on 100 tcp fd's: 6.4521 microseconds
    Select on 250 tcp fd's: 16.7513 microseconds
    Select on 500 tcp fd's: 32.8527 microseconds
    Signal handler installation: 0.3226 microseconds
    Signal handler overhead: 1.1736 microseconds
    Protection fault: 0.7491 microseconds
    Pipe latency: 25.5437 microseconds
    AF_UNIX sock stream latency: 25.2813 microseconds
    Process fork+exit: 121.7399 microseconds
    Process fork+execve: 318.6445 microseconds
    Process fork+/bin/sh -c: 800.2188 microseconds
    Pagefaults on /var/tmp/XXX: 0.1639 microseconds
  """
  for metric in processor_metric_list:
    regex = '%s: (.*)' % metric
    value_unit = regex_util.ExtractGroup(regex, lmbench_output)
    [value, unit] = value_unit.split(' ')
    results.append(
        sample.Sample('%s' % metric.replace('\\', ''), float(value), unit,
                      metadata))
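
Each captured group is a value followed by a unit; against one line of the sample output above, the extraction reduces to this re-based sketch (not the regex_util implementation):

import re

line = "Simple syscall: 0.2345 microseconds"
value, unit = re.search('Simple syscall: (.*)', line).group(1).split(' ')
# value -> '0.2345', unit -> 'microseconds'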
Example #8
def ParseHistogram(netperf_stdout):
    """Parses the histogram output from netperf.

    Args:
      netperf_stdout: string. The stdout from netperf containing a histogram.

    Returns:
      A dict mapping latency to sample count or None if the output did not
      contain a histogram.
    """
    # Here is an example of a netperf histogram:
    #
    # Histogram of request/response times
    # UNIT_USEC     :    0:    0:    0:    0:    0:    0:    0:    0:    0:    0
    # TEN_USEC      :    0:    0:    0:    0:    0:    0:    0:    0:    0:    0
    # HUNDRED_USEC  :    0: 433684: 9696:  872:  140:   56:   27:   28:   17:   10
    # UNIT_MSEC     :    0:   24:   57:   40:    5:    2:    0:    0:    0:    0
    # TEN_MSEC      :    0:    0:    0:    0:    0:    0:    0:    0:    0:    0
    # HUNDRED_MSEC  :    0:    0:    0:    0:    0:    0:    0:    0:    0:    0
    # UNIT_SEC      :    0:    0:    0:    0:    0:    0:    0:    0:    0:    0
    # TEN_SEC       :    0:    0:    0:    0:    0:    0:    0:    0:    0:    0
    # >100_SECS: 0
    # HIST_TOTAL:      444658
    histogram_text = regex_util.ExtractGroup('(UNIT_USEC.*?)>100_SECS',
                                             netperf_stdout,
                                             flags=re.S)

    # The total number of usecs that this row of the histogram represents.
    row_size = 10.0
    hist = {}

    for l in histogram_text.splitlines():
        buckets = [int(b) for b in l.split(':')[1:]]
        bucket_size = row_size / len(buckets)
        hist.update({(i * bucket_size): count
                     for i, count in enumerate(buckets) if count})
        # Each row is 10x larger than the previous row.
        row_size *= 10

    return hist
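
To make the row/bucket arithmetic concrete, here is the computation for the HUNDRED_USEC row of the sample histogram (values copied from the comment above):

# The third row (HUNDRED_USEC) spans 10.0 * 10 * 10 = 1000 usec, so each of
# its ten buckets is 100 usec wide; the 433684 requests in bucket index 1
# therefore land at the 100.0 usec key.
row_size = 10.0 * 10 * 10                   # third row: 1000.0 usec
buckets = [0, 433684, 9696, 872, 140, 56, 27, 28, 17, 10]
bucket_size = row_size / len(buckets)       # 100.0 usec per bucket
hist_entry = {1 * bucket_size: buckets[1]}  # {100.0: 433684}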
Example #9
def _GetHDFSOnlineNodeCount(master):
    cmd = '{0} dfsadmin -report'.format(posixpath.join(HADOOP_BIN, 'hdfs'))
    stdout = master.RemoteCommand(cmd)[0]
    avail_str = regex_util.ExtractGroup(r'Live datanodes\s+\((\d+)\):', stdout)
    return int(avail_str)
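
Against a representative line of `hdfs dfsadmin -report` output, the extraction looks like this (the node count is illustrative):

import re

report = 'Live datanodes (3):'  # illustrative dfsadmin -report line
count = int(re.search(r'Live datanodes\s+\((\d+)\):', report).group(1))  # 3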
Example #10
def testNumberedGroup_WholeMatch(self):
  regex = r'test [\da-f]+ (.*)'
  string = 'test 12a3de text'
  self.assertEqual(string, regex_util.ExtractGroup(regex, string, group=0))
Example #11
def testNumberedGroup_Valid(self):
  regex = r'test ([\da-f]+) (.*)'
  string = 'test 12a3de text'
  self.assertEqual('text', regex_util.ExtractGroup(regex, string, group=2))
Example #12
def testNamedGroup(self):
  regex = r'test (?P<hex>[\da-f]+) text'
  string = 'test 12a3de text'
  self.assertEqual('12a3de',
                   regex_util.ExtractGroup(regex, string, group='hex'))
Example #13
def testMatches_Unanchored(self):
  regex = r'([\da-f]+) text'
  string = 'test 12a3de text'
  self.assertEqual('12a3de',
                   regex_util.ExtractGroup(regex, string, group=1))
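
Taken together, these tests pin down ExtractGroup's contract: an unanchored search, group defaulting to the first capture, support for group=0 (whole match) and named groups, and a NoMatchError on failure. A minimal sketch consistent with that contract (an assumption inferred from the tests, not PerfKitBenchmarker's actual code):

import re

class NoMatchError(Exception):
  pass

def ExtractGroup(regex, text, group=1, flags=0):
  """Returns the requested group of the first match, per the tests above."""
  match = re.search(regex, text, flags)
  if not match:
    raise NoMatchError('No match for %r in %r' % (regex, text))
  return match.group(group)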
Example #14
def _UpdataMetadata(lmbench_output, metadata):
    metadata['MB'] = regex_util.ExtractGroup('MB: ([0-9]*)', lmbench_output)
    metadata['BENCHMARK_HARDWARE'] = regex_util.ExtractGroup(
        'BENCHMARK_HARDWARE: (YES|NO)', lmbench_output)
    metadata['BENCHMARK_OS'] = regex_util.ExtractGroup(
        'BENCHMARK_OS: (YES|NO)', lmbench_output)
Example #15
def _GetHDFSOnlineNodeCount(master):
    cmd = HDFS_CMD + ' dfsadmin -report'
    stdout = master.RemoteCommand(cmd)[0]
    avail_str = regex_util.ExtractGroup(r'Live datanodes\s+\((\d+)\):', stdout)
    return int(avail_str)