def Run(benchmark_spec):
  """Runs SPEC CPU2006 on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  vm = benchmark_spec.vms[0]

  version_specific_parameters = []
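  # SPECrate runs one concurrent copy per vCPU, while SPECspeed runs a single
  # copy, so only the rate metric needs an explicit copy count.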
  if FLAGS.runspec_metric == 'rate':
    version_specific_parameters.append(' --rate=%s ' % vm.NumCpusForBenchmark())
  else:
    version_specific_parameters.append(' --speed ')
  speccpu.Run(vm, 'runspec',
              FLAGS.benchmark_subset, version_specific_parameters)

  log_files = []
  # FIXME(liquncheng): Only reference runs generate SPEC scores. The log
  # id is hardcoded as 001, which might change with different runspec
  # parameters. SPEC CPU2006 will generate different logs for build, test
  # run, training run and ref run.
  if FLAGS.benchmark_subset in _SPECINT_BENCHMARKS | set(['int', 'all']):
    log_files.append('CINT2006.001.ref.txt')
  if FLAGS.benchmark_subset in _SPECFP_BENCHMARKS | set(['fp', 'all']):
    log_files.append('CFP2006.001.ref.txt')
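  # Only the full subsets in _SPECCPU_SUBSETS (e.g. 'int', 'fp', 'all') yield
  # an aggregate SPEC score; anything narrower is parsed as partial results.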
  partial_results = FLAGS.benchmark_subset not in _SPECCPU_SUBSETS

  return speccpu.ParseOutput(vm, log_files, partial_results,
                             FLAGS.runspec_metric)
def Run(benchmark_spec):
  """Runs SPEC CPU2017 on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  vm = benchmark_spec.vms[0]
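  # Raise the stack limit (130000 kB) before invoking runcpu; some CPU2017
  # workloads overflow the typical 8 MB default stack.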
  speccpu.Run(vm, 'ulimit -s 130000 && runcpu', ' '.join(FLAGS.spec17_subset))

  log_files = set()
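  # LOG_FILENAME maps suite names to their result logs; individual tests are
  # mapped back to the log of the suite they belong to.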
  for test in FLAGS.spec17_subset:
    if test in LOG_FILENAME:
      log_files.add(LOG_FILENAME[test])
    elif test in INTSPEED_SUITE:
      log_files.add(LOG_FILENAME['intspeed'])
    elif test in INTRATE_SUITE:
      log_files.add(LOG_FILENAME['intrate'])
    elif test in FPSPEED_SUITE:
      log_files.add(LOG_FILENAME['fpspeed'])
    elif test in FPRATE_SUITE:
      log_files.add(LOG_FILENAME['fprate'])

  return speccpu.ParseOutput(vm, log_files, False, None)
def Run(benchmark_spec):
    """Runs SPEC CPU2017 on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
    vm = benchmark_spec.vms[0]

    # swap only if necessary; free local node memory and avoid remote memory;
    # reset caches; set stack size to unlimited
    # Also consider setting enable_transparent_hugepages flag to true
    cmd = ('echo 1 | sudo tee /proc/sys/vm/swappiness && '
           'echo 1 | sudo tee /proc/sys/vm/zone_reclaim_mode && '
           'sync ; echo 3 | sudo tee /proc/sys/vm/drop_caches && '
           'ulimit -s unlimited && ')

    cmd += 'runcpu '

    lscpu = vm.CheckLsCpu()
    version_specific_parameters = []
    # rate runs require 2 GB minimum system main memory per copy,
    # not including os overhead
    # Refer to: https://www.spec.org/cpu2017/Docs/system-requirements.html#memory
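    # total_free_memory_kb is in kB, so 2 * KB_TO_GB_MULTIPLIER represents the
    # 2 GB-per-copy floor; copies are also capped at the physical core count.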
    copies = min(lscpu.cores_per_socket * lscpu.socket_count,
                 vm.total_free_memory_kb // (2 * KB_TO_GB_MULTIPLIER))
    version_specific_parameters.append(' --copies=%s ' %
                                       (FLAGS.spec17_copies or copies))
    version_specific_parameters.append(' --threads=%s ' %
                                       (FLAGS.spec17_threads or vm.num_cpus))

    if FLAGS.spec17_fdo:
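        # --feedback enables feedback-directed optimization; fdo_profiles is
        # presumably where the SPEC config keeps the training profiles.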
        version_specific_parameters.append('--feedback ')
        vm.RemoteCommand('cd /scratch/cpu2017; mkdir fdo_profiles')

    speccpu.Run(vm, cmd, ' '.join(FLAGS.spec17_subset),
                version_specific_parameters)

    log_files = set()
    for test in FLAGS.spec17_subset:
        if test in LOG_FILENAME:
            log_files.add(LOG_FILENAME[test])
        elif test in INTSPEED_SUITE:
            log_files.add(LOG_FILENAME['intspeed'])
        elif test in INTRATE_SUITE:
            log_files.add(LOG_FILENAME['intrate'])
        elif test in FPSPEED_SUITE:
            log_files.add(LOG_FILENAME['fpspeed'])
        elif test in FPRATE_SUITE:
            log_files.add(LOG_FILENAME['fprate'])

    return speccpu.ParseOutput(vm, log_files, False, None)
Example #4
def Run(benchmark_spec):
    """Runs SPEC CPU2017 on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
    vm = benchmark_spec.vms[0]

    # swap only if necessary; free local node memory and avoid remote memory;
    # reset caches; set stack size to unlimited
    # Also consider setting enable_transparent_hugepages flag to true
    cmd = ('echo 1 | sudo tee /proc/sys/vm/swappiness && '
           'echo 1 | sudo tee /proc/sys/vm/zone_reclaim_mode && '
           'sync ; echo 3 | sudo tee /proc/sys/vm/drop_caches && '
           'ulimit -s unlimited && ')

    # use numa node interleave policy if running rate
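    # (numactl must prefix the runcpu command itself, and interleaving spreads
    # each copy's pages across all NUMA nodes to balance memory bandwidth)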
    if FLAGS.spec17_interleave_numa and ('intrate' in FLAGS.spec17_subset
                                         or 'fprate' in FLAGS.spec17_subset):
        cmd += 'numactl --interleave=all '

    cmd += 'runcpu '

    lscpu = vm.CheckLsCpu()
    version_specific_parameters = []
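    # Default the copy count to one per physical core (cores per socket times
    # sockets) unless --spec17_copies overrides it.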
    version_specific_parameters.append(
        ' --copies=%s ' %
        (FLAGS.spec17_copies or lscpu.cores_per_socket * lscpu.socket_count))
    version_specific_parameters.append(' --threads=%s ' %
                                       (FLAGS.spec17_threads or vm.num_cpus))
    if FLAGS.spec17_fdo:
        version_specific_parameters.append('--feedback ')

    speccpu.Run(vm, cmd, ' '.join(FLAGS.spec17_subset),
                version_specific_parameters)

    log_files = set()
    for test in FLAGS.spec17_subset:
        if test in LOG_FILENAME:
            log_files.add(LOG_FILENAME[test])
        elif test in INTSPEED_SUITE:
            log_files.add(LOG_FILENAME['intspeed'])
        elif test in INTRATE_SUITE:
            log_files.add(LOG_FILENAME['intrate'])
        elif test in FPSPEED_SUITE:
            log_files.add(LOG_FILENAME['fpspeed'])
        elif test in FPRATE_SUITE:
            log_files.add(LOG_FILENAME['fprate'])

    return speccpu.ParseOutput(vm, log_files, False, None)
Example #5
def _Run(vm):
    """See base method.

  Args:
    vm: The vm to run the benchmark on.

  Returns:
    A list of sample.Sample objects.
  """
    # swap only if necessary; free local node memory and avoid remote memory;
    # reset caches; set stack size to unlimited
    # Also consider setting enable_transparent_hugepages flag to true
    cmd = ('echo 1 | sudo tee /proc/sys/vm/swappiness && '
           'echo 1 | sudo tee /proc/sys/vm/zone_reclaim_mode && '
           'sync ; echo 3 | sudo tee /proc/sys/vm/drop_caches && '
           'ulimit -s unlimited && ')

    cmd += 'runcpu '
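    # --action build only compiles the selected benchmarks; --rebuild forces a
    # recompile even when binaries already exist.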
    if FLAGS.spec17_build_only:
        cmd += '--action build '
    if FLAGS.spec17_rebuild:
        cmd += '--rebuild '

    version_specific_parameters = []
    # rate runs require 2 GB minimum system main memory per copy,
    # not including os overhead. Refer to:
    # https://www.spec.org/cpu2017/Docs/system-requirements.html#memory
    copies = min(vm.NumCpusForBenchmark(),
                 vm.total_free_memory_kb // (2 * KB_TO_GB_MULTIPLIER))
    version_specific_parameters.append(' --copies=%s ' %
                                       (FLAGS.spec17_copies or copies))
    version_specific_parameters.append(
        ' --threads=%s ' % (FLAGS.spec17_threads or vm.NumCpusForBenchmark()))

    if FLAGS.spec17_fdo:
        version_specific_parameters.append('--feedback ')
        vm.RemoteCommand('cd /scratch/cpu2017; mkdir fdo_profiles')

    start_time = time.time()
    stdout, _ = speccpu.Run(vm, cmd, ' '.join(FLAGS.spec17_subset),
                            version_specific_parameters)

    if FLAGS.spec17_build_only:
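        # A build-only run produces no result logs; check stdout for compile
        # errors and report the wall-clock compilation time instead.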
        if 'Error' in stdout and 'Please review this file' in stdout:
            raise errors.Benchmarks.RunError('Error during SPEC compilation.')
        return [
            sample.Sample(
                'compilation_time',
                time.time() - start_time, 's', {
                    'spec17_subset': FLAGS.spec17_subset,
                    'gcc_version': build_tools.GetVersion(vm, 'gcc')
                })
        ]

    partial_results = True
    # Do not allow partial results if any benchmark subset is a full suite.
    for benchmark_subset in FLAGS.benchmark_subset:
        if benchmark_subset in ['intspeed', 'fpspeed', 'intrate', 'fprate']:
            partial_results = False

    log_files = set()
    for test in FLAGS.spec17_subset:
        if test in LOG_FILENAME:
            log_files.add(LOG_FILENAME[test])
        elif test in INTSPEED_SUITE:
            log_files.add(LOG_FILENAME['intspeed'])
        elif test in INTRATE_SUITE:
            log_files.add(LOG_FILENAME['intrate'])
        elif test in FPSPEED_SUITE:
            log_files.add(LOG_FILENAME['fpspeed'])
        elif test in FPRATE_SUITE:
            log_files.add(LOG_FILENAME['fprate'])

    samples = speccpu.ParseOutput(vm, log_files, partial_results, None)
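    # Tag each sample with the VM name so results can be attributed to the
    # machine that produced them.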
    for item in samples:
        item.metadata['vm_name'] = vm.name

    return samples