Example #1
def _Run(vm):
    """See base method.

    Args:
      vm: The vm to run the benchmark on.

    Returns:
      A list of sample.Sample objects.
    """
    # Swap only if necessary; free local node memory and avoid remote memory;
    # reset caches; set the stack size to unlimited.
    # Also consider setting the enable_transparent_hugepages flag to True.
    cmd = ('echo 1 | sudo tee /proc/sys/vm/swappiness && '
           'echo 1 | sudo tee /proc/sys/vm/zone_reclaim_mode && '
           'sync ; echo 3 | sudo tee /proc/sys/vm/drop_caches && '
           'ulimit -s unlimited && ')

    cmd += 'runcpu '
    if FLAGS.spec17_build_only:
        cmd += '--action build '
    if FLAGS.spec17_rebuild:
        cmd += '--rebuild '

    version_specific_parameters = []
    # Rate runs require a minimum of 2 GB of system main memory per copy,
    # not including OS overhead. Refer to:
    # https://www.spec.org/cpu2017/Docs/system-requirements.html#memory
    copies = min(vm.NumCpusForBenchmark(),
                 vm.total_free_memory_kb // (2 * KB_TO_GB_MULTIPLIER))
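    # Worked example with hypothetical numbers, assuming KB_TO_GB_MULTIPLIER is
    # the number of kB per GB: a 16-vCPU VM with ~64 GB free
    # (total_free_memory_kb ~= 64,000,000) gets min(16, 32) = 16 copies.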
    version_specific_parameters.append(' --copies=%s ' %
                                       (FLAGS.spec17_copies or copies))
    version_specific_parameters.append(
        ' --threads=%s ' % (FLAGS.spec17_threads or vm.NumCpusForBenchmark()))

    if FLAGS.spec17_fdo:
        version_specific_parameters.append('--feedback ')
        vm.RemoteCommand('cd /scratch/cpu2017; mkdir fdo_profiles')

    start_time = time.time()
    stdout, _ = speccpu.Run(vm, cmd, ' '.join(FLAGS.spec17_subset),
                            version_specific_parameters)

    if FLAGS.spec17_build_only:
        if 'Error' in stdout and 'Please review this file' in stdout:
            raise errors.Benchmarks.RunError('Error during SPEC compilation.')
        return [
            sample.Sample(
                'compilation_time',
                time.time() - start_time, 's', {
                    'spec17_subset': FLAGS.spec17_subset,
                    'gcc_version': build_tools.GetVersion(vm, 'gcc')
                })
        ]

    partial_results = True
    # Do not allow partial results if any benchmark subset is a full suite.
    for benchmark_subset in FLAGS.benchmark_subset:
        if benchmark_subset in ['intspeed', 'fpspeed', 'intrate', 'fprate']:
            partial_results = False

    log_files = set()
    for test in FLAGS.spec17_subset:
        if test in LOG_FILENAME:
            log_files.add(LOG_FILENAME[test])
        else:
            if test in INTSPEED_SUITE:
                log_files.add(LOG_FILENAME['intspeed'])
            elif test in INTRATE_SUITE:
                log_files.add(LOG_FILENAME['intrate'])
            elif test in FPSPEED_SUITE:
                log_files.add(LOG_FILENAME['fpspeed'])
            elif test in FPRATE_SUITE:
                log_files.add(LOG_FILENAME['fprate'])

    samples = speccpu.ParseOutput(vm, log_files, partial_results, None)
    for item in samples:
        item.metadata['vm_name'] = vm.name

    return samples
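

# LOG_FILENAME and the suite membership lists (INTSPEED_SUITE, INTRATE_SUITE,
# FPSPEED_SUITE, FPRATE_SUITE) referenced in _Run above are defined outside
# this snippet: the former maps a suite name to the result log file handed to
# speccpu.ParseOutput, the latter list the benchmarks belonging to each suite.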


def _ExtractScore(stdout, vm, keep_partial_results, runspec_metric):
    """Extracts the SPEC(int|fp) score from stdout.

    Args:
      stdout: String. stdout from running RemoteCommand.
      vm: The vm instance where SPEC CPU was run.
      keep_partial_results: Boolean. True if partial results should be
        extracted in the event that not all benchmarks were successfully run.
        See the "runspec_keep_partial_results" flag for more info.
      runspec_metric: String. Indicates whether this is a SPEC speed or rate
        run.

    Sample input for SPECint (refer to the unit test for more examples):
      ...
      ...Base                                               Peak
      ============================================= ==========================
      400.perlbench    9770        417       23.4 * 9770        417       23.4 *
      401.bzip2        9650        565       17.1 * 9650        565       17.1 *
      403.gcc          8050        364       22.1 *
      429.mcf          9120        364       25.1 *
      445.gobmk       10490        499       21.0 *
      456.hmmer        9330        491       19.0 *
      458.sjeng       12100        588       20.6 *
      462.libquantum  20720        468       44.2 *
      464.h264ref     22130        700       31.6 *
      471.omnetpp      6250        349       17.9 *
      473.astar        7020        482       14.6 *
      483.xalancbmk    6900        248       27.8 *
       Est. SPECint(R)_base2006              22.7
       Est. SPECint(R)_peak2006                                          20

    Sample input for SPECfp:
      ...
      ...Base                                              Peak
      ============================================= ============================
      410.bwaves      13590        717      19.0  * 13550      710       19.0  *
      416.gamess      19580        923      21.2  *
      433.milc         9180        480      19.1  *
      434.zeusmp       9100        600      15.2  *
      435.gromacs      7140        605      11.8  *
      436.cactusADM   11950       1289       9.27 *
      437.leslie3d     9400        859      10.9  *
      444.namd         8020        504      15.9  *
      447.dealII      11440        409      28.0  *
      450.soplex       8340        272      30.6  *
      453.povray       5320        231      23.0  *
      454.calculix     8250        993       8.31 *
      459.GemsFDTD    10610        775      13.7  *
      465.tonto        9840        565      17.4  *
      470.lbm         13740        365      37.7  *
      481.wrf         11170        788      14.2  *
      482.sphinx3     19490        668      29.2  *
       Est. SPECfp(R)_base2006              17.5
       Est. SPECfp(R)_peak2006                                          20

    Returns:
      A list of sample.Sample objects.
    """
    results = []
    speccpu_vm_state = getattr(vm, VM_STATE_ATTR, None)
    re_begin_section = re.compile('^={1,}')
    re_end_section = re.compile(speccpu_vm_state.log_format)
    result_section = []
    in_result_section = False
    at_peak_results_line, peak_name, peak_score = False, None, None

    # Extract the summary section
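    # Per the sample output in the docstring: the section opens at the
    # '====...' separator line and closes at the ' Est. SPEC...' summary line;
    # its regex (speccpu_vm_state.log_format, defined elsewhere) is expected to
    # capture the metric name as group(1) and the overall score as group(2).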
    for line in stdout.splitlines():
        if in_result_section:
            result_section.append(line)
        # Search for the beginning of the result section.
        match = re.search(re_begin_section, line)
        if match:
            assert not in_result_section
            in_result_section = True
            continue
        # Search for the end of the result section.
        match = re.search(re_end_section, line)
        if at_peak_results_line:
            _, peak_name, peak_score = line.split()
            at_peak_results_line = False
        if match:
            assert in_result_section
            spec_name = str(match.group(1))
            if runspec_metric == 'speed':
                spec_name += ':speed'
            try:
                spec_score = float(match.group(2))
            except ValueError:
                # Partial results may get reported as '--' instead of a number.
                spec_score = None
            if FLAGS.spec_runmode != BASE_MODE:
                at_peak_results_line = True
            in_result_section = False
            # remove the final SPEC(int|fp) score, which has only 2 columns.
            result_section.pop()

    metadata = {
        'runspec_config': speccpu_vm_state.runspec_config,
        'runspec_config_md5sum':
            _GenerateMd5sum(speccpu_vm_state.runspec_config),
        'runspec_iterations': str(FLAGS.runspec_iterations),
        'runspec_enable_32bit': str(FLAGS.runspec_enable_32bit),
        'runspec_define': FLAGS.runspec_define,
        'runspec_metric': runspec_metric,
        'spec_runmode': FLAGS.spec_runmode,
        'spec17_copies': FLAGS.spec17_copies,
        'spec17_threads': FLAGS.spec17_threads,
        'spec17_fdo': FLAGS.spec17_fdo,
        'spec17_subset': FLAGS.spec17_subset,
        'gcc_version': build_tools.GetVersion(vm, 'gcc')
    }

    missing_results = []
    scores = []

    for benchmark in result_section:
        # Skip over failed runs, but count them since they make the overall
        # result invalid.
        not_reported = benchmark.count('NR')
        if not_reported > 1 or (not_reported == 1
                                and FLAGS.spec_runmode != PEAK_MODE):
            logging.warning('SPEC CPU missing result: %s', benchmark)
            missing_results.append(str(benchmark.split()[0]))
            continue

        base_score_str, peak_score_str = None, None
        if FLAGS.spec_runmode == BASE_MODE:
            # name, copies/threads, time, score, misc
            name, _, _, base_score_str, _ = benchmark.split()
        elif FLAGS.spec_runmode == PEAK_MODE:
            # name, base_not_reported(NR), copies/threads, time, score, misc
            name, _, _, _, peak_score_str, _ = benchmark.split()
        else:
            # name, copies/threads, base time, base score, base misc,
            #       copies/threads, peak time, peak score, peak misc
            (name, _, _, base_score_str, _, _, _, peak_score_str,
             _) = benchmark.split()
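            # Worked example, using the docstring sample line
            # '400.perlbench    9770   417   23.4 * 9770   417   23.4 *':
            # split() yields 9 fields, so base_score_str == '23.4' and
            # peak_score_str == '23.4'.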
        if runspec_metric == 'speed':
            name += ':speed'
        if base_score_str:
            base_score_float = float(base_score_str)
            scores.append(base_score_float)
            results.append(
                sample.Sample(str(name), base_score_float, '', metadata))
        if peak_score_str:
            peak_score_float = float(peak_score_str)
            results.append(
                sample.Sample(
                    str(name) + ':peak', peak_score_float, '', metadata))

    if spec_score is None and FLAGS.spec_runmode != PEAK_MODE:
        missing_results.append(spec_name)

    if missing_results:
        if keep_partial_results:
            metadata['partial'] = 'true'
            metadata['missing_results'] = ','.join(missing_results)
        else:
            raise errors.Benchmarks.RunError(
                'speccpu: results missing, see log: ' +
                ','.join(missing_results))

    if spec_score:
        results.append(sample.Sample(spec_name, spec_score, '', metadata))
    elif FLAGS.runspec_estimate_spec:
        estimated_spec_score = _GeometricMean(scores)
        results.append(
            sample.Sample('estimated_' + spec_name, estimated_spec_score, '',
                          metadata))
    if peak_score:
        results.append(
            sample.Sample(peak_name, float(peak_score), '', metadata))

    return results
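

# The helper _GeometricMean used above is not part of this snippet. A minimal
# sketch, assuming it takes a non-empty iterable of positive per-benchmark
# scores (the actual helper's signature may differ):
import math


def _GeometricMean(scores):
    """Returns the geometric mean of a non-empty iterable of positive scores."""
    scores = list(scores)
    # exp(mean(log(s))) avoids overflow from multiplying many large scores.
    return math.exp(sum(math.log(s) for s in scores) / len(scores))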