from absl import flags

from perfkitbenchmarker import flag_util
from perfkitbenchmarker import regex_util

NVIDIA_DRIVER_LOCATION_BASE = 'https://us.download.nvidia.com/tesla'

NVIDIA_TESLA_K80 = 'k80'
NVIDIA_TESLA_P4 = 'p4'
NVIDIA_TESLA_P100 = 'p100'
NVIDIA_TESLA_V100 = 'v100'
NVIDIA_TESLA_T4 = 't4'
NVIDIA_TESLA_A100 = 'a100'
NVIDIA_TESLA_A10G = 'a10g'

EXTRACT_CLOCK_SPEEDS_REGEX = r'(\d*).*,\s*(\d*)'
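# A quick sketch (not part of the original module) of what the regex above
# captures; the sample line is illustrative of "memory clock, graphics clock"
# output queried from nvidia-smi.
import re

_match = re.search(EXTRACT_CLOCK_SPEEDS_REGEX, '2505 MHz, 875 MHz')
assert _match.groups() == ('2505', '875')  # (memory clock, graphics clock)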

flag_util.DEFINE_integerlist(
    'gpu_clock_speeds', None, 'desired gpu clock speeds in the form '
    '[memory clock, graphics clock]')

flags.DEFINE_boolean('gpu_autoboost_enabled', None,
                     'whether gpu autoboost is enabled')

flags.DEFINE_string(
    'nvidia_driver_version', '510.47.03',
    'The version of the NVIDIA driver to install, '
    'for example "418.67" or "418.87.01".')
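# Hypothetical helper (a sketch, not the module's API): the installer URL is
# typically the location base above plus the driver version, e.g.
#   https://us.download.nvidia.com/tesla/510.47.03/NVIDIA-Linux-x86_64-510.47.03.run
def _DriverRunfileUrl(version):
  return '%s/%s/NVIDIA-Linux-x86_64-%s.run' % (
      NVIDIA_DRIVER_LOCATION_BASE, version, version)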
flags.DEFINE_boolean(
    'nvidia_driver_force_install', False,
    'Whether to install NVIDIA driver, even if it is already '
    'installed.')

flags.DEFINE_string('nvidia_driver_x_library_path', '/usr/lib',
                    'X library path for the NVIDIA driver installation.')

# Example 2
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import flags

# SPEC SFS 2014 workload names (the SP2 workload set; assumed here, since the
# original excerpt omits this definition).
BENCHMARKS = ['VDI', 'DATABASE', 'SWBUILD', 'VDA', 'EDA']

flags.DEFINE_string(
    'specsfs2014_config', None,
    'This flag can be used to specify an alternate SPEC config file to use. '
    'If this option is specified, none of the other benchmark specific flags '
    'which operate on the config file will be used (since the default config '
    'file will be replaced by this one).')
flags.DEFINE_list('specsfs2014_benchmarks', BENCHMARKS,
                  'The SPEC SFS 2014 benchmarks to run.')
flags.register_validator(
    'specsfs2014_benchmarks',
    lambda benchmarks: benchmarks and set(benchmarks).issubset(BENCHMARKS),
    'Invalid benchmarks list. specsfs2014_benchmarks must be a subset of ' +
    ', '.join(BENCHMARKS))
flag_util.DEFINE_integerlist(
    'specsfs2014_load', [1],
    'The starting load in units of SPEC "business metrics". The meaning of '
    'business metric varies depending on the SPEC benchmark (e.g. VDI has '
    'load measured in virtual desktops).',
    module_name=__name__)
flags.DEFINE_integer('specsfs2014_incr_load',
                     1,
                     'The amount to increment "load" by for each run.',
                     lower_bound=1)
flags.DEFINE_integer(
    'specsfs2014_num_runs',
    1, 'The total number of SPEC runs. The load for the nth run is '
    '"load" + n * "specsfs_incr_load".',
    lower_bound=1)
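# Worked example of the three flags above (values illustrative): with
# --specsfs2014_load=1, --specsfs2014_incr_load=2 and --specsfs2014_num_runs=3,
# the runs are driven at loads 1, 3, and 5.
_loads = [1 + n * 2 for n in range(3)]
assert _loads == [1, 3, 5]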

BENCHMARK_NAME = 'specsfs2014'
BENCHMARK_CONFIG = """
specsfs2014:
"""

# Example 3
"""Module containing CUDA toolkit 8 installation and cleanup functions."""

from perfkitbenchmarker import regex_util
from perfkitbenchmarker import flags
from perfkitbenchmarker import flag_util

TESLA_K80_MAX_CLOCK_SPEEDS = [2505, 875]
flag_util.DEFINE_integerlist(
    'gpu_clock_speeds', flag_util.IntegerList(TESLA_K80_MAX_CLOCK_SPEEDS),
    'desired gpu clock speeds in the form '
    '[memory clock, graphics clock]')

FLAGS = flags.FLAGS

# TODO: Test the CUDA Ubuntu 14.04 installer, and if everything works ok,
# automatically install the correct package depending on the OS image.
CUDA_TOOLKIT_UBUNTU = 'cuda-repo-ubuntu1604_8.0.61-1_amd64.deb'
CUDA_TOOLKIT_UBUNTU_URL = ('http://developer.download.nvidia.com/compute/cuda'
                           '/repos/ubuntu1604/x86_64/%s' % CUDA_TOOLKIT_UBUNTU)
CUDA_TOOLKIT_INSTALL_DIR = '/usr/local/cuda'
EXTRACT_CLOCK_SPEEDS_REGEX = r'(\d*).*,\s*(\d*)'


class UnsupportedClockSpeedException(Exception):
  pass
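
# Hypothetical sketch of how the pieces above fit together: parse the queried
# clocks with the regex and raise UnsupportedClockSpeedException on a
# mismatch. This mirrors the module's intent but is not its exact function.
import re

def _AssertClockSpeeds(requested_speeds, nvidia_smi_line):
  memory_clock, graphics_clock = re.search(
      EXTRACT_CLOCK_SPEEDS_REGEX, nvidia_smi_line).groups()
  if [int(memory_clock), int(graphics_clock)] != list(requested_speeds):
    raise UnsupportedClockSpeedException(
        'cannot set clock speeds to %s' % list(requested_speeds))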

# Example 4
from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import hpc_util
from perfkitbenchmarker import vm_util
from perfkitbenchmarker import flag_util
from perfkitbenchmarker.linux_packages import shoc_benchmark_suite
from perfkitbenchmarker.linux_packages import cuda_toolkit

flags.DEFINE_integer(
    'stencil2d_iterations', 5, 'number of iterations to run', lower_bound=1)

flag_util.DEFINE_integerlist(
    'stencil2d_problem_sizes',
    flag_util.IntegerList([4096]),
    'problem sizes to run. Can specify a single '
    'number, like --stencil2d_problem_sizes=4096 '
    'or a list like --stencil2d_problem_sizes='
    '1024,4096',
    on_nonincreasing=flag_util.IntegerListParser.WARN, module_name=__name__)
FLAGS = flags.FLAGS

MACHINEFILE = 'machinefile'
BENCHMARK_NAME = 'stencil2d'
BENCHMARK_VERSION = '0.25'
BENCHMARK_CONFIG = """
stencil2d:
  description: Runs Stencil2D from SHOC Benchmark Suite.\
      Specify the number of VMs with --num_vms
  vm_groups:
    default:
      vm_spec:
"""

# Example 5
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import flags

BENCHMARK_CONFIG = """
hpcg:
  vm_groups:
    default:
      vm_spec:
        Azure:
          machine_type: Standard_NC6
          zone: eastus
          boot_disk_size: 200
      vm_count: null
"""

flags.DEFINE_integer(
    'hpcg_runtime', 60, 'hpcg runtime in seconds', lower_bound=1)

flags.DEFINE_integer(
    'hpcg_gpus_per_node', None, 'The number of gpus per node.', lower_bound=1)

flag_util.DEFINE_integerlist(
    'hpcg_problem_size',
    flag_util.IntegerList([256, 256, 256]),
    'three dimensional problem size for each node. Must contain '
    'three integers', module_name=__name__)


class HpcgParseOutputException(Exception):
  pass


class HpcgIncorrectProblemSizeException(Exception):
  pass
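

# Hypothetical sketch (not the module's code) of the three-integer requirement
# stated in the --hpcg_problem_size help text above.
def _CheckProblemSize(problem_size):
  if len(problem_size) != 3:
    raise HpcgIncorrectProblemSizeException(
        'hpcg_problem_size must contain exactly three integers, got %s' %
        list(problem_size))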


def GetConfig(user_config):
  """Load and return benchmark config.

# Example 6
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import flags
from perfkitbenchmarker.linux_packages import netperf

flags.DEFINE_integer('netperf_max_iter', None,
                     'Maximum number of iterations to run during '
                     'confidence interval estimation. If unset, '
                     'a single iteration will be run.',
                     lower_bound=3, upper_bound=30)

flags.DEFINE_integer('netperf_test_length', 60,
                     'netperf test length, in seconds',
                     lower_bound=1)
flags.DEFINE_bool('netperf_enable_histograms', True,
                  'Determines whether latency histograms are '
                  'collected/reported. Only for *RR benchmarks')
flag_util.DEFINE_integerlist('netperf_num_streams', flag_util.IntegerList([1]),
                             'Number of netperf processes to run. Netperf '
                             'will run once for each value in the list.',
                             module_name=__name__)
flags.DEFINE_integer('netperf_thinktime', 0,
                     'Time in nanoseconds to do work for each request.')
flags.DEFINE_integer('netperf_thinktime_array_size', 0,
                     'The size of the array to traverse for thinktime.')
flags.DEFINE_integer('netperf_thinktime_run_length', 0,
                     'The number of contiguous numbers to sum at a time in the '
                     'thinktime array.')

ALL_BENCHMARKS = ['TCP_RR', 'TCP_CRR', 'TCP_STREAM', 'UDP_RR']
flags.DEFINE_list('netperf_benchmarks', ALL_BENCHMARKS,
                  'The netperf benchmark(s) to run.')
flags.register_validator(
    'netperf_benchmarks',
    lambda benchmarks: benchmarks and set(benchmarks).issubset(ALL_BENCHMARKS))
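
# Illustrative sketch only (not the module's Run phase): netperf is launched
# once per value of --netperf_num_streams, and each *RR benchmark can
# additionally report latency histograms when --netperf_enable_histograms is
# set.
for _num_streams in [1, 2, 4]:  # stands in for FLAGS.netperf_num_streams
  pass  # run the selected netperf benchmarks with _num_streams processes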

# Example 7
"""Runs Iperf to collect network throughput."""

import logging
import re

from perfkitbenchmarker import configs
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util

flag_util.DEFINE_integerlist('iperf_sending_thread_count',
                             flag_util.IntegerList([1]),
                             'Number of connections to make to the '
                             'server for sending traffic. Iperf '
                             'will run once for each value in the list',
                             module_name=__name__)
flags.DEFINE_integer('iperf_runtime_in_seconds', 60,
                     'Number of seconds to run iperf.',
                     lower_bound=1)
flags.DEFINE_integer('iperf_timeout', None,
                     'Number of seconds to wait in '
                     'addition to iperf runtime before '
                     'killing iperf client command.',
                     lower_bound=1)

FLAGS = flags.FLAGS

BENCHMARK_NAME = 'iperf'
BENCHMARK_CONFIG = """
iperf:
"""

# Example 8
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import flags

flags.DEFINE_integer('gpu_pcie_bandwidth_iterations',
                     30,
                     'number of iterations to run',
                     lower_bound=1)

flags.DEFINE_enum(
    'gpu_pcie_bandwidth_mode', 'quick', ['quick', 'range'],
    'bandwidth test mode to use. '
    'If range is selected, provide desired range '
    'in flag gpu_pcie_bandwidth_transfer_sizes. '
    'Additionally, if range is selected, the resulting '
    'bandwidth will be averaged over all provided transfer '
    'sizes.')

flag_util.DEFINE_integerlist(
    'gpu_pcie_bandwidth_transfer_sizes',
    flag_util.IntegerList(
        [DEFAULT_RANGE_START, DEFAULT_RANGE_END, DEFAULT_RANGE_STEP]),
    'range of transfer sizes to use in bytes. '
    'Only used if gpu_pcie_bandwidth_mode is set to range')
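
# Worked sketch of the 'range' mode described above (numbers illustrative):
# one bandwidth result per transfer size, averaged into a single value.
_bandwidths_gbs = [11.8, 12.1, 12.0]
_mean_bandwidth = sum(_bandwidths_gbs) / len(_bandwidths_gbs)
assert round(_mean_bandwidth, 2) == 11.97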

FLAGS = flags.FLAGS

BENCHMARK_NAME = 'gpu_pcie_bandwidth'
BENCHMARK_CONFIG = """
gpu_pcie_bandwidth:
  description: Runs NVIDIA's CUDA bandwidth test.
  vm_groups:
    default:
      vm_spec:
        GCP:
          image: ubuntu-1604-xenial-v20161115
          image_project: ubuntu-os-cloud
"""

# Example 9
from absl import flags

from perfkitbenchmarker import flag_util

flags.DEFINE_integer(
    'memtier_run_duration', None, 'Mutually exclusive with memtier_requests. '
    'Duration for each client count in seconds. '
    'By default, test length is set '
    'by memtier_requests, the number of requests sent by each '
    'client. By specifying run_duration, key space remains '
    'the same (from 1 to memtier_requests), but test stops '
    'once run_duration is passed. '
    'Total test duration = run_duration * runs * '
    'len(memtier_clients).')
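# Worked example of the duration formula in the help text above (values
# illustrative): run_duration=60, runs=3, memtier_clients=[50, 100] gives
# 60 * 3 * 2 = 360 seconds of total test time.
assert 60 * 3 * len([50, 100]) == 360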
flags.DEFINE_integer(
    'memtier_requests', 10000, 'Mutually exclusive with memtier_run_duration. '
    'Number of total requests per client. Defaults to 10000.')
flag_util.DEFINE_integerlist(
    'memtier_clients', [50],
    'Comma separated list of number of clients per thread. '
    'Specify more than 1 value to vary the number of clients. '
    'Defaults to [50].')
flag_util.DEFINE_integerlist('memtier_threads', [4],
                             'Number of threads. Defaults to 4.')
flags.DEFINE_integer(
    'memtier_ratio', 9,
    'Set:Get ratio. Defaults to 9x Get versus Sets (9 Gets to '
    '1 Set in 10 total requests).')
flags.DEFINE_integer('memtier_data_size', 32,
                     'Object data size. Defaults to 32 bytes.')
flags.DEFINE_string(
    'memtier_key_pattern', 'R:R',
    'Set:Get key pattern. G for Gaussian distribution, R for '
    'uniform Random, S for Sequential. Defaults to R:R.')

# Example 10
from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import sample

flags.DEFINE_integer(
    'pgbench_scale_factor', 1, 'scale factor used to fill the database',
    lower_bound=1)
flags.DEFINE_integer(
    'pgbench_seconds_per_test', 10, 'number of seconds to run each test phase',
    lower_bound=1)
flags.DEFINE_integer(
    'pgbench_seconds_to_pause_before_steps', 30,
    'number of seconds to pause before each client load step')
flag_util.DEFINE_integerlist(
    'pgbench_client_counts',
    flag_util.IntegerList([1]),
    'array of client counts passed to pgbench', module_name=__name__)
FLAGS = flags.FLAGS


BENCHMARK_NAME = 'pgbench'
BENCHMARK_CONFIG = """
pgbench:
  description: pgbench benchmark for managed PostgreSQL databases
  managed_relational_db:
    engine: postgres
    vm_spec:
      GCP:
        machine_type:
          cpus: 16
          memory: 64GiB
"""

# Example 11
from absl import flags

from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import sample

CODEC_H264 = 'h264'
CODEC_VP9 = 'vp9'
DEFAULT_H264_THREADS_LIST = [4, 8]
DEFAULT_VP9_THREADS_LIST = [1]

_FFMPEG_CODECS = flags.DEFINE_list(
    'ffmpeg_codecs', [CODEC_H264],
    'List of the codecs to use for the transcoding benchmark. '
    'For now, this is some combination of h264 and vp9.')
_FFMPEG_THREADS_LIST = flag_util.DEFINE_integerlist(
    'ffmpeg_threads_list', None,
    'List of threads to give to each ffmpeg job. Defaults to '
    '[4, 8] for h.264 and [1] for vp9.')
_FFMPEG_PARALLELISM_LIST = flag_util.DEFINE_integerlist(
    'ffmpeg_parallelism_list', None,
    'List of ffmpeg-jobs to run in parallel. Defaults to '
    '[number of logical CPUs].')
_FFMPEG_DIR = flags.DEFINE_string(
    'ffmpeg_dir', '/usr/bin',
    'Directory where ffmpeg and ffprobe are located.')

_VALID_CODECS = [CODEC_H264, CODEC_VP9]
flags.register_validator(
    'ffmpeg_codecs', lambda codecs: all([c in _VALID_CODECS for c in codecs]))

FLAGS = flags.FLAGS
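
# Hypothetical helper (a sketch, not the benchmark's actual code) resolving
# the per-codec thread defaults documented above when --ffmpeg_threads_list
# is unset.
def _ThreadsForCodec(codec, threads_flag_value=None):
  if threads_flag_value:
    return list(threads_flag_value)
  return (DEFAULT_H264_THREADS_LIST if codec == CODEC_H264
          else DEFAULT_VP9_THREADS_LIST)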

# Example 12
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import flags

FLAGS = flags.FLAGS

def _DefineMemorySizeFlag(name, default, help, flag_values=FLAGS, **kwargs):
  flags.DEFINE(_MEMORY_SIZE_PARSER, name, default, help, flag_values,
               _UNITS_SERIALIZER, **kwargs)


flags.DEFINE_enum(
    'multichase_chase_type', 'simple', sorted(_CHASES),
    'Chase type to use when executing multichase. Passed to multichase via its '
    '-c flag.')
flags.DEFINE_integer(
    'multichase_chase_arg', 1,
    'Argument to refine the chase type specified with --multichase_chase_type. '
    'Applicable for the following types: {0}.'.format(
        ', '.join(_CHASES_WITH_ARGS)))
flag_util.DEFINE_integerlist(
    'multichase_thread_count', flag_util.IntegerList([1]),
    'Number of threads (one per core), to use when executing multichase. '
    'Passed to multichase via its -t flag.')
_DefineMemorySizeFlag(
    'multichase_memory_size_min', _DEFAULT_MEMORY_SIZE,
    'Memory size to use when executing multichase. Passed to multichase via '
    'its -m flag. If it differs from multichase_memory_size_max, then '
    'multichase is executed multiple times, starting with a memory size equal '
    'to the min and doubling while the memory size does not exceed the max. '
    'Can be specified as a percentage of the total memory on the machine.')
_DefineMemorySizeFlag(
    'multichase_memory_size_max', _DEFAULT_MEMORY_SIZE,
    'Memory size to use when executing multichase. Passed to multichase via '
    'its -m flag. If it differs from multichase_memory_size_min, then '
    'multichase is executed multiple times, starting with a memory size equal '
    'to the min and doubling while the memory size does not exceed the max. '
    'Can be specified as a percentage of the total memory on the machine.')
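# Worked example of the min/max doubling described above (sizes illustrative):
# with min=1 GiB and max=8 GiB, multichase runs at 1, 2, 4, and 8 GiB.
_sizes, _size = [], 1
while _size <= 8:
  _sizes.append(_size)
  _size *= 2
assert _sizes == [1, 2, 4, 8]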

# Example 13
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import flags

flags.DEFINE_enum(
    'fio_target_mode', AGAINST_FILE_WITHOUT_FILL_MODE, [
        AGAINST_DEVICE_WITH_FILL_MODE, AGAINST_DEVICE_WITHOUT_FILL_MODE,
        AGAINST_FILE_WITH_FILL_MODE, AGAINST_FILE_WITHOUT_FILL_MODE
    ], 'Whether to run against a raw device or a file, and whether '
    'to prefill.')
flags.DEFINE_string(
    'fio_fill_size', '100%', 'The amount of device to fill in prepare stage. '
    'The valid value can either be an integer, which '
    'represents the number of bytes to fill or a '
    'percentage, which represents the percentage '
    'of the device. A filesystem will be unmounted before '
    'filling and remounted afterwards. Only valid when '
    '--fio_target_mode is against_device_with_fill or '
    'against_file_with_fill.')
flag_util.DEFINE_integerlist('fio_io_depths', [1],
                             'IO queue depths to run on. Can specify a single '
                             'number, like --fio_io_depths=1, a range, like '
                             '--fio_io_depths=1-4, or a list, like '
                             '--fio_io_depths=1-4,6-8',
                             on_nonincreasing=flag_util.IntegerListParser.WARN)
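# Sketch of the range syntax in the help text above, assuming the
# flag_util.IntegerListParser semantics implied there (inclusive ranges).
_depths = flag_util.IntegerListParser().parse('1-4,6-8')
assert list(_depths) == [1, 2, 3, 4, 6, 7, 8]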
flags.DEFINE_integer('fio_working_set_size',
                     None,
                     'The size of the working set, in GB. If not given, use '
                     'the full size of the device. If using '
                     '--fio_generate_scenarios and not running against a raw '
                     'device, you must pass --fio_working_set_size.',
                     lower_bound=0)
flags.DEFINE_integer(
    'fio_run_for_minutes',
    10,
    'Repeat the job scenario(s) for the given number of '
    'minutes. Only valid when using --fio_generate_scenarios. '
    'When using multiple scenarios, each one is run for the '
    'given number of minutes.')

# Example 14
from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import sample

flags.DEFINE_integer(
    'pgbench_scale_factor', 1, 'scale factor used to fill the database',
    lower_bound=1)
flags.DEFINE_integer(
    'pgbench_seconds_per_test', 10, 'number of seconds to run each test phase',
    lower_bound=1)
flags.DEFINE_integer(
    'pgbench_seconds_to_pause_before_steps', 30,
    'number of seconds to pause before each client load step')
flag_util.DEFINE_integerlist(
    'pgbench_client_counts',
    flag_util.IntegerList([1, 2, 4, 8, 16, 32, 64]),
    'array of client counts passed to pgbench')
FLAGS = flags.FLAGS


BENCHMARK_NAME = 'pgbench'
BENCHMARK_CONFIG = """
pgbench:
  description: pgbench benchmark for managed PostgreSQL databases
  managed_relational_db:
    engine: postgres
    vm_spec:
      GCP:
        machine_type:
          cpus: 16
          memory: 64GiB
"""

# Example 15
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import flags

FLAGS = flags.FLAGS

flags.DEFINE_string('sysbench_testname', 'oltp_read_write',
                    'The built-in OLTP Lua script to run.')
flags.DEFINE_integer('sysbench_tables', 4,
                     'The number of tables used in sysbench oltp.lua tests')
flags.DEFINE_integer('sysbench_table_size', 100000,
                     'The number of rows of each table used in the oltp tests')
flags.DEFINE_integer('sysbench_warmup_seconds', 120,
                     'The duration of the warmup run in which results are '
                     'discarded, in seconds.')
flags.DEFINE_integer('sysbench_run_seconds', 480,
                     'The duration of the actual run in which results are '
                     'collected, in seconds.')
flag_util.DEFINE_integerlist(
    'sysbench_thread_counts',
    flag_util.IntegerList([1, 2, 4, 8, 16, 32, 64]),
    'array of thread counts passed to sysbench, one at a time')
flags.DEFINE_integer('sysbench_latency_percentile', 100,
                     'The latency percentile we ask sysbench to compute.')
flags.DEFINE_integer('sysbench_report_interval', 2,
                     'The interval, in seconds, we ask sysbench to report '
                     'results.')

BENCHMARK_NAME = 'sysbench'
BENCHMARK_CONFIG = """
sysbench:
  description: Sysbench OLTP benchmarks.
  managed_relational_db:
    engine: mysql
    vm_spec:
        GCP:
          machine_type: n1-standard-8
          zone: us-central1-a
        Azure:
          machine_type: Standard_F8s_v2
          zone: eastus2
        AWS:
          boot_disk_size: 200
          machine_type: m5.2xlarge
          zone: us-east-1f
      os_type: ubuntu1804
"""

# Example 16
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import flags

flags.DEFINE_integer(
    'tf_serving_runtime', 60, 'benchmark runtime in seconds', lower_bound=1)
flag_util.DEFINE_integerlist(
    'tf_serving_client_thread_counts', [16, 32],
    'number of client worker threads',
    module_name=__name__)


class ClientWorkloadScriptExecutionError(Exception):
  pass


def GetConfig(user_config):
  """Loads and returns benchmark config.

  Args:
    user_config: user supplied configuration (flags and config file)

  Returns:
    loaded benchmark configuration
  """

# Example 17
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import flags

flags.DEFINE_enum(
    'fio_target_mode', AGAINST_FILE_WITHOUT_FILL_MODE, [
        AGAINST_DEVICE_WITH_FILL_MODE, AGAINST_DEVICE_WITHOUT_FILL_MODE,
        AGAINST_FILE_WITH_FILL_MODE, AGAINST_FILE_WITHOUT_FILL_MODE
    ], 'Whether to run against a raw device or a file, and whether '
    'to prefill.')
flags.DEFINE_string(
    'fio_fill_size', '100%', 'The amount of device to fill in prepare stage. '
    'The valid value can either be an integer, which '
    'represents the number of bytes to fill or a '
    'percentage, which represents the percentage '
    'of the device. A filesystem will be unmounted before '
    'filling and remounted afterwards. Only valid when '
    '--fio_target_mode is against_device_with_fill or '
    'against_file_with_fill.')
flag_util.DEFINE_integerlist('fio_io_depths',
                             flag_util.IntegerList([1]),
                             'IO queue depths to run on. Can specify a single '
                             'number, like --fio_io_depths=1, a range, like '
                             '--fio_io_depths=1-4, or a list, like '
                             '--fio_io_depths=1-4,6-8',
                             on_nonincreasing=flag_util.IntegerListParser.WARN,
                             module_name=__name__)
flag_util.DEFINE_integerlist('fio_num_jobs',
                             flag_util.IntegerList([1]),
                             'Number of concurrent fio jobs to run.',
                             on_nonincreasing=flag_util.IntegerListParser.WARN,
                             module_name=__name__)
flags.DEFINE_integer('fio_working_set_size',
                     None,
                     'The size of the working set, in GB. If not given, use '
                     'the full size of the device. If using '
                     '--fio_generate_scenarios and not running against a raw '
                     'device, you must pass --fio_working_set_size.',
                     lower_bound=0)

# Example 18
from absl import flags

from perfkitbenchmarker import flag_util
from perfkitbenchmarker.linux_packages import pgbench

flags.DEFINE_integer('pgbench_scale_factor',
                     1,
                     'scale factor used to fill the database',
                     lower_bound=1)
flags.DEFINE_integer('pgbench_seconds_per_test',
                     10,
                     'number of seconds to run each test phase',
                     lower_bound=1)
flags.DEFINE_integer(
    'pgbench_seconds_to_pause_before_steps', 30,
    'number of seconds to pause before each client load step')
flag_util.DEFINE_integerlist('pgbench_client_counts',
                             flag_util.IntegerList([1]),
                             'array of client counts passed to pgbench',
                             module_name=__name__)
flag_util.DEFINE_integerlist(
    'pgbench_job_counts',
    flag_util.IntegerList([]),
    'array of job counts passed to pgbench. Job count '
    'is the number of worker threads within pgbench. '
    'When this is empty, pgbench is run with job counts equal to '
    'client counts. If this is specified, it must have the same length as '
    'pgbench_client_counts.',
    module_name=__name__)
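# Hypothetical check (a sketch, not the benchmark's code) for the length
# constraint stated in the pgbench_job_counts help text above.
def _JobCountsValid(job_counts, client_counts):
  return not job_counts or len(job_counts) == len(client_counts)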
FLAGS = flags.FLAGS

BENCHMARK_NAME = 'pgbench'
BENCHMARK_CONFIG = """
pgbench:
"""

# Example 19
from absl import flags

from perfkitbenchmarker import flag_util

flags.DEFINE_integer(
    'sysbench_table_size', 100000,
    'The number of rows of each table used in the oltp tests')
flags.DEFINE_integer('sysbench_scale', 100,
                     'Scale parameter as used by TPCC benchmark.')
flags.DEFINE_integer(
    'sysbench_warmup_seconds', 10,
    'The duration of the warmup run in which results are '
    'discarded, in seconds.')
flags.DEFINE_integer(
    'sysbench_run_seconds', 10,
    'The duration of the actual run in which results are '
    'collected, in seconds.')
flag_util.DEFINE_integerlist(
    'sysbench_thread_counts',
    flag_util.IntegerList([64]),
    'array of thread counts passed to sysbench, one at a time',
    module_name=__name__)
flags.DEFINE_integer('sysbench_latency_percentile', 100,
                     'The latency percentile we ask sysbench to compute.')
flags.DEFINE_integer(
    'sysbench_report_interval', 2,
    'The interval, in seconds, we ask sysbench to report '
    'results.')
flags.DEFINE_integer(
    'sysbench_pre_failover_seconds', 0,
    'If non-zero, then after the sysbench workload is '
    'complete, a failover test will be performed.  '
    'When a failover test is run, the database will be driven '
    'using the last entry in sysbench_thread_counts.  After '
    'sysbench_pre_failover_seconds, a failover will be '
    'triggered.')

# Example 20
import dataclasses
import re

from absl import flags

from perfkitbenchmarker import flag_util

# Hypothetical name; the opening of this regex assignment was truncated in
# the original snippet.
_NODE_INFO_RE = re.compile(r'(?P<nodename>\S+)\s+'
                           r'.*?(?P<cpuids>[\d,-]+)')
_PKB_NODE_RE = re.compile(r'pkb-(?P<pkbid>.*?)-(?P<nodeindex>\d+)')

# parameters to pass into the benchmark
_NUMBER_ITERATIONS = flags.DEFINE_integer(
    'omb_iterations', None, 'Number of iterations to run in a test.')
_SYNC_OPTION = flags.DEFINE_string('omb_sync_option', None,
                                   '--sync-option value to pass in')

_NUM_SERVER_THREADS = flags.DEFINE_integer('omb_server_threads', None,
                                           'Number of server threads to use.')
_NUM_RECEIVER_THREADS = flags.DEFINE_integer(
    'omb_receiver_threads', None, 'Number of receiver threads to use.')
flag_util.DEFINE_integerlist(
    'omb_mpi_processes', flag_util.IntegerList([1, 0]),
    'MPI processes to use per host.  1=One process, 0=only real cores')
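
# Hypothetical sketch of the 0 sentinel documented above: 0 means one MPI
# process per real (physical) core rather than a literal process count.
def _MpiProcessCount(requested, physical_cores):
  return physical_cores if requested == 0 else requested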

_MPI_PERHOST = flags.DEFINE_integer('omb_perhost', 1, 'MPI option -perhost.')


@dataclasses.dataclass(frozen=True)
class _RunType:
    """Metadata about a benchmark.

  Attributes:
    columns: The columns in the output.
    value_column: The column that should be used as a sample.Sample value.
    units: The units of the value_column.
    supports_full: Whether this benchmark supports --full.
    long_running: Whether this benchmark takes a long time to run.