# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains cloudharmony block storage benchmark installation functions."""

import json
import os
from perfkitbenchmarker import flags
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_packages import fio
from perfkitbenchmarker.linux_packages import INSTALL_DIR

flags.DEFINE_list(
    'ch_params', [],
    'A list of comma separated "key=value" parameters passed into '
    'cloud harmony benchmarks.')

# Name of the CloudHarmony benchmark repository and the directory on the VM
# where it is cloned.
BENCHMARK = 'block-storage'
INSTALL_PATH = os.path.join(INSTALL_DIR, BENCHMARK)
# Suffix identifying metrics collected during the steady-state measurement
# window of a CloudHarmony run.
STEADY_STATE_MEASUREMENT_WINDOW = '-ssmw'


def _Install(vm):
    """Installs the CloudHarmony block-storage benchmark on the VM.

    Installs fio from the package manager (CloudHarmony doesn't work well
    with v2.7 fio), then php and build tools, and finally clones the
    CloudHarmony repository into INSTALL_PATH.
    """
    vm.InstallPackages('fio')  # CloudHarmony doesn't work well with v2.7 fio
    vm.Install('php')
    vm.Install('build_tools')
    clone_cmd = ('git clone https://github.com/cloudharmony/{benchmark}.git '
                 '{dir}').format(benchmark=BENCHMARK, dir=INSTALL_PATH)
    vm.RemoteCommand(clone_cmd)
# Groupings of the fio target-mode constants (the individual mode constants
# are defined earlier in the file, outside this view): modes that run against
# a raw device, and modes that prefill the target before measuring.
AGAINST_DEVICE_MODES = {
    AGAINST_DEVICE_WITH_FILL_MODE, AGAINST_DEVICE_WITHOUT_FILL_MODE
}
FILL_TARGET_MODES = {
    AGAINST_DEVICE_WITH_FILL_MODE, AGAINST_FILE_WITH_FILL_MODE
}

# Command-line flags controlling fio job selection and target mode.
# --fio_jobfile and --fio_generate_scenarios are mutually exclusive.
flags.DEFINE_string(
    'fio_jobfile', None,
    'Job file that fio will use. If not given, use a job file '
    'bundled with PKB. Cannot use with '
    '--fio_generate_scenarios.')
flags.DEFINE_list(
    'fio_generate_scenarios', [],
    'Generate a job file with the given scenarios. Special '
    'scenario \'all\' generates all scenarios. Available '
    'scenarios are sequential_write, sequential_read, '
    'random_write, and random_read. Cannot use with '
    '--fio_jobfile.')
flags.DEFINE_enum(
    'fio_target_mode', AGAINST_FILE_WITHOUT_FILL_MODE, [
        AGAINST_DEVICE_WITH_FILL_MODE, AGAINST_DEVICE_WITHOUT_FILL_MODE,
        AGAINST_FILE_WITH_FILL_MODE, AGAINST_FILE_WITHOUT_FILL_MODE
    ], 'Whether to run against a raw device or a file, and whether '
    'to prefill.')
flags.DEFINE_string(
    'fio_fill_size', '100%', 'The amount of device to fill in prepare stage. '
    'The valid value can either be an integer, which '
    'represents the number of bytes to fill or a '
    'percentage, which represents the percentage '
    'of the device. A filesystem will be unmounted before '
                  'Determines whether latency histograms are '
                  'collected/reported. Only for *RR benchmarks')
# Flags controlling how netperf is invoked. The thinktime flags inject
# artificial per-request CPU work to simulate application processing.
flag_util.DEFINE_integerlist('netperf_num_streams', flag_util.IntegerList([1]),
                             'Number of netperf processes to run. Netperf '
                             'will run once for each value in the list.',
                             module_name=__name__)
flags.DEFINE_integer('netperf_thinktime', 0,
                     'Time in nanoseconds to do work for each request.')
flags.DEFINE_integer('netperf_thinktime_array_size', 0,
                     'The size of the array to traverse for thinktime.')
flags.DEFINE_integer('netperf_thinktime_run_length', 0,
                     'The number of contiguous numbers to sum at a time in the '
                     'thinktime array.')

# All netperf test types this benchmark knows how to run; the validator
# rejects any --netperf_benchmarks value outside this set.
ALL_BENCHMARKS = ['TCP_RR', 'TCP_CRR', 'TCP_STREAM', 'UDP_RR']
flags.DEFINE_list('netperf_benchmarks', ALL_BENCHMARKS,
                  'The netperf benchmark(s) to run.')
flags.register_validator(
    'netperf_benchmarks',
    lambda benchmarks: benchmarks and set(benchmarks).issubset(ALL_BENCHMARKS))

FLAGS = flags.FLAGS

BENCHMARK_NAME = 'netperf'
# Default benchmark config: two single-core VMs acting as netperf client and
# server. (Stray splice artifacts — 'Example #4', '0' and an unrelated
# 'default' vm_group — were removed; they corrupted the YAML.)
BENCHMARK_CONFIG = """
netperf:
  description: Run TCP_RR, TCP_CRR, UDP_RR and TCP_STREAM
  vm_groups:
    vm_1:
      vm_spec: *default_single_core
    vm_2:
      vm_spec: *default_single_core
"""

SECONDS_PER_HOUR = 60 * 60

# Flags controlling the HPCC benchmark build and run parameters.
flags.DEFINE_integer('memory_size_mb',
                     None,
                     'The amount of memory in MB on each machine to use. By '
                     'default it will use the entire system\'s memory.')
flags.DEFINE_string('hpcc_binary', None,
                    'The path of prebuilt hpcc binary to use. If not provided, '
                    'this benchmark built its own using OpenBLAS.')
flags.DEFINE_list('hpcc_mpi_env', [],
                  'Comma separated list containing environment variables '
                  'to use with mpirun command. e.g. '
                  'MKL_DEBUG_CPU_TYPE=7,MKL_ENABLE_INSTRUCTIONS=AVX512')
flags.DEFINE_integer('hpcc_timeout_hours', 4,
                     'The number of hours to wait for the HPCC binary to '
                     'complete before timing out and assuming it failed.')


def GetConfig(user_config):
  """Loads the benchmark config, merging in any user-supplied overrides.

  Args:
    user_config: dict. User-supplied config overrides.

  Returns:
    The merged benchmark configuration dict.
  """
  return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)


def CheckPrerequisites(_):
  """Verifies that the required resources are present.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
Example #5
0
    'Defaults to 1.')
# Flags controlling the memtier_benchmark load generator (client counts,
# request volume, object sizes, and key access patterns).
flags.DEFINE_integer(
    'memtier_run_duration', None, 'Duration for each client count in seconds. '
    'By default, test length is set '
    'by memtier_requests, the number of requests sent by each '
    'client. By specifying run_duration, key space remains '
    'the same (from 1 to memtier_requests), but test stops '
    'once run_duration is passed. '
    'Total test duration = run_duration * runs * '
    'len(memtier_clients).')
flags.DEFINE_integer(
    'memtier_requests', 10000,
    'Number of total requests per client. Defaults to 10000.')
flags.DEFINE_list(
    'memtier_clients', [50],
    'Comma separated list of number of clients per thread. '
    'Specify more than 1 value to vary the number of clients. '
    'Defaults to [50].')
flags.DEFINE_list('memtier_threads', [4], 'Number of threads. Defaults to 4.')
flags.DEFINE_integer(
    'memtier_ratio', 9,
    'Set:Get ratio. Defaults to 9x Get versus Sets (9 Gets to '
    '1 Set in 10 total requests).')
flags.DEFINE_integer('memtier_data_size', 32,
                     'Object data size. Defaults to 32 bytes.')
flags.DEFINE_string(
    'memtier_key_pattern', 'R:R',
    'Set:Get key pattern. G for Gaussian distribution, R for '
    'uniform Random, S for Sequential. Defaults to R:R.')
flags.DEFINE_list(
    'memtier_pipeline', [1],
Example #6
0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing flags applicable across benchmark run on GCP."""

from perfkitbenchmarker import flags

# Sentinel value for unspecified platform.
GCP_MIN_CPU_PLATFORM_NONE = 'none'

# Flags applicable to every GCP benchmark run (gcloud invocation, local SSDs,
# scopes, maintenance and preemptibility behavior).
flags.DEFINE_string('gcloud_path', 'gcloud',
                    'The path for the gcloud utility.')
flags.DEFINE_list('additional_gcloud_flags', [],
                  'Additional flags to pass to gcloud.')
flags.DEFINE_integer(
    'gce_num_local_ssds', 0,
    'The number of ssds that should be added to the VM. Note '
    'that this is currently only supported in certain zones '
    '(see https://cloud.google.com/compute/docs/local-ssd).')
flags.DEFINE_string(
    'gcloud_scopes', None, 'If set, space-separated list of '
    'scopes to apply to every created machine')
flags.DEFINE_boolean('gce_migrate_on_maintenance', True, 'If true, allow VM '
                     'migration on GCE host maintenance.')
flags.DEFINE_boolean('gce_preemptible_vms', False, 'If true, use preemptible '
                     'VMs on GCE.')
flags.DEFINE_string(
    'image_family', None, 'The family of the image that the boot disk will be '
    'initialized with. The --image flag will take priority over this flag. See:'
Example #7
0
        Azure:
          image: Canonical:UbuntuServer:16.04.0-LTS:latest
          machine_type: Standard_NC6
          zone: eastus
"""

# Compute device identifiers for --mx_device.
GPU = 'gpu'
CPU = 'cpu'
# Input shape (channels,height,width) used for ImageNet-style models.
IMAGENET_SHAPE = '3,299,299'

# Networks supported by the MXNet benchmark; the validator rejects any
# --mx_models value outside this list.
MODELS = [
    'alexnet', 'googlenet', 'inception-bn', 'inception-resnet-v2',
    'inception-v3', 'inception-v4', 'lenet', 'mlp', 'mobilenet', 'resnet-v1',
    'resnet', 'resnext', 'vgg'
]
flags.DEFINE_list('mx_models', ['inception-v3', 'vgg', 'alexnet', 'resnet'],
                  'The network to train')
flags.register_validator(
    'mx_models', lambda models: models and set(models).issubset(MODELS),
    'Invalid models list. mx_models must be a subset of ' + ', '.join(MODELS))
flags.DEFINE_integer('mx_batch_size', None, 'The batch size for SGD training.')
flags.DEFINE_integer('mx_num_epochs', 80,
                     'The maximal number of epochs to train.')
flags.DEFINE_enum('mx_device', GPU, [CPU, GPU],
                  'Device to use for computation: cpu or gpu')
flags.DEFINE_integer(
    'mx_num_layers', None, 'Number of layers in the neural '
    'network, required by some networks such as resnet')

# Fallback batch size when no per-model default applies.
DEFAULT_BATCH_SIZE = 64
DEFAULT = 'default'
DEFAULT_BATCH_SIZES_BY_MODEL = {
Example #8
0
# limitations under the License.
"""Module containing aerospike server installation and cleanup functions."""

import logging
import tempfile

from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import INSTALL_DIR

FLAGS = flags.FLAGS

# Where the Aerospike ACT tool is cloned from and installed to.
GIT_REPO = 'https://github.com/aerospike/act.git'
ACT_DIR = '%s/act' % INSTALL_DIR
# Flags controlling the ACT storage certification run.
flags.DEFINE_list('act_load', ['1.0'],
                  'Load multiplier for act test per device.')
flags.DEFINE_boolean('act_parallel', False,
                     'Run act tools in parallel. One copy per device.')
flags.DEFINE_integer('act_duration', 86400, 'Duration of act test in seconds.')
flags.DEFINE_integer('act_reserved_partitions', 0,
                     'Number of partitions reserved (not being used by act).')
flags.DEFINE_integer(
    'act_num_queues', None,
    'Total number of transaction queues. Default is number of'
    ' cores, detected by ACT at runtime.')
flags.DEFINE_integer(
    'act_threads_per_queue', None, 'Number of threads per '
    'transaction queue. Default is 4 threads/queue.')
# TODO(user): Support user provided config file.
ACT_CONFIG_TEMPLATE = """
device-names: {devices}
Example #9
0
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util

from perfkitbenchmarker.linux_packages import gluster

FLAGS = flags.FLAGS
# SPEC SFS 2014 workload names; the validator rejects anything outside this
# list.
BENCHMARKS = ['VDI', 'DATABASE', 'SWBUILD', 'VDA']

flags.DEFINE_string(
    'specsfs2014_config', None,
    'This flag can be used to specify an alternate SPEC config file to use. '
    'If this option is specified, none of the other benchmark specific flags '
    'which operate on the config file will be used (since the default config '
    'file will be replaced by this one).')
flags.DEFINE_list('specsfs2014_benchmarks', BENCHMARKS,
                  'The SPEC SFS 2014 benchmarks to run.')
flags.register_validator(
    'specsfs2014_benchmarks',
    lambda benchmarks: benchmarks and set(benchmarks).issubset(BENCHMARKS),
    'Invalid benchmarks list. specsfs2014_benchmarks must be a subset of ' +
    ', '.join(BENCHMARKS))
flag_util.DEFINE_integerlist(
    'specsfs2014_load', [1],
    'The starting load in units of SPEC "business metrics". The meaning of '
    'business metric varies depending on the SPEC benchmark (e.g. VDI has '
    'load measured in virtual desktops).',
    module_name=__name__)
flags.DEFINE_integer('specsfs2014_incr_load',
                     1,
                     'The amount to increment "load" by for each run.',
                     lower_bound=1)
    'The number of outstanding I/O per thread per target.'
    'Defaults: 2.')

# Flags controlling DiskSpd throughput throttling, test file size, and the
# set of access-pattern configurations to run.
flags.DEFINE_integer(
    'diskspd_throughput_per_ms', None, 'The throughput per thread per target. '
    'Defaults: None. Unit: bytes per ms.')

flags.DEFINE_integer(
    'diskspd_file_size', 819200,
    'The file size DiskSpd will create when testing. '
    'Defaults: 819200. Unit: KB.')

flags.DEFINE_list(
    'diskspd_config_list', None,
    'comma separated list of configs to run with diskspd. The '
    'format for a single config is RANDOM_ACCESS:IS_READ:BLOCK_SIZE, '
    'for example FALSE:TRUE:64. '
    'Default Behavior: diskspd benchmark test will try to combine '
    '--diskspd_access_pattern, --diskspd_write_read_ratio, '
    '--diskspd_block_size together and form a set of configs to run.')

# Number of attempts when fetching/running DiskSpd before giving up.
DISKSPD_RETRIES = 10
# DiskSpd release directory/zip names and the download URL built from them.
DISKSPD_DIR = 'DiskSpd-2.0.21a'
DISKSPD_ZIP = DISKSPD_DIR + '.zip'
DISKSPD_URL = ('https://gallery.technet.microsoft.com/DiskSpd-A-Robust-Storage'
               '-6ef84e62/file/199535/2/' + DISKSPD_ZIP)
# Names of the scratch test file and the XML results file on the VM.
DISKSPD_TMPFILE = 'testfile.dat'
DISKSPD_XMLFILE = 'result.xml'
# Command timeout is the run duration multiplied by this factor.
DISKSPD_TIMEOUT_MULTIPLIER = 3

# Accepted spellings when parsing booleans out of a diskspd config string.
TRUE_VALS = ['True', 'true', 't', 'TRUE']
FALSE_VALS = ['False', 'false', 'f', 'FALSE']
Example #11
0
# Flags describing the managed (cloud-hosted) relational database under test
# and the client VM used to drive it.
flags.DEFINE_string('managed_db_database_username', None,
                    'Database username. Defaults to '
                    'pkb-db-user-[run-uri]')
flags.DEFINE_string('managed_db_database_password', None,
                    'Database password. Defaults to '
                    'a random 10-character alpha-numeric string')
flags.DEFINE_boolean('managed_db_high_availability', False,
                     'Specifies if the database should be high availability')
flags.DEFINE_boolean('managed_db_backup_enabled', True,
                     'Whether or not to enable automated backups')
flags.DEFINE_string('managed_db_backup_start_time', '07:00',
                    'Time in UTC that automated backups (if enabled) '
                    'will be scheduled. In the form HH:MM UTC. '
                    'Defaults to 07:00 UTC')
flags.DEFINE_list('managed_db_zone', None,
                  'zone or region to launch the database in. '
                  'Defaults to the client vm\'s zone.')
flags.DEFINE_string('client_vm_zone', None,
                    'zone or region to launch the client in. ')
flags.DEFINE_string('managed_db_machine_type', None,
                    'Machine type of the database.')
flags.DEFINE_integer('managed_db_cpus', None,
                     'Number of Cpus in the database.')
flags.DEFINE_string('managed_db_memory', None,
                    'Amount of Memory in the database.  Uses the same format '
                    'string as custom machine memory type.')
flags.DEFINE_integer('managed_db_disk_size', None,
                     'Size of the database disk in GB.')
flags.DEFINE_string('managed_db_disk_type', None, 'Disk type of the database.')
flags.DEFINE_string('client_vm_machine_type', None,
                    'Machine type of the client vm.')
    'bsearch', 'context', 'heapsort', 'hsearch', 'lockbus', 'lsearch',
    'malloc', 'matrix', 'membarrier', 'memcpy', 'memfd', 'mergesort',
    'mincore', 'null', 'numa', 'oom-pipe', 'pipe', 'qsort', 'stack', 'str',
    'stream', 'tsearch', 'vm', 'vm-rw', 'wcs', 'zero', 'zlib'
}
# Run the stressors that are each part of all of the compute related stress-ng
# classes: cpu, cpu-cache, and memory.
DEFAULT_STRESSORS = sorted(
    CPU_SUITE.intersection(CPU_CACHE_SUITE).intersection(MEMORY_SUITE))

flags.DEFINE_integer('stress_ng_duration', 10,
                     'Number of seconds to run the test.')
flags.DEFINE_boolean('stress_ng_calc_geomean', True,
                     'Whether to calculate geomean or not.')
flags.DEFINE_list(
    'stress_ng_custom_stressors', DEFAULT_STRESSORS,
    'List of stressors to run against. Default combines cpu, '
    'cpu-cache, and memory suites')
flags.DEFINE_list('stress_ng_cpu_methods', [],
                  'List of cpu methods to run with. By default none are ran.')

# Workload sizes accepted by --stress_ng_thread_workloads.
ALL_WORKLOADS = ['small', 'medium', 'large']
flags.DEFINE_list(
    'stress_ng_thread_workloads', ['large'],
    'List of thread sizes to run against. Options are '
    'small (1 thread total), medium (1 thread per 2 cpus), and '
    'large (1 thread per cpu).')
flags.register_validator(
    'stress_ng_thread_workloads',
    lambda workloads: workloads and set(workloads).issubset(ALL_WORKLOADS),
    'Invalid workloads list. stress_ng_thread_workloads must be a subset of ' +
    ', '.join(ALL_WORKLOADS))

# stress-ng versions this benchmark supports.
ALL_VERSIONS = ['0.05.23', '0.09.25']
Example #13
0
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing flags applicable across benchmark run on GCP."""

from perfkitbenchmarker import flags

# Sentinel value for unspecified platform.
GCP_MIN_CPU_PLATFORM_NONE = 'none'

# Flags applicable to every GCP benchmark run.
flags.DEFINE_string('gcloud_path', 'gcloud', 'The path for the gcloud utility.')
flags.DEFINE_list('additional_gcloud_flags', [],
                  'Additional flags to pass to gcloud.')
flags.DEFINE_integer(
    'gce_num_local_ssds', 0,
    'The number of ssds that should be added to the VM. Note '
    'that this is currently only supported in certain zones '
    '(see https://cloud.google.com/compute/docs/local-ssd).')
flags.DEFINE_string(
    'gcloud_scopes', None, 'If set, space-separated list of '
    'scopes to apply to every created machine')
flags.DEFINE_boolean('gce_migrate_on_maintenance', True, 'If true, allow VM '
                     'migration on GCE host maintenance.')
flags.DEFINE_boolean('gce_preemptible_vms', False, 'If true, use preemptible '
                     'VMs on GCE.')
flags.DEFINE_string(
    'image_family', None, 'The family of the image that the boot disk will be '
    'initialized with. The --image flag will take priority over this flag. See:'
Example #14
0
BENCHMARK_CONFIG = """
edw_benchmark:
  description: Sample edw benchmark
  edw_service:
    type: redshift
    username: masteruser
    password: masterpassword
    node_type: dc1.large
    node_count: 2
    snapshot:
  vm_groups:
    client:
      vm_spec: *default_single_core
"""
flags.DEFINE_list('edw_benchmark_scripts', 'sample.sql', 'Comma separated '
                  'list of scripts.')

FLAGS = flags.FLAGS


def GetConfig(user_config):
    """Loads the benchmark config, merging in any user-supplied overrides.

    Args:
      user_config: dict. User-supplied config overrides.

    Returns:
      The merged benchmark configuration dict.
    """
    return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)


def Prepare(benchmark_spec):
    """Prepare phase: installs pgbench on the client VM.

    Args:
      benchmark_spec: The benchmark spec; its first VM is used as the client.
    """
    client_vm = benchmark_spec.vms[0]
    client_vm.Install('pgbench')


def Run(benchmark_spec):
    """Run phase executes the sql scripts on edw cluster and collects duration."""
          disk_size: 1000
          disk_type: pd-standard
        AWS:
          disk_size: 1000
          disk_type: gp2
    worker_count: 2
"""

# Query suites available to --dpb_sparksql_query, mapped to display names.
BENCHMARK_NAMES = {'tpcds_2_4': 'TPC-DS', 'tpch': 'TPC-H'}

flags.DEFINE_string('dpb_sparksql_data', None,
                    'The dataset to run Spark SQL query')
flags.DEFINE_enum('dpb_sparksql_query', 'tpcds_2_4', BENCHMARK_NAMES.keys(),
                  'The query suite to run on dpb_sparksql_data.')
flags.DEFINE_list(
    'dpb_sparksql_order', [],
    'The names (numbers) of the queries to run in order. '
    'If omitted all queries are run in lexicographic order.')
flags.DEFINE_string(
    'spark_bigquery_connector',
    'gs://spark-lib/bigquery/spark-bigquery-latest.jar',
    'The Spark BigQuery Connector jar to pass to the Spark Job')
flags.DEFINE_list(
    'bigquery_tables', [],
    'A list of BigQuery tables to load as Temporary Spark SQL views instead '
    'of reading from external Hive tables.')
flags.DEFINE_string(
    'bigquery_record_format', None,
    'The record format to use when connecting to BigQuery storage. See: '
    'https://github.com/GoogleCloudDataproc/spark-bigquery-connector#properties'
)
Example #16
0
# Flags controlling YCSB load and run stages and result reporting.
flags.DEFINE_boolean('ycsb_histogram', False, 'Include individual '
                     'histogram results from YCSB (will increase sample '
                     'count).')
flags.DEFINE_boolean('ycsb_load_samples', True, 'Include samples '
                     'from pre-populating database.')
flags.DEFINE_boolean('ycsb_include_individual_results', False,
                     'Include results from each client VM, rather than just '
                     'combined results.')
flags.DEFINE_boolean('ycsb_reload_database', True,
                     'Reload database, otherwise skip load stage. '
                     'Note, this flag is only used if the database '
                     'is already loaded.')
flags.DEFINE_integer('ycsb_client_vms', 1, 'Number of YCSB client VMs.',
                     lower_bound=1)
flags.DEFINE_list('ycsb_workload_files', ['workloada', 'workloadb'],
                  'Path to YCSB workload file to use during *run* '
                  'stage only. Comma-separated list')
flags.DEFINE_list('ycsb_load_parameters', [],
                  'Passed to YCSB during the load stage. Comma-separated list '
                  'of "key=value" pairs.')
flags.DEFINE_list('ycsb_run_parameters', [],
                  'Passed to YCSB during the run stage. Comma-separated list '
                  'of "key=value" pairs.')
flags.DEFINE_list('ycsb_threads_per_client', ['32'], 'Number of threads per '
                  'loader during the benchmark run. Specify a list to vary the '
                  'number of clients.')
flags.DEFINE_integer('ycsb_preload_threads', None, 'Number of threads per '
                     'loader during the initial data population stage. '
                     'Default value depends on the target DB.')
flags.DEFINE_integer('ycsb_record_count', 1000000, 'Pre-load with a total '
                     'dataset of records total.')
import logging
import threading

from perfkitbenchmarker import disk
from perfkitbenchmarker import flags
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import os_types
from perfkitbenchmarker import resource
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import windows_virtual_machine

FLAGS = flags.FLAGS

# Only static VMs whose tags appear in this list will be created; others
# listed in the config are skipped.
flags.DEFINE_list(
    'static_vm_tags', None,
    'The tags of static VMs for PKB to run with. Even if other '
    'VMs are specified in a config, if they aren\'t in this list '
    'they will be skipped during VM creation.')


class StaticVmSpec(virtual_machine.BaseVmSpec):
    """Object containing all info needed to create a Static VM."""

    CLOUD = 'Static'

    def __init__(self,
                 component_full_name,
                 ip_address=None,
                 user_name=None,
                 ssh_private_key=None,
                 internal_ip=None,
from perfkitbenchmarker import hpc_util
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import ior

FLAGS = flags.FLAGS

# Flags controlling the IOR and mdtest MPI-based filesystem benchmarks.
flags.DEFINE_integer('ior_num_procs', 256,
                     'The number of MPI processes to use for IOR.')
flags.DEFINE_string(
    'ior_script', 'default_ior_script', 'The IOR script to run. See '
    'https://github.com/hpc/ior/blob/master/doc/sphinx/userDoc/skripts.rst '
    'for more info.')
flags.DEFINE_integer('mdtest_num_procs', 32,
                     'The number of MPI processes to use for mdtest.')
flags.DEFINE_list(
    'mdtest_args', ['-n 1000 -u'],
    'Command line arguments to be passed to mdtest. '
    'Each set of args in the list will be run separately.')
flags.DEFINE_boolean(
    'mdtest_drop_caches', True,
    'Whether to drop caches between the create/stat/delete phases. '
    'If this is set, mdtest will be run 3 times with the -C, -T, and -r '
    'options and the client page caches will be dropped between runs. '
    'When False, a Full Sweep (Create, Stat, Delete) is run.')

BENCHMARK_NAME = 'ior'
BENCHMARK_CONFIG = """
ior:
  description: Runs IOR and mdtest benchmarks.
  flags:
    data_disk_type: nfs
    data_disk_size: 2048
Example #19
0
        'HPL_ctop',
    },
}

# The names of the benchmarks.
HPCC_BENCHMARKS = sorted(HPCC_METRIC_MAP)


# Math library used to compile hpcc, and the optional subset of HPCC
# benchmarks to run (empty list means run everything).
flags.DEFINE_enum(
    'hpcc_math_library', HPCC_MATH_LIBRARY_OPEN_BLAS, [
        HPCC_MATH_LIBRARY_OPEN_BLAS, HPCC_MATH_LIBRARY_MKL,
        HPCC_MATH_LIBRARY_AMD_BLIS
    ], 'The math library to use when compiling hpcc: openblas, mkl, or '
    'amdblis. The default is openblas.')
flags.DEFINE_list(
    'hpcc_benchmarks', [], 'A list of benchmarks in HPCC to run. If none are '
    'specified (the default), then all of the benchmarks are run. In 1.5.0, '
    'the benchmarks may include the following: %s' % ', '.join(HPCC_BENCHMARKS))
flags.register_validator(
    'hpcc_benchmarks',
    lambda hpcc_benchmarks: set(hpcc_benchmarks).issubset(set(HPCC_BENCHMARKS)))
FLAGS = flags.FLAGS


def _LimitBenchmarksToRun(vm, selected_hpcc_benchmarks):
  """Limits the benchmarks to run.

  This function copies hpcc.c to the local machine, comments out code that runs
  benchmarks not listed in selected_hpcc_benchmarks, and then copies hpcc.c back
  to the remote machine.

  Args:
Example #20
0
from perfkitbenchmarker import traces
from perfkitbenchmarker import version
from perfkitbenchmarker import vm_util
from perfkitbenchmarker import windows_benchmarks
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.linux_benchmarks import cluster_boot_benchmark
from perfkitbenchmarker.publisher import SampleCollector

# Names of the run artifacts written to the run's temp directory.
LOG_FILE_NAME = 'pkb.log'
COMPLETION_STATUS_FILE_NAME = 'completion_statuses.json'
# Metadata keys every benchmark result must carry.
REQUIRED_INFO = ['scratch_disk', 'num_machines']
# Host executables PKB needs on PATH before a run can start.
REQUIRED_EXECUTABLES = frozenset(['ssh', 'ssh-keygen', 'scp', 'openssl'])
MAX_RUN_URI_LENGTH = 12
FLAGS = flags.FLAGS

# Top-level PKB run flags: ssh behavior, benchmark selection, and result
# archiving.
flags.DEFINE_list('ssh_options', [], 'Additional options to pass to ssh.')
flags.DEFINE_boolean('use_ipv6', False, 'Whether to use ipv6 for ssh/scp.')
flags.DEFINE_list('benchmarks', [benchmark_sets.STANDARD_SET],
                  'Benchmarks and/or benchmark sets that should be run. The '
                  'default is the standard set. For more information about '
                  'benchmarks and benchmark sets, see the README and '
                  'benchmark_sets.py.')
flags.DEFINE_string('archive_bucket', None,
                    'Archive results to the given S3/GCS bucket.')
flags.DEFINE_string('project', None, 'GCP project ID under which '
                    'to create the virtual machines')
flags.DEFINE_list(
    'zones', [],
    'A list of zones within which to run PerfKitBenchmarker. '
    'This is specific to the cloud provider you are running on. '
    'If multiple zones are given, PerfKitBenchmarker will create 1 VM in '
Example #21
0
import re
import time

from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util

from perfkitbenchmarker.linux_packages import oldisim_dependencies

FLAGS = flags.FLAGS

# Flags controlling the oldisim tree topology and its latency targets.
flags.DEFINE_integer('oldisim_num_leaves', 4, 'number of leaf nodes',
                     lower_bound=1, upper_bound=64)
flags.DEFINE_list('oldisim_fanout', [],
                  'a list of fanouts to be tested. '
                  'a root can connect to a subset of leaf nodes (fanout). '
                  'the value of fanout has to be smaller than num_leaves.')
flags.DEFINE_enum('oldisim_latency_metric', 'avg',
                  ['avg', '50p', '90p', '95p', '99p', '99.9p'],
                  'Allowable metrics for end-to-end latency')
flags.DEFINE_float('oldisim_latency_target', '30', 'latency target in ms')

# Fixed topology: one load driver and one root node.
NUM_DRIVERS = 1
NUM_ROOTS = 1
BENCHMARK_NAME = 'oldisim'
BENCHMARK_CONFIG = """
oldisim:
  description: >
      Run oldisim. Specify the number of leaf
      nodes with --oldisim_num_leaves
  vm_groups:
Example #22
0
# Flags controlling DiskSpd I/O depth, throttling, file size and configs.
flags.DEFINE_integer('diskspd_outstanding_io', '2',
                     'The number of outstanding I/O per thread per target.'
                     'Defaults: 2.')

flags.DEFINE_integer('diskspd_throughput_per_ms', None,
                     'The throughput per thread per target. '
                     'Defaults: None. Unit: bytes per ms.')

flags.DEFINE_integer('diskspd_file_size', 819200,
                     'The file size DiskSpd will create when testing. '
                     'Defaults: 819200. Unit: KB.')

flags.DEFINE_list(
    'diskspd_config_list',
    'FALSE:TRUE:64,FALSE:FALSE:64,TRUE:TRUE:64,TRUE:FALSE:64',
    'comma separated list of configs to run with diskspd. The '
    'format for a single config is RANDOM_ACCESS:IS_READ:BLOCK_SIZE, '
    'for example FALSE:TRUE:64')

# DiskSpd release artifacts, download location and run-time file names.
DISKSPD_RETRIES = 10
DISKSPD_DIR = 'DiskSpd-2.0.21a'
DISKSPD_ZIP = DISKSPD_DIR + '.zip'
DISKSPD_URL = ('https://gallery.technet.microsoft.com/DiskSpd-A-Robust-Storage'
               '-6ef84e62/file/199535/2/' + DISKSPD_ZIP)
DISKSPD_TMPFILE = 'testfile.dat'
DISKSPD_XMLFILE = 'result.xml'
# Command timeout is the run duration multiplied by this factor.
DISKSPD_TIMEOUT_MULTIPLIER = 3

# Accepted spellings when parsing booleans out of a diskspd config string.
TRUE_VALS = ['True', 'true', 't', 'TRUE']
FALSE_VALS = ['False', 'false', 'f', 'FALSE']
Example #23
0
# code file, so that other processes may wait for completion.
EXECUTE_COMMAND = 'execute_command.py'
# WAIT_FOR_COMMAND waits on the file lock created by EXECUTE_COMMAND,
# then copies the stdout and stderr, exiting with the status of the command run
# by EXECUTE_COMMAND.
WAIT_FOR_COMMAND = 'wait_for_command.py'

flags.DEFINE_bool(
    'setup_remote_firewall', False,
    'Whether PKB should configure the firewall of each remote '
    'VM to make sure it accepts all internal connections.')

flags.DEFINE_list(
    'sysctl', [],
    'Sysctl values to set. This flag should be a comma-separated '
    'list of path=value pairs. Each value will be written to the '
    'corresponding path. For example, if you pass '
    '--sysctl=vm.dirty_background_ratio=10,vm.dirty_ratio=25, '
    'PKB will run "sysctl vm.dirty_background_ratio=10 '
    'vm.dirty_ratio=25" before starting the benchmark.')

flags.DEFINE_list(
    'set_files',
    [],
    'Arbitrary filesystem configuration. This flag should be a '
    'comma-separated list of path=value pairs. Each value will '
    'be written to the corresponding path. For example, if you '
    'pass --set_files=/sys/kernel/mm/transparent_hugepage/enabled=always, '  # noqa
    'then PKB will write "always" to '
    '/sys/kernel/mm/transparent_hugepage/enabled before starting '
    'the benchmark.')
Example #24
0
    'dedicated_hosts', False,
    'If True, use hosts that only have VMs from the same '
    'benchmark running on them.')
# Flags controlling dedicated-host packing, CPU-count overrides, VM metadata
# and firewall-rule creation.
flags.DEFINE_integer(
    'num_vms_per_host', None,
    'The number of VMs per dedicated host. If None, VMs will be packed on a '
    'single host until no more can be packed at which point a new host will '
    'be created.')
flags.DEFINE_integer(
    'num_cpus_override', None,
    'Rather than detecting the number of CPUs present on the machine, use this '
    'value if set. Some benchmarks will use this number to automatically '
    'scale their configurations; this can be used as a method to control '
    'benchmark scaling. It will also change the num_cpus metadata '
    'published along with the benchmark data.')
flags.DEFINE_list(VM_METADATA, [], 'Metadata to add to the vm. It expects'
                  'key:value pairs.')
flags.register_validator(VM_METADATA, ValidateVmMetadataFlag)
flags.DEFINE_bool(
    'skip_firewall_rules', False,
    'If set, this run will not create firewall rules. This is useful if the '
    'user project already has all of the firewall rules in place and/or '
    'creating new ones is expensive')

# Note: If adding a gpu type here, be sure to add it to
# the flag definition in pkb.py too.
VALID_GPU_TYPES = ['k80', 'p100', 'v100', 'p4', 'p4-vws', 't4']


def GetVmSpecClass(cloud):
    """Look up the BaseVmSpec subclass registered for the given cloud."""
    spec_class = spec.GetSpecClass(BaseVmSpec, CLOUD=cloud)
    return spec_class
# ---- Example #25 (chunk separator; stray non-Python text commented out) ----
# NFS "disk" configuration: use a managed service, start an NFS server on
# the first VM, or target a pre-existing static server via --nfs_ip_address.
flags.DEFINE_boolean(
    'nfs_managed', True,
    'Use a managed NFS service if using NFS disks. Otherwise '
    'start an NFS server on the first VM.')
flags.DEFINE_string(
    'nfs_ip_address', None,
    'If specified, PKB will target this ip address when '
    'mounting NFS "disks" rather than provisioning an NFS '
    'Service for the corresponding cloud.')
flags.DEFINE_string(
    'nfs_directory', None,
    'Directory to mount if using a StaticNfsService. This '
    'corresponds to the "VOLUME_NAME" of other NfsService '
    'classes.')
flags.DEFINE_string('smb_version', '3.0', 'SMB version.')
# Extra arguments passed through to mount(8) / the fstab entry.
flags.DEFINE_list('mount_options', [],
                  'Additional arguments to supply when mounting.')
flags.DEFINE_list('fstab_options', [],
                  'Additional arguments to supply to fstab.')

FLAGS = flags.FLAGS

# These are the (deprecated) old disk type names.
# NOTE(review): presumably still accepted for backward compatibility with
# existing configs — confirm against the disk-type translation code.
STANDARD = 'standard'
REMOTE_SSD = 'remote_ssd'
PIOPS = 'piops'  # Provisioned IOPS (SSD) in AWS and Alicloud
REMOTE_ESSD = 'remote_essd'  # Enhanced Cloud SSD in Alicloud

# 'local' refers to disks that come attached to VMs. It is the only
# "universal" disk type that is not associated with a provider. It
# exists because we can provision a local disk without creating a disk
# spec. The Aerospike benchmarks use this fact in their config
# ---- Example #26 (chunk separator; stray non-Python text commented out) ----
import collections
import re
import time
import numpy as np
from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util

# Flags controlling the NCCL benchmark invocation.
flags.DEFINE_integer('nccl_slots', 8,
                     'Launch n processes per node on all allocated nodes')
flags.DEFINE_string(
    'nccl_cuda_visible_devices', None, 'GPU identifiers are '
    'given as integer indices or as UUID strings.')
flags.DEFINE_list('nccl_extra_params', [], 'Export an environment variable')
flags.DEFINE_string('nccl_minbytes', '8', 'Minimum size to start with')
# Fixed copy-pasted help text: maxbytes is the size the sweep ends with,
# not starts with.
flags.DEFINE_string('nccl_maxbytes', '256M', 'Maximum size to end with')
flags.DEFINE_integer('nccl_stepfactor', 2,
                     'Multiplication factor between sizes')
flags.DEFINE_integer('nccl_ngpus', 1, 'Number of gpus per thread.')
flags.DEFINE_boolean('nccl_check', False, 'Check correctness of results.')
flags.DEFINE_integer('nccl_nthreads', 1, 'Number of threads per process')
# Grammar fixes in the two help strings below ("run." -> "runs.").
flags.DEFINE_integer('nccl_num_runs',
                     10,
                     'The number of consecutive runs.',
                     lower_bound=1)
flags.DEFINE_integer('nccl_seconds_between_runs', 10,
                     'Sleep between consecutive runs.')
flags.DEFINE_integer('nccl_iters', 20, 'Number of iterations')
flags.DEFINE_boolean('nccl_install_mofed', False,
# ---- Example #27 (chunk separator; stray non-Python text commented out) ----
from perfkitbenchmarker import traces
from perfkitbenchmarker import version
from perfkitbenchmarker import vm_util
from perfkitbenchmarker import windows_benchmarks
from perfkitbenchmarker.publisher import SampleCollector

# Benchmark lifecycle stage names ('all' covering the individual stages).
STAGE_ALL = 'all'
STAGE_PREPARE = 'prepare'
STAGE_RUN = 'run'
STAGE_CLEANUP = 'cleanup'
LOG_FILE_NAME = 'pkb.log'
# NOTE(review): presumably config keys every benchmark must supply —
# confirm against the config-validation code.
REQUIRED_INFO = ['scratch_disk', 'num_machines']
# External binaries PKB shells out to; presumably verified on PATH at
# startup (see REQUIRED_EXECUTABLES usage elsewhere).
REQUIRED_EXECUTABLES = frozenset(['ssh', 'ssh-keygen', 'scp', 'openssl'])
FLAGS = flags.FLAGS

# Top-level run-control flags for the pkb entry point.
flags.DEFINE_list('ssh_options', [], 'Additional options to pass to ssh.')
flags.DEFINE_integer('parallelism', 1,
                     'The number of benchmarks to run in parallel.')
flags.DEFINE_list(
    'benchmarks', [benchmark_sets.STANDARD_SET],
    'Benchmarks and/or benchmark sets that should be run. The '
    'default is the standard set. For more information about '
    'benchmarks and benchmark sets, see the README and '
    'benchmark_sets.py.')
flags.DEFINE_string(
    'project', None, 'GCP project ID under which '
    'to create the virtual machines')
flags.DEFINE_list(
    'zones', [None], 'A list of zones within which to run PerfKitBenchmarker.'
    ' This is specific to the cloud provider you are running on. '
    'If multiple zones are given, PerfKitBenchmarker will create 1 VM in '
# ---- Example #28 (chunk separator; stray non-Python text commented out) ----
from perfkitbenchmarker import flags

# Ceph access configuration for Kubernetes-provisioned volumes.
flags.DEFINE_string(
    'ceph_secret', None,
    'Name of the Ceph Secret used by Kubernetes in order to '
    'authenticate with Ceph. If provided, overrides keyring.')

flags.DEFINE_string('ceph_keyring', '/etc/ceph/keyring',
                    'Path to the Ceph keyring file.')

flags.DEFINE_string('rbd_pool', 'rbd', 'Name of RBD pool for Ceph volumes.')

flags.DEFINE_string('rbd_user', 'admin', 'Name of RADOS user.')

flags.DEFINE_list(
    'ceph_monitors', [], 'IP addresses and ports of Ceph Monitors. '
    'Must be provided when Ceph scratch disk is required. '
    'Example: "127.0.0.1:6789,192.168.1.1:6789"')

# Docker container access/runtime flags.
flags.DEFINE_string(
    'username', 'root',
    'User name that Perfkit will attempt to use in order to '
    'SSH into Docker instance.')

flags.DEFINE_boolean(
    'docker_in_privileged_mode', True,
    'If set to True, will attempt to create Docker containers '
    'in a privileged mode. Note that some benchmarks execute '
    'commands which are only allowed in privileged mode.')
flags.DEFINE_boolean(
    'kubernetes_anti_affinity', True,
    'If set to True, PKB pods will not be scheduled on the '
# ---- Example #29 (chunk separator; stray non-Python text commented out) ----
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.configs import spec

FLAGS = flags.FLAGS
# NOTE(review): '******' looks like a redacted/scrubbed placeholder rather
# than a real default — confirm the intended default SSH username.
DEFAULT_USERNAME = '******'

# Module-private registries; presumably populated as provider VM/VmSpec
# classes register themselves — confirm against the registration hooks.
_VM_SPEC_REGISTRY = {}
_VM_REGISTRY = {}

flags.DEFINE_boolean(
    'dedicated_hosts', False,
    'If True, use hosts that only have VMs from the same '
    'benchmark running on them.')
# Fixed help text: the adjacent string literals previously concatenated to
# "It expectskey:value pairs" (missing space before 'key:value').
flags.DEFINE_list(
    'vm_metadata', [], 'Metadata to add to the vm '
    'via the provider\'s AddMetadata function. It expects '
    'key:value pairs')
# Accelerator model identifiers accepted by the gpu-related flags.
VALID_GPU_TYPES = ['k80', 'p100', 'v100']


def GetVmSpecClass(cloud):
    """Look up the BaseVmSpec subclass registered for the given cloud."""
    spec_class = spec.GetSpecClass(BaseVmSpec, CLOUD=cloud)
    return spec_class


def GetVmClass(cloud, os_type):
    """Look up the VM class registered for the given cloud and OS type."""
    vm_class = resource.GetResourceClass(
        BaseVirtualMachine, CLOUD=cloud, OS_TYPE=os_type)
    return vm_class
# ---- Example #30 (chunk separator; stray non-Python text commented out) ----
# SPEC CPU 2017 suite names are derived from common benchmark bases: speed
# variants carry an '_s' suffix, rate variants an '_r' suffix.
INTSPEED_SUITE = ['{}_s'.format(name) for name in INT_SUITE]
INTRATE_SUITE = ['{}_r'.format(name) for name in INT_SUITE]

COMMON_FP_SUITE = [
    'bwaves', 'cactuBSSN', 'lbm', 'wrf', 'cam4', 'imagick', 'nab', 'fotonik3d',
    'roms'
]
# pop2 appears only in the speed suite; namd/parest/povray/blender appear
# only in the rate suite, so they are appended outside the comprehension.
FPSPEED_SUITE = ['{}_s'.format(name) for name in COMMON_FP_SUITE] + ['pop2_s']
FPRATE_SUITE = (['{}_r'.format(name) for name in COMMON_FP_SUITE] +
                ['namd_r', 'parest_r', 'povray_r', 'blender_r'])

FLAGS = flags.FLAGS
# Which SPEC CPU 2017 suites/benchmarks to run and how to size them.
flags.DEFINE_list(
    'spec17_subset', ['intspeed', 'fpspeed', 'intrate', 'fprate'],
    'Specify which speccpu2017 tests to run. Accepts a list of '
    'benchmark suites (intspeed, fpspeed, intrate, fprate) '
    'or individual benchmark names. Defaults to all suites.')
flags.DEFINE_integer(
    'spec17_copies', None,
    'Number of copies to run for rate tests. If not set '
    'default to number of cpu cores using lscpu.')
flags.DEFINE_integer(
    'spec17_threads', None,
    'Number of threads to run for speed tests. If not set '
    'default to number of cpu threads using lscpu.')
flags.DEFINE_boolean(
    'spec17_fdo', False, 'Run with feedback directed optimization on peak. '
    'Default to False.')

BENCHMARK_NAME = 'speccpu2017'