    'represents the number of bytes to fill or a '
    'percentage, which represents the percentage '
    'of the device. A filesystem will be unmounted before '
    'filling and remounted afterwards. Only valid when '
    '--fio_target_mode is against_device_with_fill or '
    'against_file_with_fill.')
flag_util.DEFINE_integerlist('fio_io_depths', [1],
                             'IO queue depths to run on. Can specify a single '
                             'number, like --fio_io_depths=1, a range, like '
                             '--fio_io_depths=1-4, or a list, like '
                             '--fio_io_depths=1-4,6-8',
                             on_nonincreasing=flag_util.IntegerListParser.WARN)
flags.DEFINE_integer('fio_working_set_size', None,
                     'The size of the working set, in GB. If not given, use '
                     'the full size of the device. If using '
                     '--fio_generate_scenarios and not running against a raw '
                     'device, you must pass --fio_working_set_size.',
                     lower_bound=0)
flags.DEFINE_integer(
    'fio_run_for_minutes', 10,
    'Repeat the job scenario(s) for the given number of '
    'minutes. Only valid when using --fio_generate_scenarios. '
    'When using multiple scenarios, each one is run for the '
    'given number of minutes. Time will be rounded up to the '
    'next multiple of %s minutes.' % MINUTES_PER_JOB,
    lower_bound=0)

FLAGS_IGNORED_FOR_CUSTOM_JOBFILE = {
    'fio_generate_scenarios', 'fio_io_depths', 'fio_run_for_minutes'
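
# Illustrative sketch (an assumption, not code from this module) of the
# rounding described by the --fio_run_for_minutes help text above: the
# requested time is rounded up to the next multiple of MINUTES_PER_JOB.
#
#   import math
#
#   def _RoundUpToJobMultiple(minutes, minutes_per_job=MINUTES_PER_JOB):
#     """E.g. with MINUTES_PER_JOB == 10, 25 minutes rounds up to 30."""
#     return int(math.ceil(minutes / float(minutes_per_job))) * minutes_per_job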
import collections
import ntpath
import time
import xml.etree.ElementTree

from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
import six

FLAGS = flags.FLAGS

flags.DEFINE_integer(
    'ntttcp_threads', 1,
    'The number of client and server threads for NTttcp to run with.')
flags.DEFINE_integer('ntttcp_time', 60,
                     'The number of seconds for NTttcp to run.')
flags.DEFINE_bool('ntttcp_udp', False, 'Whether to run a UDP test.')
flags.DEFINE_integer('ntttcp_cooldown_time', 60,
                     'Time to wait between the test runs.')
flags.DEFINE_integer('ntttcp_packet_size', None,
                     'The size of the packet being used in the test.')
flags.DEFINE_integer(
    'ntttcp_sender_sb', -1,
import unittest

from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import context
from perfkitbenchmarker import flags
from perfkitbenchmarker import os_types
from perfkitbenchmarker import providers
from perfkitbenchmarker import static_virtual_machine as static_vm
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.providers.aws import aws_virtual_machine as aws_vm
from perfkitbenchmarker.providers.gcp import gce_virtual_machine as gce_vm
from perfkitbenchmarker.linux_benchmarks import iperf_benchmark
from tests import mock_flags

flags.DEFINE_integer('benchmark_spec_test_flag', 0, 'benchmark_spec_test flag.')

FLAGS = flags.FLAGS

NAME = 'name'
UID = 'name0'

SIMPLE_CONFIG = """
name:
  vm_groups:
    default:
      vm_spec:
        GCP:
          machine_type: n1-standard-4
          zone: us-central1-c
          project: my-project
"""
FLAGS = flags.FLAGS

GIT_REPO = 'https://github.com/aerospike/aerospike-server.git'
GIT_TAG = '4.0.0.1'
AEROSPIKE_DIR = '%s/aerospike-server' % INSTALL_DIR
AEROSPIKE_CONF_PATH = '%s/as/etc/aerospike_dev.conf' % AEROSPIKE_DIR
AEROSPIKE_DEFAULT_TELNET_PORT = 3003

MEMORY = 'memory'
DISK = 'disk'

flags.DEFINE_enum(
    'aerospike_storage_type', MEMORY, [MEMORY, DISK],
    'The type of storage to use for Aerospike data. The type of '
    'disk is controlled by the "data_disk_type" flag.')
flags.DEFINE_integer('aerospike_replication_factor', 1,
                     'Replication factor for the Aerospike server.')
flags.DEFINE_integer('aerospike_transaction_threads_per_queue', 4,
                     'Number of threads per transaction queue.')


def _Install(vm):
  """Installs the Aerospike server on the VM."""
  vm.Install('build_tools')
  vm.Install('lua5_1')
  vm.Install('openssl')
  vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, AEROSPIKE_DIR))
  # Comment out the -Werror flag and compile. With newer compilers (gcc 7.x),
  # compilation is broken due to warnings.
  vm.RemoteCommand(
      'cd {0} && git checkout {1} && git submodule update --init '
      '&& sed -i "s/COMMON_CFLAGS += -Werror/# $COMMON_CFLAGS += -Werror/" '
    'against_file_with_fill.')
flag_util.DEFINE_integerlist('fio_io_depths',
                             flag_util.IntegerList([1]),
                             'IO queue depths to run on. Can specify a single '
                             'number, like --fio_io_depths=1, a range, like '
                             '--fio_io_depths=1-4, or a list, like '
                             '--fio_io_depths=1-4,6-8',
                             on_nonincreasing=flag_util.IntegerListParser.WARN)
flag_util.DEFINE_integerlist('fio_num_jobs',
                             flag_util.IntegerList([1]),
                             'Number of concurrent fio jobs to run.',
                             on_nonincreasing=flag_util.IntegerListParser.WARN)
flags.DEFINE_integer('fio_working_set_size', None,
                     'The size of the working set, in GB. If not given, use '
                     'the full size of the device. If using '
                     '--fio_generate_scenarios and not running against a raw '
                     'device, you must pass --fio_working_set_size.',
                     lower_bound=0)
flag_util.DEFINE_units(
    'fio_blocksize', None,
    'The block size for fio operations. Default is given by '
    'the scenario when using --generate_scenarios. This '
    'flag does not apply when using --fio_jobfile.',
    convertible_to=units.byte)
flags.DEFINE_integer('fio_runtime', 600,
                     'The number of seconds to run each fio job for.',
                     lower_bound=1)
flags.DEFINE_list(
    'fio_parameters', [],
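
# Hedged sketch of how a spec like "1-4,6-8" from the help text above expands
# into concrete values; this is illustrative, not flag_util's actual parser.
def _ExpandIntegerListSpec(spec):
  """Expands e.g. '1-4,6-8' into [1, 2, 3, 4, 6, 7, 8]."""
  values = []
  for part in spec.split(','):
    if '-' in part:
      low, high = part.split('-')
      values.extend(range(int(low), int(high) + 1))  # ranges are inclusive
    else:
      values.append(int(part))
  return values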
          zone: us-east-1
          boot_disk_size: 200
        Azure:
          machine_type: Standard_NC6
          zone: eastus
"""

GCP_ENV = 'PATH=/tmp/pkb/google-cloud-sdk/bin:$PATH'

flags.DEFINE_string('mnist_data_dir', None, 'mnist train file for tensorflow')
flags.DEFINE_string('imagenet_data_dir',
                    'gs://cloud-tpu-test-datasets/fake_imagenet',
                    'Directory where the input data is stored')
flags.DEFINE_string(
    't2t_data_dir', None,
    'Directory where the input data is stored for tensor2tensor')
flags.DEFINE_integer('imagenet_num_train_images', 1281167,
                     'Size of ImageNet training data set.')
flags.DEFINE_integer('imagenet_num_eval_images', 50000,
                     'Size of ImageNet validation data set.')
flags.DEFINE_integer('mnist_num_train_images', 55000,
                     'Size of MNIST training data set.')
flags.DEFINE_integer('mnist_num_eval_images', 5000,
                     'Size of MNIST validation data set.')
flags.DEFINE_integer('mnist_train_epochs', 37,
                     'Total number of training epochs', lower_bound=1)
flags.DEFINE_integer('mnist_eval_epochs', 1,
                     'Total number of evaluation epochs. If `0`, evaluation '
                     'after training is skipped.')
flags.DEFINE_integer('tpu_iterations', 500,
                     'Number of iterations per TPU training loop.')
flags.DEFINE_integer('mnist_batch_size', 1024,
                     'Mini-batch size for the training. Note that this '
    'zone, until enough VMs are created as specified in each '
    'benchmark. The order in which this flag is applied to VMs is '
    'undefined.')
flags.DEFINE_list(
    'extra_zones', [],
    'Zones that will be appended to the "zones" list. This is functionally '
    'the same, but allows flag matrices to have two zone axes.')
# TODO(user): note that this is currently very GCE specific. Need to create a
# module which can translate from some generic types to provider specific
# nomenclature.
flags.DEFINE_string(
    'machine_type', None,
    'Machine types that will be created for benchmarks that don\'t '
    'require a particular type.')
flags.DEFINE_integer(
    'gpu_count', None,
    'Number of gpus to attach to the VM. Requires gpu_type to be specified.')
flags.DEFINE_enum(
    'gpu_type', None, ['k80', 'p100', 'v100', 'p4', 'p4-vws', 't4'],
    'Type of gpus to attach to the VM. Requires gpu_count to be specified.')
flags.DEFINE_integer(
    'num_vms', 1,
    'For benchmarks which can make use of a '
    'variable number of machines, the number of VMs to use.')
flags.DEFINE_string('image', None,
                    'Default image that will be linked to the VM')
flags.DEFINE_string(
    'run_uri', None,
    'Name of the Run. If provided, this '
    'should be alphanumeric and less than or equal to %d '
    'characters in length.' % MAX_RUN_URI_LENGTH)
flags.DEFINE_boolean(
import io
import json
import logging
import os
import re

from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import netperf

flags.DEFINE_integer('netperf_max_iter', None,
                     'Maximum number of iterations to run during '
                     'confidence interval estimation. If unset, '
                     'a single iteration will be run.',
                     lower_bound=3, upper_bound=30)
flags.DEFINE_integer('netperf_test_length', 60,
                     'netperf test length, in seconds',
                     lower_bound=1)
flags.DEFINE_bool('netperf_enable_histograms', True,
                  'Determines whether latency histograms are '
                  'collected/reported. Only for *RR benchmarks')
flag_util.DEFINE_integerlist('netperf_num_streams',
                             flag_util.IntegerList([1]),
                             'Number of netperf processes to run. Netperf '
                             'will run once for each value in the list.',
}

BENCHMARK_NAME = 'hpcc'
BENCHMARK_CONFIG = """
hpcc:
  description: Runs HPCC. Specify the number of VMs with --num_vms
  vm_groups:
    default:
      vm_spec: *default_single_core
      vm_count: null
"""

SECONDS_PER_HOUR = 60 * 60

flags.DEFINE_integer(
    'memory_size_mb', None,
    'The amount of memory in MB on each machine to use. By '
    'default it will use the entire system\'s memory.')
flags.DEFINE_string(
    'hpcc_binary', None,
    'The path of a prebuilt hpcc binary to use. If not provided, '
    'this benchmark builds its own using OpenBLAS.')
flags.DEFINE_list(
    'hpcc_mpi_env', [],
    'Comma-separated list containing environment variables '
    'to use with the mpirun command, e.g. '
    'MKL_DEBUG_CPU_TYPE=7,MKL_ENABLE_INSTRUCTIONS=AVX512')
flags.DEFINE_integer(
    'hpcc_timeout_hours', 4,
    'The number of hours to wait for the HPCC binary to '
    'complete before timing out and assuming it failed.')
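
# The timeout in seconds follows directly from the flag and the constant above
# (illustrative use, not necessarily verbatim from this module):
#   timeout_seconds = FLAGS.hpcc_timeout_hours * SECONDS_PER_HOUR  # 4 * 3600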
More information about DiskSpd may be found here:
https://gallery.technet.microsoft.com/DiskSpd-a-robust-storage-6cd2f223
"""

import ntpath
import xml.etree.ElementTree

from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util

FLAGS = flags.FLAGS

flags.DEFINE_integer('diskspd_duration', 30,
                     'The number of seconds to run the diskspd test. '
                     'Defaults to 30s. Unit: seconds.')
flags.DEFINE_integer('diskspd_warmup', 5,
                     'The warm-up time for diskspd, the time needed to enter '
                     'a steady state of I/O operation. '
                     'Defaults to 5s. Unit: seconds.')
flags.DEFINE_integer('diskspd_cooldown', 5,
                     'The cool-down time for diskspd, the time to ensure that '
                     'each instance of diskspd is active during each '
                     'measurement period of each instance. '
                     'Defaults to 5s. Unit: seconds.')
flags.DEFINE_integer('diskspd_thread_number_per_file', 1,
                     'The number of threads created per file to '
from perfkitbenchmarker import flags
from perfkitbenchmarker.linux_packages import fio

LOGGING = 'logging'
DATABASE = 'database'
STREAMING = 'streaming'

flags.DEFINE_enum('workload_mode', LOGGING,
                  [LOGGING, DATABASE, STREAMING],
                  'Simulate a logging, database or streaming scenario.')
flags.DEFINE_list('iodepth_list', [],
                  'A list of iodepth values used by the fio command in '
                  'simulated database and streaming scenarios only.')
flags.DEFINE_integer('maxjobs', 0,
                     'The maximum allowed number of jobs to support.')

FLAGS = flags.FLAGS

BENCHMARK_NAME = 'block_storage_workload'
BENCHMARK_CONFIG = """
block_storage_workload:
  description: >
      Runs FIO in sequential, random, read and write modes to
      simulate various scenarios.
  vm_groups:
    default:
      vm_spec: *default_single_core
      disk_spec: *default_500_gb
"""
    'MinLatency(ms)': min,
    'MaxLatency(ms)': max
}

flags.DEFINE_boolean(
    'ycsb_histogram', True,
    'Include individual histogram results from YCSB (will increase sample '
    'count).')
flags.DEFINE_boolean('ycsb_load_samples', True,
                     'Include samples from pre-populating the database.')
flags.DEFINE_boolean(
    'ycsb_include_individual_results', False,
    'Include results from each client VM, rather than just '
    'combined results.')
flags.DEFINE_integer('ycsb_client_vms', 1,
                     'Number of YCSB client VMs.',
                     lower_bound=1)
flags.DEFINE_list(
    'ycsb_workload_files', [],
    'Path to YCSB workload file to use during the *run* '
    'stage only. Comma-separated list.')
flags.DEFINE_list(
    'ycsb_load_parameters', [],
    'Passed to YCSB during the load stage. Comma-separated list '
    'of "key=value" pairs.')
flags.DEFINE_list(
    'ycsb_run_parameters', [],
    'Passed to YCSB during the run stage. Comma-separated list '
    'of "key=value" pairs.')
flags.DEFINE_list(
    'ycsb_threads_per_client', ['32'], 'Number of threads per '
from perfkitbenchmarker.linux_packages import wrk2

FLAGS = flags.FLAGS

_FLAG_FORMAT_DESCRIPTION = (
    'The format is "target_request_rate:duration:threads:connections", with '
    'each value being per client (so running with 2 clients would double the '
    'target rate, threads, and connections (but not duration, since they are '
    'run concurrently)). The target request rate is measured in requests per '
    'second and the duration is measured in seconds.')

flags.DEFINE_string(
    'nginx_conf', None,
    'The path to an Nginx config file that should be applied '
    'to the server instead of the default one.')
flags.DEFINE_integer('nginx_content_size', 10000,
                     'The size of the content Nginx will serve, in bytes.')
flags.DEFINE_list(
    'nginx_load_configs', ['100:60:1:1'],
    'For each load spec in the list, wrk2 will be run once '
    'against Nginx with those parameters. ' + _FLAG_FORMAT_DESCRIPTION)


def _ValidateLoadConfigs(load_configs):
  """Validates that each load config has all required values."""
  if not load_configs:
    return False
  for config in load_configs:
    config_values = config.split(':')
    if len(config_values) != 4:
      return False
    for value in config_values:
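
# Hedged sketch of how the validator above is typically registered; the
# flags.register_validator pattern appears elsewhere in this codebase (see the
# aws_dynamodb flags), but the exact message used here is illustrative.
flags.register_validator(
    'nginx_load_configs', _ValidateLoadConfigs,
    message='Malformed load config. ' + _FLAG_FORMAT_DESCRIPTION)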
from perfkitbenchmarker.configs import spec

FLAGS = flags.FLAGS

DEFAULT_USERNAME = '******'

_VM_SPEC_REGISTRY = {}
_VM_REGISTRY = {}

flags.DEFINE_boolean(
    'dedicated_hosts', False,
    'If True, use hosts that only have VMs from the same '
    'benchmark running on them.')
flags.DEFINE_integer(
    'num_cpus_override', None,
    'Rather than detecting the number of CPUs present on the machine, use this '
    'value if set. Some benchmarks will use this number to automatically '
    'scale their configurations; this can be used as a method to control '
    'benchmark scaling. It will also change the num_cpus metadata '
    'published along with the benchmark data.')
flags.DEFINE_list(
    'vm_metadata', [],
    'Metadata to add to the VM via the provider\'s AddMetadata function. '
    'It expects "key:value" pairs.')

# Note: If adding a gpu type here, be sure to add it to
# the flag definition in pkb.py too.
VALID_GPU_TYPES = ['k80', 'p100', 'v100', 'p4', 'p4-vws']


def GetVmSpecClass(cloud):
  """Returns the VmSpec class corresponding to 'cloud'."""
  return spec.GetSpecClass(BaseVmSpec, CLOUD=cloud)
_WAIT_MIN_RECHECK_DELAY = 0.001  # 1 ms
_WAIT_MAX_RECHECK_DELAY = 0.050  # 50 ms

# Values sent to child threads that have special meanings.
_THREAD_STOP_PROCESSING = 0
_THREAD_WAIT_FOR_KEYBOARD_INTERRUPT = 1

# The default value for max_concurrent_threads.
MAX_CONCURRENT_THREADS = 200

# The default value is set in pkb.py. It is the greater of
# MAX_CONCURRENT_THREADS or the value passed to --num_vms. This is particularly
# important for the cluster_boot benchmark where we want to launch all of the
# VMs in parallel.
flags.DEFINE_integer(
    'max_concurrent_threads', None,
    'Maximum number of concurrent threads to use when running a benchmark.')

FLAGS = flags.FLAGS


def _GetCallString(target_arg_tuple):
  """Returns the string representation of a function call."""
  target, args, kwargs = target_arg_tuple
  while isinstance(target, functools.partial):
    args = target.args + args
    inner_kwargs = target.keywords.copy()
    inner_kwargs.update(kwargs)
    kwargs = inner_kwargs
    target = target.func
  arg_strings = [str(a) for a in args]
  arg_strings.extend(
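
# Worked example (illustrative, not from this module) of the
# functools.partial unwrapping in _GetCallString: the partial's bound
# positional args are prepended to the call's args, and the call's kwargs
# override the partial's keywords.
#
#   bound = functools.partial(print, 'a', sep='-')
#   target_arg_tuple = (bound, ('b',), {'sep': '+'})
#   # After the while loop: target is print, args == ('a', 'b'),
#   # and kwargs == {'sep': '+'}.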
        Azure:
          machine_type: Standard_NC6
          zone: eastus
"""

flags.DEFINE_enum(
    'resnet_depth', '50', ['18', '34', '50', '101', '152', '200'],
    'Depth of ResNet model to use. Deeper models require more '
    'training time and more memory and may require reducing '
    '--resnet_train_batch_size to prevent running out of memory.')
flags.DEFINE_enum('resnet_mode', 'train_and_eval',
                  ['train', 'eval', 'train_and_eval'],
                  'Mode to run: train, eval, train_and_eval')
flags.DEFINE_integer(
    'resnet_train_steps', 112603,
    'The number of steps to use for training. Default is '
    '112603 steps, which is approximately 90 epochs at batch '
    'size 1024. This flag should be adjusted according to the '
    '--resnet_train_batch_size flag.')
flags.DEFINE_integer('resnet_train_batch_size', 1024,
                     'Global (not per-shard) batch size for training')
flags.DEFINE_integer('resnet_eval_batch_size', 1024,
                     'Global (not per-shard) batch size for evaluation')
flags.DEFINE_integer(
    'resnet_num_cores', 8,
    'Number of TPU cores. For a single '
    'TPU device, this is 8 because each TPU has 4 chips each '
    'with 2 cores.')
flags.DEFINE_enum(
    'resnet_data_format', 'channels_last',
    ['channels_first', 'channels_last'],
    'A flag to override the data format used in the model. The '
    'value is either channels_first or channels_last. To run the '
    'network on CPU or TPU, channels_last should be used. For GPU'
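
# Sanity check on the 112603 default (illustrative arithmetic): with the
# 1281167-image ImageNet training set (see imagenet_num_train_images) and a
# global batch size of 1024,
#   90 epochs * 1281167 images / 1024 images per step ~= 112602.6 steps,
# which rounds up to the 112603 default.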
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import sample

_BENCHMARKS = [
    'avrora', 'batik', 'eclipse', 'fop', 'h2', 'jython', 'luindex',
    'lusearch', 'pmd', 'sunflow', 'tomcat', 'tradebeans', 'tradesoap',
    'xalan'
]

flags.DEFINE_string('dacapo_jar_filename', 'dacapo-9.12-MR1-bach.jar',
                    'Filename of DaCapo jar file.')
flags.DEFINE_enum('dacapo_benchmark', 'luindex', _BENCHMARKS,
                  'Name of specific DaCapo benchmark to execute.')
flags.DEFINE_integer('dacapo_num_iters', 1, 'Number of iterations to execute.')

FLAGS = flags.FLAGS

BENCHMARK_NAME = 'dacapo'
BENCHMARK_CONFIG = """
dacapo:
  description: Runs DaCapo benchmarks
  vm_groups:
    default:
      vm_spec: *default_single_core
"""
_PASS_PATTERN = re.compile(r'^=====.*PASSED in (\d+) msec =====$')


def GetConfig(user_config):
CERT_FILE = 'perfkitbenchmarker.pem'
TEMP_DIR = '/tmp/perfkitbenchmarker'

# The temporary directory on VMs. We cannot reuse GetTempDir()
# because run_uri will not be available at time of module load and we need
# to use this directory as a base for other module level constants.
VM_TMP_DIR = '/tmp/pkb'

# Defaults for retrying commands.
POLL_INTERVAL = 30
TIMEOUT = 1200
FUZZ = .5
MAX_RETRIES = -1

flags.DEFINE_integer(
    'default_timeout', TIMEOUT,
    'The default timeout for retryable commands in seconds.')
flags.DEFINE_integer('burn_cpu_seconds', 0,
                     'Amount of time in seconds to burn cpu on vm.')
flags.DEFINE_integer('burn_cpu_threads', 1, 'Number of threads to burn cpu.')


class IpAddressSubset(object):
  """Enum of options for --ip_addresses."""
  REACHABLE = 'REACHABLE'
  BOTH = 'BOTH'
  INTERNAL = 'INTERNAL'
  EXTERNAL = 'EXTERNAL'

  ALL = (REACHABLE, BOTH, INTERNAL, EXTERNAL)
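
# A minimal sketch, assuming the usual retry semantics the constants above
# suggest, of how a fuzz factor randomizes the poll interval so parallel
# retries do not fire in lockstep. Illustrative only, not this module's
# actual retry implementation.
import random


def _FuzzedPollInterval(poll_interval=POLL_INTERVAL, fuzz=FUZZ):
  """Returns a delay drawn uniformly from [(1 - fuzz) * interval, interval]."""
  return poll_interval * (1 - fuzz * random.random())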
          gpu_type: k80
          gpu_count: 1
          zone: us-east1-d
          boot_disk_size: 200
        AWS:
          machine_type: p2.xlarge
          zone: us-east-1
          boot_disk_size: 200
        Azure:
          machine_type: Standard_NC6
          zone: eastus
      vm_count: null
"""

flags.DEFINE_integer('hpcg_runtime', 60, 'hpcg runtime in seconds',
                     lower_bound=1)
flags.DEFINE_integer('hpcg_gpus_per_node', None,
                     'The number of gpus per node.',
                     lower_bound=1)
flag_util.DEFINE_integerlist(
    'hpcg_problem_size',
    flag_util.IntegerList([256, 256, 256]),
    'Three-dimensional problem size for each node. Must contain '
    'three integers.',
    module_name=__name__)
    'of the index you want to test.')
flags.DEFINE_boolean('aws_dynamodb_use_sort', False,
                     'Determines whether to use a sort key or not.')
flags.DEFINE_string(
    'aws_dynamodb_sortkey', 'sort_key',
    'The sortkey of the dynamodb table. '
    'This switches to primarykey if using sort. '
    'If testing GSI/LSI, use the primary keyname '
    'of the index you want to test.')
flags.DEFINE_enum(
    'aws_dynamodb_attributetype', 'S', ['S', 'N', 'B'],
    'The type of attribute, default to S (String). '
    'Alternates are N (Number) and B (Binary).')
flags.DEFINE_string('aws_dynamodb_capacity',
                    'ReadCapacityUnits=5,WriteCapacityUnits=5',
                    'Set RCU/WCU for the dynamodb table.')
flags.DEFINE_integer('aws_dynamodb_lsi_count', 0,
                     'Set the number of Local Secondary Indexes. Only set 0-5.')
flags.register_validator('aws_dynamodb_lsi_count',
                         lambda value: -1 < value < 6,
                         message='--aws_dynamodb_lsi_count must be from 0-5.')
flags.register_validator('aws_dynamodb_use_sort',
                         lambda sort: sort or not FLAGS.aws_dynamodb_lsi_count,
                         message='--aws_dynamodb_lsi_count requires sort key.')
flags.DEFINE_integer('aws_dynamodb_gsi_count', 0,
                     'Set the number of Global Secondary Indexes. Only set '
                     '0-5.')
flags.register_validator('aws_dynamodb_gsi_count',
                         lambda value: -1 < value < 6,
                         message='--aws_dynamodb_gsi_count must be from 0-5.')


class _GetIndexes():
  """Used to create secondary indexes."""
flags.DEFINE_enum('ycsb_measurement_interval', 'op',
                  ['op', 'intended', 'both'],
                  'Measurement interval to use for ycsb. Defaults to op.')
flags.DEFINE_boolean('ycsb_histogram', False,
                     'Include individual histogram results from YCSB '
                     '(will increase sample count).')
flags.DEFINE_boolean('ycsb_load_samples', True,
                     'Include samples from pre-populating the database.')
flags.DEFINE_boolean('ycsb_include_individual_results', False,
                     'Include results from each client VM, rather than just '
                     'combined results.')
flags.DEFINE_boolean('ycsb_reload_database', True,
                     'Reload database, otherwise skip load stage. '
                     'Note, this flag is only used if the database '
                     'is already loaded.')
flags.DEFINE_integer('ycsb_client_vms', 1, 'Number of YCSB client VMs.')
flags.DEFINE_list('ycsb_workload_files', ['workloada', 'workloadb'],
                  'Path to YCSB workload file to use during the *run* '
                  'stage only. Comma-separated list.')
flags.DEFINE_list('ycsb_load_parameters', [],
                  'Passed to YCSB during the load stage. Comma-separated list '
                  'of "key=value" pairs.')
flags.DEFINE_list('ycsb_run_parameters', [],
                  'Passed to YCSB during the run stage. Comma-separated list '
                  'of "key=value" pairs.')
flags.DEFINE_list('ycsb_threads_per_client', ['32'],
                  'Number of threads per '
                  'loader during the benchmark run. Specify a list to vary the '
                  'number of clients.')
flags.DEFINE_integer('ycsb_preload_threads', None,
                     'Number of threads per '
                     'loader during the initial data population stage. '
                     'Default value depends on the target DB.')
flags.DEFINE_list(
    'zones', [],
    'A list of zones within which to run PerfKitBenchmarker. '
    'This is specific to the cloud provider you are running on. '
    'If multiple zones are given, PerfKitBenchmarker will create 1 VM in each '
    'zone, until enough VMs are created as specified in each '
    'benchmark. The order in which this flag is applied to VMs is '
    'undefined.')
# TODO(user): note that this is currently very GCE specific. Need to create a
# module which can translate from some generic types to provider specific
# nomenclature.
flags.DEFINE_string(
    'machine_type', None,
    'Machine types that will be created for benchmarks that don\'t '
    'require a particular type.')
flags.DEFINE_integer(
    'num_vms', 1,
    'For benchmarks which can make use of a '
    'variable number of machines, the number of VMs to use.')
flags.DEFINE_string('image', None,
                    'Default image that will be linked to the VM')
flags.DEFINE_string(
    'run_uri', None,
    'Name of the Run. If provided, this '
    'should be alphanumeric and less than or equal to 10 '
    'characters in length.')
flags.DEFINE_string(
    'owner', getpass.getuser(),
    'Owner name. Used to tag created resources and performance records.')
flags.DEFINE_enum('log_level', log_util.INFO,
                  [log_util.DEBUG, log_util.INFO],
                  'The log level to run at.')
flags.DEFINE_enum(
    'file_log_level', log_util.DEBUG, [log_util.DEBUG, log_util.INFO],
    'Anything logged at this level or higher will be written to the log file.')
    'bsearch', 'cache', 'heapsort', 'hsearch', 'icache', 'lockbus', 'lsearch',
    'malloc', 'matrix', 'membarrier', 'memcpy', 'mergesort', 'qsort', 'str',
    'stream', 'tsearch', 'vecmath', 'wcs', 'zlib'
}

MEMORY_SUITE = {
    'bsearch', 'context', 'heapsort', 'hsearch', 'lockbus', 'lsearch',
    'malloc', 'matrix', 'membarrier', 'memcpy', 'memfd', 'mergesort',
    'mincore', 'null', 'numa', 'oom-pipe', 'pipe', 'qsort', 'stack', 'str',
    'stream', 'tsearch', 'vm', 'vm-rw', 'wcs', 'zero', 'zlib'
}

# Run the stressors that are each part of all of the compute related stress-ng
# classes: cpu, cpu-cache, and memory.
DEFAULT_STRESSORS = sorted(
    CPU_SUITE.intersection(CPU_CACHE_SUITE).intersection(MEMORY_SUITE))

flags.DEFINE_integer('stress_ng_duration', 10,
                     'Number of seconds to run the test.')
flags.DEFINE_boolean('stress_ng_calc_geomean', True,
                     'Whether to calculate geomean or not.')
flags.DEFINE_list('stress_ng_custom_stressors', DEFAULT_STRESSORS,
                  'List of stressors to run against. Default combines the '
                  'cpu, cpu-cache, and memory suites.')
flags.DEFINE_list('stress_ng_cpu_methods', [],
                  'List of cpu methods to run with. By default none are run.')

ALL_WORKLOADS = ['small', 'medium', 'large']
flags.DEFINE_list(
    'stress_ng_thread_workloads', ['large'],
    'List of thread sizes to run against. Options are '
    'small (1 thread total), medium (1 thread per 2 cpus), and '
    'large (1 thread per cpu).')
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util

FLAGS = flags.FLAGS

CONTROL_PORT = 5000
UDP_PORT = 5001
NUTTCP_OUT_FILE = 'nuttcp_results'
CPU_OUT_FILE = 'cpu_results'

flags.DEFINE_integer(
    'nuttcp_max_bandwidth_mb', 10000,
    'The maximum bandwidth, in megabytes, to test in a UDP stream.')
flags.DEFINE_integer(
    'nuttcp_min_bandwidth_mb', 100,
    'The minimum bandwidth, in megabytes, to test in a UDP stream.')
flags.DEFINE_integer(
    'nuttcp_bandwidth_step_mb', 1000,
    'The number of megabytes by which to increase bandwidth in each '
    'UDP stream test.')
flags.DEFINE_integer('nuttcp_udp_stream_seconds', 60,
                     'The amount of time to run the UDP stream test.')
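
# Illustrative sketch (an assumption, not the benchmark's own loop) of the
# bandwidth sweep the three flags above describe: start at the minimum and
# step upward until the maximum would be exceeded.
def _BandwidthSweep(min_mb=100, step_mb=1000, max_mb=10000):
  """Yields the UDP stream bandwidths to test, in megabytes."""
  bandwidth = min_mb
  while bandwidth <= max_mb:
    yield bandwidth
    bandwidth += step_mb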
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from perfkitbenchmarker import flags

flags.DEFINE_string(
    'aws_user_name', 'ubuntu',
    'This determines the user name that Perfkit will '
    'attempt to use. This must be changed in order to '
    'use any image other than ubuntu.')
flags.DEFINE_integer('aws_provisioned_iops', None,
                     'IOPS for Provisioned IOPS (SSD) volumes in AWS.')
flags.DEFINE_string(
    'aws_emr_loguri', None,
    'The log-uri parameter to pass to AWS when creating a '
    'cluster. If not set, a bucket will be created.')
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing flags applicable across benchmark runs on GCP."""

from perfkitbenchmarker import flags

# Sentinel value for unspecified platform.
GCP_MIN_CPU_PLATFORM_NONE = 'none'

flags.DEFINE_string('gcloud_path', 'gcloud',
                    'The path for the gcloud utility.')
flags.DEFINE_list('additional_gcloud_flags', [],
                  'Additional flags to pass to gcloud.')
flags.DEFINE_integer(
    'gce_num_local_ssds', 0,
    'The number of ssds that should be added to the VM. Note '
    'that this is currently only supported in certain zones '
    '(see https://cloud.google.com/compute/docs/local-ssd).')
flags.DEFINE_string(
    'gcloud_scopes', None,
    'If set, space-separated list of scopes to apply to every created machine')
flags.DEFINE_boolean('gce_migrate_on_maintenance', True,
                     'If true, allow VM migration on GCE host maintenance.')
flags.DEFINE_boolean('gce_preemptible_vms', False,
                     'If true, use preemptible VMs on GCE.')
flags.DEFINE_string(
    'image_family', None,
    'The family of the image that the boot disk will be '
    'initialized with. The --image flag will take priority over this flag. See:'
    ' https://cloud.google.com/sdk/gcloud/reference/compute/instances/create')
flags.DEFINE_string(
    'image_project', None,
import uuid

import numpy as np

from perfkitbenchmarker import events
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import dstat
import six

flags.DEFINE_boolean(
    'dstat', False,
    'Run dstat (http://dag.wiee.rs/home-made/dstat/) '
    'on each VM to collect system performance metrics during '
    'each benchmark run.')
flags.DEFINE_integer(
    'dstat_interval', None,
    'dstat sample collection frequency, in seconds. Only '
    'applicable when --dstat is specified.')
flags.DEFINE_string(
    'dstat_output', None,
    'Output directory for dstat output. '
    'Only applicable when --dstat is specified. '
    'Default: run temporary directory.')
flags.DEFINE_boolean('dstat_publish', False,
                     'Whether to publish average dstat statistics.')
flags.DEFINE_string(
    'dstat_publish_regex', None,
    'Requires setting dstat_publish to true. If specified, any dstat statistic '
    'matching this regular expression will be published such '
    'that each individual statistic will be in a sample with '
    'the time since the epoch in the metadata. Examples: use '
    '".*" to record all samples; use "net" to record '
    'networking statistics.')
# limitations under the License.
"""Runs the Stencil2D benchmark from the SHOC Benchmark Suite."""

import os

from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import hpc_util
from perfkitbenchmarker import vm_util
from perfkitbenchmarker import flag_util
from perfkitbenchmarker.linux_packages import shoc_benchmark_suite
from perfkitbenchmarker.linux_packages import cuda_toolkit
from perfkitbenchmarker.linux_packages import nvidia_driver

flags.DEFINE_integer('stencil2d_iterations', 5,
                     'number of iterations to run', lower_bound=1)
flag_util.DEFINE_integerlist('stencil2d_problem_sizes',
                             flag_util.IntegerList([4096]),
                             'problem sizes to run. Can specify a single '
                             'number, like --stencil2d_problem_sizes=4096 '
                             'or a list like --stencil2d_problem_sizes='
                             '1024,4096',
                             on_nonincreasing=flag_util.IntegerListParser.WARN,
                             module_name=__name__)
FLAGS = flags.FLAGS

MACHINEFILE = 'machinefile'
BENCHMARK_NAME = 'stencil2d'
BENCHMARK_VERSION = '0.25'
""" import datetime import json import logging from perfkitbenchmarker import flags from perfkitbenchmarker import providers from perfkitbenchmarker import dpb_service from perfkitbenchmarker import vm_util from perfkitbenchmarker.providers.gcp import util FLAGS = flags.FLAGS flags.DEFINE_string('dpb_dataproc_image_version', None, 'The image version to use for the cluster.') flags.DEFINE_integer('dpb_dataproc_distcp_num_maps', None, 'Number of maps to copy data.') GCP_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' SPARK_SAMPLE_LOCATION = ('file:///usr/lib/spark/examples/jars/' 'spark-examples.jar') TESTDFSIO_JAR_LOCATION = ('file:///usr/lib/hadoop-mapreduce/' 'hadoop-mapreduce-client-jobclient.jar') TESTDFSIO_PROGRAM = 'TestDFSIO' class GcpDpbDataproc(dpb_service.BaseDpbService): """Object representing a GCP Dataproc cluster.
from perfkitbenchmarker import flags
from perfkitbenchmarker import publisher
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
import six

FLAGS = flags.FLAGS

# The default values for flags and BENCHMARK_CONFIG are not a recommended
# configuration for comparing sysbench performance. Rather, these values
# are set to provide a quick way to verify that functionality is working.
# A broader set covering different permutations on much larger data sets
# is preferable for comparison.
flags.DEFINE_string('sysbench_testname', 'oltp_read_write',
                    'The built-in oltp lua script to run')
flags.DEFINE_integer('sysbench_tables', 4,
                     'The number of tables used in sysbench oltp.lua tests')
flags.DEFINE_integer(
    'sysbench_table_size', 100000,
    'The number of rows in each table used in the oltp tests')
flags.DEFINE_integer('sysbench_scale', 100,
                     'Scale parameter as used by the TPCC benchmark.')
flags.DEFINE_integer(
    'sysbench_warmup_seconds', 10,
    'The duration of the warmup run in which results are '
    'discarded, in seconds.')
flags.DEFINE_integer(
    'sysbench_run_seconds', 10,
    'The duration of the actual run in which results are '
    'collected, in seconds.')
flag_util.DEFINE_integerlist(
    'sysbench_thread_counts',