# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from perfkitbenchmarker import flags

# Per-container Docker settings used when scheduling benchmark containers.
flags.DEFINE_boolean(
    'mesos_privileged_docker', False,
    'If set to True, will attempt to create Docker containers '
    'in a privileged mode. Note that some benchmarks execute '
    'commands which are only allowed in privileged mode.')
flags.DEFINE_integer(
    'docker_memory_mb', 2048, 'Memory limit for docker containers.')
flags.DEFINE_float('docker_cpus', 1, 'CPU limit for docker containers.')

# Marathon endpoint and credentials used to talk to the scheduler.
flags.DEFINE_string(
    'marathon_address', 'localhost:8080', 'Marathon IP address and port.')
# NOTE(review): a real-looking credential as a flag default is risky; consider
# requiring this flag explicitly at runtime.
flags.DEFINE_string(
    'marathon_auth', 'root:password', 'Marathon server basic authentication.')
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import oldisim_dependencies

FLAGS = flags.FLAGS

# Topology and measurement knobs for the oldisim scheduler benchmark.
flags.DEFINE_integer('oldisim_num_leaves', 4, 'number of leaf nodes',
                     lower_bound=1, upper_bound=64)
flags.DEFINE_list('oldisim_fanout', [],
                  'a list of fanouts to be tested. '
                  'a root can connect to a subset of leaf nodes (fanout). '
                  'the value of fanout has to be smaller than num_leaves.')
flags.DEFINE_enum('oldisim_latency_metric', 'avg',
                  ['avg', '50p', '90p', '95p', '99p', '99.9p'],
                  'Allowable metrics for end-to-end latency')
# Fixed: the default was the *string* '30' passed to a float flag; use a
# numeric literal so the flag default matches the declared flag type.
flags.DEFINE_float('oldisim_latency_target', 30.0, 'latency target in ms')

# Cluster shape: a single load driver and a single root node.
NUM_DRIVERS = 1
NUM_ROOTS = 1

BENCHMARK_NAME = 'oldisim'
BENCHMARK_CONFIG = """
oldisim:
  description: >
      Run oldisim. Specify the number of leaf nodes with --oldisim_num_leaves
  vm_groups:
    default:
      vm_spec: *default_single_core
"""
# Run-stage retry, parallelism, and result-reporting flags for the runner.
flags.DEFINE_integer(
    'run_stage_retries', 0,
    'The number of allowable consecutive failures during the run stage. After '
    'this number of failures any exceptions will cause benchmark termination. '
    'If run_stage_time is exceeded, the run stage will not be retried even if '
    'the number of failures is less than the value of this flag.')
flags.DEFINE_boolean(
    'boot_samples', False,
    'Whether to publish boot time samples for all tests.')
flags.DEFINE_integer(
    'run_processes', 1,
    'The number of parallel processes to use to run benchmarks.',
    lower_bound=1)
flags.DEFINE_float(
    'run_processes_delay', None,
    'The delay in seconds between parallel processes\' invocation. '
    'Increasing this value may reduce provider throttling issues.',
    lower_bound=0)
flags.DEFINE_string(
    'completion_status_file', None,
    'If specified, this file will contain the completion status of each '
    'benchmark that ran (SUCCEEDED, FAILED, or SKIPPED). The file has one json '
    'object per line, each with the following format:\n'
    '{ "name": <benchmark name>, "flags": <flags dictionary>, '
    '"status": <completion status> }')
flags.DEFINE_string(
    'helpmatch', '',
    'Shows only flags defined in a module whose name matches the given regex.',
    allow_override_cpp=True)
# NOTE(review): this fragment is cut off mid-definition — the help text for
# 'create_failed_run_samples' continues beyond the visible chunk, so the call
# below is intentionally left incomplete and otherwise byte-identical.
flags.DEFINE_boolean(
    'create_failed_run_samples', False,
# limitations under the License. """Module containing aerospike server installation and cleanup functions.""" import logging import tempfile from perfkitbenchmarker import flags from perfkitbenchmarker import sample from perfkitbenchmarker import vm_util from perfkitbenchmarker.linux_packages import INSTALL_DIR FLAGS = flags.FLAGS GIT_REPO = 'https://github.com/aerospike/act.git' ACT_DIR = '%s/act' % INSTALL_DIR flags.DEFINE_float('act_load', 1.0, 'Load multiplier for act test per device.') flags.DEFINE_boolean('act_parallel', False, 'Run act tools in parallel. One copy per device.') flags.DEFINE_integer('act_duration', 86400, 'Duration of act test in seconds.') # TODO(user): Support user provided config file. ACT_CONFIG_TEMPLATE = """ device-names: {devices} num-queues: 8 threads-per-queue: 8 test-duration-sec: {duration} report-interval-sec: 1 large-block-op-kbytes: 128 record-bytes: 1536 read-reqs-per-sec: {read_iops} write-reqs-per-sec: {write_iops} microsecond-histograms: no
# Run-length and record-schema knobs for the YCSB client.
flags.DEFINE_integer(
    'ycsb_timelimit', 1800,
    'Maximum amount of time to run '
    'each workload / client count combination. Set to 0 for '
    'unlimited time.')
flags.DEFINE_integer(
    'ycsb_field_count', None,
    'Number of fields in a record. '
    'Defaults to None which uses the ycsb default of 10.')
flags.DEFINE_integer(
    'ycsb_field_length', None,
    'Size of each field. Defaults '
    'to None which uses the ycsb default of 100.')

# Workload-mix overrides; a None value defers to the workload file.
flags.DEFINE_enum(
    'ycsb_requestdistribution', None, ['uniform', 'zipfian', 'latest'],
    'Type of request distribution. '
    'This will overwrite workload file parameter')
flags.DEFINE_float(
    'ycsb_readproportion', None,
    'The read proportion, '
    'Default is 0.5 in workloada and 0.95 in YCSB.')
flags.DEFINE_float(
    'ycsb_updateproportion', None,
    'The update proportion, '
    'Default is 0.5 in workloada and 0.05 in YCSB.')
flags.DEFINE_float(
    'ycsb_scanproportion', None,
    'The scan proportion, '
    'Default is 0 in workloada and 0 in YCSB.')

# Default loading thread count for non-batching backends.
DEFAULT_PRELOAD_THREADS = 32

# Customer YCSB tar url. If not set, the official YCSB release will be used.
_ycsb_tar_url = None
# EMR, S3, spot-instance, boot-disk, and image-selection flags for AWS.
flags.DEFINE_string(
    'aws_emr_loguri', None,
    'The log-uri parameter to pass to AWS when creating a '
    'cluster. If not set, a bucket will be created.')
flags.DEFINE_integer(
    'aws_emr_job_wait_time', None,
    'The time to wait for an EMR job to finish, in seconds')
flags.DEFINE_string(
    's3_custom_endpoint', None,
    'If given, a custom endpoint to use for S3 transfers. If '
    'this flag is not given, use the standard endpoint for the '
    'storage region.')

# Spot-market controls; price of None falls back to the on-demand price.
flags.DEFINE_boolean(
    'aws_spot_instances', False,
    'Whether to use AWS spot instances for any AWS VMs.')
flags.DEFINE_float(
    'aws_spot_price', None,
    'The spot price to bid for AWS spot instances. Defaults '
    'to on-demand price when left as None.')

flags.DEFINE_integer(
    'aws_boot_disk_size', None, 'The boot disk size in GiB for AWS VMs.')
flags.DEFINE_string('kops', 'kops', 'The path to the kops binary.')

# Image selection: a coarse name filter applied server-side, then a Python
# regex applied to the filtered results.
flags.DEFINE_string(
    'aws_image_name_filter', None,
    'The filter to use when searching for an image for a VM. '
    'See usage details in aws_virtual_machine.py around '
    'IMAGE_NAME_FILTER.')
flags.DEFINE_string(
    'aws_image_name_regex', None,
    'The Python regex to use to further filter images for a '
    'VM. This applies after the aws_image_name_filter. See '
    'usage details in aws_virtual_machine.py around '
    'IMAGE_NAME_REGEX.')
FLAGS = flags.FLAGS

# Protocol and load-target settings for the mutilate memcached load generator.
flags.DEFINE_enum(
    'mutilate_protocol', 'binary', ['binary', 'ascii'],
    'Protocol to use. Supported protocols are binary and ascii.')
flags.DEFINE_list(
    'mutilate_qps', [],
    'Target aggregate QPS. If not set, target for peak qps.')
flags.DEFINE_integer('mutilate_time', 300, 'Maximum time to run (seconds).')

# Key/value sizing and dataset shape (mutilate accepts distribution specs).
flags.DEFINE_string(
    'mutilate_keysize', '16', 'Length of memcached keys (distribution).')
flags.DEFINE_string(
    'mutilate_valuesize', '128', 'Length of memcached values (distribution).')
flags.DEFINE_integer(
    'mutilate_records', 10000, 'Number of memcached records to use.')
flags.DEFINE_float(
    'mutilate_ratio', 0.0, 'Ratio of set:get. By default, read only.')
flags.DEFINE_list(
    'mutilate_options', [],
    'Additional mutilate long-form options (--) in comma separated form. e.g.'
    '--mutilate_options=blocking,search=99:1000.'
    'See https://github.com/leverich/mutilate for all available options.')

# When several values are given for threads, connections, or depths, every
# combination is tested (the cross product). For example:
#   threads=1,2; connections=3,4; depths=5,6
# yields threads:connections:depths of
#   1,3,5; 1,3,6; 1,4,5; 1,4,6; 2,3,5; 2,3,6; 2,4,5; 2,4,6
flags.DEFINE_list(
    'mutilate_threads', [1],
    'Number of total client threads to spawn per client VM.')
flags.DEFINE_list(
    'mutilate_connections', [1],
    'Number of connections to establish per client thread.')
# Disk, EMR, S3, spot, Kubernetes, and ElastiCache flags for the AWS provider.
flags.DEFINE_integer(
    'aws_provisioned_iops', None,
    'IOPS for Provisioned IOPS (SSD) volumes in AWS.')
flags.DEFINE_string(
    'aws_emr_loguri', None,
    'The log-uri parameter to pass to AWS when creating a '
    'cluster. If not set, a bucket will be created.')
flags.DEFINE_integer(
    'aws_emr_job_wait_time', None,
    'The time to wait for an EMR job to finish, in seconds')
flags.DEFINE_string(
    's3_custom_endpoint', None,
    'If given, a custom endpoint to use for S3 transfers. If '
    'this flag is not given, use the standard endpoint for the '
    'storage region.')
flags.DEFINE_boolean(
    'aws_spot_instances', False,
    'Whether to use AWS spot instances for any AWS VMs.')
# NOTE(review): other revisions of these flags default aws_spot_price to None
# (meaning "use the on-demand price"); 0.0 is kept here to preserve behavior.
flags.DEFINE_float(
    'aws_spot_price', 0.0, 'The spot price to bid for AWS spot instances.')
flags.DEFINE_integer(
    'aws_boot_disk_size', None, 'The boot disk size in GiB for AWS VMs.')
flags.DEFINE_string('kops', 'kops', 'The path to the kops binary.')
flags.DEFINE_string(
    'aws_image_name_filter', None,
    'The filter to use when searching for an image for a VM.')
flags.DEFINE_string(
    'aws_preprovisioned_data_bucket', None,
    'AWS bucket where pre-provisioned data has been copied.')

# Managed Redis (ElastiCache) settings.
flags.DEFINE_string(
    'redis_node_type', 'cache.m4.large',
    'The AWS node type to use for cloud redis')
flags.DEFINE_string(
    'aws_elasticache_failover_zone', None, 'AWS elasticache failover zone')
# Disk, DAX, EMR, spot-instance, and image-selection flags for AWS.
flags.DEFINE_integer('aws_provisioned_iops', None,
                     'IOPS for Provisioned IOPS (SSD) volumes in AWS.')
flags.DEFINE_string('aws_dax_node_type', 'dax.r4.large',
                    'The node type used for creating AWS DAX cluster.')
flags.DEFINE_integer('aws_dax_replication_factor', 3,
                     'The replication factor of AWS DAX cluster.')
flags.DEFINE_string('aws_emr_loguri', None,
                    'The log-uri parameter to pass to AWS when creating a '
                    'cluster. If not set, a bucket will be created.')
flags.DEFINE_integer('aws_emr_job_wait_time', 18000,
                     'The time to wait for an EMR job to finish, in seconds')
# Spot-market controls; a price of None falls back to the on-demand price.
flags.DEFINE_boolean('aws_spot_instances', False,
                     'Whether to use AWS spot instances for any AWS VMs.')
flags.DEFINE_float('aws_spot_price', None,
                   'The spot price to bid for AWS spot instances. Defaults '
                   'to on-demand price when left as None.')
flags.DEFINE_enum('aws_spot_block_duration_minutes', None,
                  ['60', '120', '180', '240', '300', '360'], 'The required '
                  'duration for the Spot Instances (also known as Spot blocks),'
                  ' in minutes. This value must be a multiple of 60.')
flags.DEFINE_integer('aws_boot_disk_size', None,
                     'The boot disk size in GiB for AWS VMs.')
flags.DEFINE_string('kops', 'kops', 'The path to the kops binary.')
flags.DEFINE_string('aws_image_name_filter', None,
                    'The filter to use when searching for an image for a VM. '
                    'See usage details in aws_virtual_machine.py around '
                    'IMAGE_NAME_FILTER.')
# NOTE(review): this fragment is cut off mid-definition — the help text for
# 'aws_image_name_regex' continues beyond the visible chunk, so the call below
# is intentionally left incomplete and otherwise byte-identical.
flags.DEFINE_string('aws_image_name_regex', None,
                    'The Python regex to use to further filter images for a '
os_type: ubuntu1604 vm_spec: GCP: machine_type: n1-standard-4 zone: us-east1-d boot_disk_size: 200 AWS: machine_type: p2.xlarge zone: us-east-1 boot_disk_size: 200 Azure: machine_type: Standard_NC6 zone: eastus """
# NOTE(review): this fragment begins inside a triple-quoted config string whose
# opening lies outside the visible chunk; the string tail above is kept
# byte-identical because its whitespace is data.
# Hyper-parameter and mode flags for the Inception v3 benchmark.
flags.DEFINE_float('inception3_learning_rate', 0.165, 'Learning rate.')
flags.DEFINE_integer('inception3_train_steps', 250000,
                     'Number of steps use for training.')
flags.DEFINE_enum(
    'inception3_use_data', 'real', ['real', 'fake'],
    'Whether to use real or fake data. If real, the data is '
    'downloaded from imagenet_data_dir. Otherwise, synthetic '
    'data is generated.')
flags.DEFINE_enum('inception3_mode', 'train',
                  ['train', 'eval', 'train_and_eval'],
                  'Mode to run: train, eval, train_and_eval')
flags.DEFINE_integer('inception3_train_steps_per_eval', 2000,
                     'Number of training steps to run between evaluations.')
# NOTE(review): cut off mid-definition — the help text continues beyond the
# visible chunk, so the call below is intentionally left incomplete.
flags.DEFINE_integer(
    'inception3_save_checkpoints_secs', 0,
    'Interval (in '
    'seconds) at which the model data should be checkpointed. '
# Load-scaling and auto-mode flags for the SPEC SFS 2014 benchmark.
flags.DEFINE_integer('specsfs2014_incr_load', 1,
                     'The amount to increment "load" by for each run.',
                     lower_bound=1)
flags.DEFINE_integer(
    'specsfs2014_num_runs', 1,
    'The total number of SPEC runs. The load for the nth run is '
    '"load" + n * "specsfs_incr_load".',
    lower_bound=1)
flags.DEFINE_boolean(
    'specsfs2014_auto_mode', False,
    'If True, automatically find the max passing score for each benchmark. '
    'This ignores other flags such as specsfs2014_load, specsfs2014_incr_load, '
    'and specsfs2014_num_runs.')
flags.DEFINE_float(
    'specsfs2014_auto_mode_upper_bound', float('inf'),
    'The upper bound for specsfs load. Relevant when specsfs2014_auto_mode '
    'is set to True.')

BENCHMARK_NAME = 'specsfs2014'
# NOTE(review): this fragment ends inside the unclosed config string below;
# its body is kept byte-identical because the whitespace is string data and
# the original line breaks are not recoverable from this view.
BENCHMARK_CONFIG = """ specsfs2014: description: > Run SPEC SFS 2014. For a full explanation of all benchmark modes see http://www.spec.org/sfs2014/. In order to run this benchmark copy your 'SPECsfs2014_SP1.iso' and 'netmist_license_key' files into the data/ directory. vm_groups: clients: vm_spec: *default_single_core vm_count: null gluster_servers: