Example 1
    'Amount of time in seconds to burn cpu on vm before '
    'starting benchmark')
flags.DEFINE_integer(
    'burn_cpu_threads', 1, 'Number of threads to use to '
    'burn cpu before starting benchmark.')
flags.DEFINE_integer(
    'background_cpu_threads', None,
    'Number of threads of background cpu usage while '
    'running a benchmark')
flags.DEFINE_integer(
    'background_network_mbits_per_sec', None,
    'Number of megabits per second of background '
    'network traffic to generate during the run phase '
    'of the benchmark')
flags.DEFINE_boolean(
    'simulate_maintenance', False,
    'Whether to simulate VM maintenance during the benchmark. '
    'This requires both benchmark and provider support.')
flags.DEFINE_integer(
    'simulate_maintenance_delay', 0,
    'The number of seconds to wait to start simulating '
    'maintenance.')


class IpAddressSubset(object):
    """Enum of options for --ip_addresses."""
    REACHABLE = 'REACHABLE'
    BOTH = 'BOTH'
    INTERNAL = 'INTERNAL'
    EXTERNAL = 'EXTERNAL'

    ALL = (REACHABLE, BOTH, INTERNAL, EXTERNAL)
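
This enum typically backs a DEFINE_enum flag elsewhere in PerfKitBenchmarker. A minimal sketch of how it might be wired up; the flag name and help text here are illustrative, not copied from the source:

flags.DEFINE_enum(
    'ip_addresses', IpAddressSubset.REACHABLE, IpAddressSubset.ALL,
    'For networking tests: which IP addresses to use (illustrative help '
    'text; REACHABLE picks addresses the peer can reach, BOTH tests both '
    'internal and external addresses).')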
Example 2
from perfkitbenchmarker import flags
from perfkitbenchmarker import log_util
from perfkitbenchmarker import version
from perfkitbenchmarker import vm_util
import six.moves.http_client as httplib

FLAGS = flags.FLAGS

flags.DEFINE_string(
    'product_name',
    'PerfKitBenchmarker',
    'The product name to use when publishing results.')

flags.DEFINE_boolean(
    'official',
    False,
    'A boolean indicating whether results are official or not. The '
    'default is False. Official test results are treated and queried '
    'differently from non-official test results.')

flags.DEFINE_boolean(
    'hostname_metadata',
    False,
    'A boolean indicating whether to publish VM hostnames as part of sample '
    'metadata.')

flags.DEFINE_string(
    'json_path',
    None,
    'A path to write newline-delimited JSON results. '
    'Default: write to a run-specific temporary directory.')
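
Because --json_path produces newline-delimited JSON, one sample per line, the output can be post-processed with the standard library alone. A minimal sketch, assuming the usual PerfKitBenchmarker sample fields (metric, value, unit) and a hypothetical output path:

import json

with open('/tmp/pkb/results.json') as f:  # hypothetical path
  for line in f:
    if line.strip():
      record = json.loads(line)
      print(record['metric'], record['value'], record['unit'])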
flags.DEFINE_enum(
Example 3
        AWS:
          machine_type: m5.large
          zone: us-east-1
        Azure:
          machine_type: Standard_D2s_v3
          zone: eastus
          boot_disk_type: StandardSSD_LRS
        GCP:
          machine_type: n1-standard-2
          zone: us-central1-a
          boot_disk_type: pd-ssd
      vm_count: null
"""

flags.DEFINE_boolean(
    'cluster_boot_time_reboot', False,
    'Whether to reboot the VMs during the cluster boot benchmark to measure '
    'reboot performance.')
FLAGS = flags.FLAGS


def GetConfig(user_config):
  return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)


def Prepare(unused_benchmark_spec):
  pass


def GetTimeToBoot(vms):
  """Creates Samples for the boot time of a list of VMs.
Example 4
    NVIDIA_TESLA_P100: {
        'base_clock': [715, 1189],
        'max_clock': [715, 1328],
        'autoboost_enabled': None,
    },
    NVIDIA_TESLA_V100: {
        'base_clock': [877, 1312],
        'max_clock': [877, 1530],
        'autoboost_enabled': None,
    },
}

flag_util.DEFINE_integerlist(
    'gpu_clock_speeds', None, 'desired gpu clock speeds in the form '
    '[memory clock, graphics clock]')
flags.DEFINE_boolean('gpu_autoboost_enabled', None,
                     'whether gpu autoboost is enabled')

flags.DEFINE_string(
    'cuda_toolkit_installation_dir', '/usr/local/cuda',
    'installation directory to use for CUDA toolkit. '
    'If the toolkit is not installed, it will be installed '
    'here. If it is already installed, the installation at '
    'this path will be used.')

flags.DEFINE_enum('cuda_toolkit_version',
                  '9.0', ['8.0', '9.0'],
                  'Version of CUDA Toolkit to install',
                  module_name=__name__)

FLAGS = flags.FLAGS
Example 5
import logging
import numpy as np
import os
import posixpath
import time
import threading
import uuid

from perfkitbenchmarker import events
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import dstat

flags.DEFINE_boolean(
    'dstat', False, 'Run dstat (http://dag.wiee.rs/home-made/dstat/) '
    'on each VM to collect system performance metrics during '
    'each benchmark run.')
flags.DEFINE_integer(
    'dstat_interval', None,
    'dstat sample collection frequency, in seconds. Only '
    'applicable when --dstat is specified.')
flags.DEFINE_string(
    'dstat_output', None, 'Output directory for dstat output. '
    'Only applicable when --dstat is specified. '
    'Default: run temporary directory.')
flags.DEFINE_boolean('dstat_publish', False,
                     'Whether or not to publish dstat statistics.')


class _DStatCollector(object):
    """dstat collector.
Example 6
flags.DEFINE_integer('aws_provisioned_iops', None,
                     'IOPS for Provisioned IOPS (SSD) volumes in AWS.')

flags.DEFINE_string(
    'aws_emr_loguri', None,
    'The log-uri parameter to pass to AWS when creating a '
    'cluster.  If not set, a bucket will be created.')
flags.DEFINE_integer('aws_emr_job_wait_time', 18000,
                     'The time to wait for an EMR job to finish, in seconds')

flags.DEFINE_string(
    's3_custom_endpoint', None,
    'If given, a custom endpoint to use for S3 transfers. If '
    'this flag is not given, use the standard endpoint for the '
    'storage region.')
flags.DEFINE_boolean('aws_spot_instances', False,
                     'Whether to use AWS spot instances for any AWS VMs.')
flags.DEFINE_float(
    'aws_spot_price', None,
    'The spot price to bid for AWS spot instances. Defaults '
    'to on-demand price when left as None.')
flags.DEFINE_integer('aws_boot_disk_size', None,
                     'The boot disk size in GiB for AWS VMs.')
flags.DEFINE_string('kops', 'kops', 'The path to the kops binary.')
flags.DEFINE_string(
    'aws_image_name_filter', None,
    'The filter to use when searching for an image for a VM. '
    'See usage details in aws_virtual_machine.py around '
    'IMAGE_NAME_FILTER.')
flags.DEFINE_string(
    'aws_image_name_regex', None,
    'The Python regex to use to further filter images for a '
Example 7
                    192.168.0.0/29. Errors will occur if the CIDR range has
                    already been used for a currently existing TPU, the CIDR
                    range conflicts with any networks in the user's provided
                    network, or the provided network is peered with another
                    network that is using that CIDR range.""")
flags.DEFINE_string('tpu_accelerator_type', 'tpu-v2',
                    'TPU accelerator type for the TPU.')
flags.DEFINE_string('tpu_description', None,
                    'Specifies a text description of the TPU.')
flags.DEFINE_string('tpu_network', None,
                    'Specifies the network that this TPU will be a part of.')
flags.DEFINE_string('tpu_tf_version', None, 'TensorFlow version for the TPU.')
flags.DEFINE_string('tpu_zone', None,
                    'The zone in which to create the TPU.')
flags.DEFINE_string('tpu_name', None, 'The name of the cloud TPU to create.')
flags.DEFINE_boolean('tpu_preemptible', False,
                     'Use preemptible cloud TPU or not.')

FLAGS = flags.FLAGS


def GetCloudTpuClass(cloud):
    """Gets the cloud TPU class corresponding to 'cloud'.

  Args:
    cloud: String. name of cloud to get the class for.

  Returns:
    Implementation class corresponding to the argument cloud

  Raises:
    Exception: An invalid cloud TPU was provided
Example 8
    'setting the password itself in $OS_PASSWORD is also '
    'supported.')

flags.DEFINE_string(
    'openstack_nova_endpoint_type', os.getenv('NOVA_ENDPOINT_TYPE',
                                              'publicURL'),
    'OpenStack Nova endpoint type, '
    'defaults to $NOVA_ENDPOINT_TYPE.')

flags.DEFINE_string('openstack_public_network', None,
                    'Name of OpenStack public network')

flags.DEFINE_string('openstack_private_network', 'private',
                    'Name of OpenStack private network')

flags.DEFINE_boolean('openstack_config_drive', False,
                     'Add the possibility to get metadata from an external '
                     'drive')

flags.DEFINE_boolean('openstack_boot_from_volume', False,
                     'Boot from volume instead of an image')

flags.DEFINE_integer('openstack_volume_size', None, 'Size of the volume (GB)')

flags.DEFINE_string('openstack_image_username', 'ubuntu',
                    'SSH username for the cloud image')

NONE = 'None'
flags.DEFINE_enum(
    'openstack_scheduler_policy', NONE, [NONE, 'affinity', 'anti-affinity'],
    'Add the possibility to use an affinity or anti-affinity '
    'policy in the scheduling process')
Example 9
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.rackspace import rackspace_disk
from perfkitbenchmarker.rackspace import rackspace_machine_types as rax
from perfkitbenchmarker.rackspace import util

FLAGS = flags.FLAGS
flags.DEFINE_boolean(
    'rackspace_apply_onmetal_ssd_tuning',
    default=False,
    help='Apply Rackspace recommended tuning to PCIe-based flash storage '
    'included with OnMetal IO instances. See: '
    'http://www.rackspace.com/knowledge_center/article/'
    'configure-flash-drives-in-high-io-instances-as-data-drives')

CLOUD_CONFIG_TEMPLATE = '''#cloud-config
users:
  - name: {0}
    ssh-authorized-keys:
      - {1}
    sudo: ['ALL=(ALL) NOPASSWD:ALL']
    groups: sudo
    shell: /bin/bash

'''
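
The template's two positional fields are the login user name and the SSH public key to authorize. A minimal usage sketch; the user name and key material are placeholders:

user_data = CLOUD_CONFIG_TEMPLATE.format(
    'perfkit',                         # {0}: user to create on the VM
    'ssh-rsa AAAAB3Nza... user@host')  # {1}: public key to authorize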
Example 10
    'object sizes up to 32MiB (see '
    'data/cloud-storage-workload.sh). \n'
    'Large means all objects are of at least 1GiB.')

flags.DEFINE_string('object_storage_credential_file', None,
                    'Directory of credential file.')

flags.DEFINE_string('boto_file_location', None,
                    'The location of the boto file.')

flags.DEFINE_string(
    'azure_lib_version', None,
    'Use a particular version of azure client lib, e.g.: 1.0.2')

flags.DEFINE_boolean(
    'openstack_swift_insecure', False,
    'Allow swiftclient to access Swift service without \n'
    'having to verify the SSL certificate')

FLAGS = flags.FLAGS

# Use a scratch disk here to simulate what most users would do when they
# use CLI tools to interact with the storage provider.
BENCHMARK_INFO = {
    'name': 'object_storage_service',
    'description': 'Object/blob storage service benchmarks. Specify '
                   '--object_storage_scenario '
                   'to select a set of sub-benchmarks to run. default is all.',
    'scratch_disk': True,
Example 11
from perfkitbenchmarker import flags

flags.DEFINE_string('gcloud_path', 'gcloud',
                    'The path for the gcloud utility.')
flags.DEFINE_list('additional_gcloud_flags', [],
                  'Additional flags to pass to gcloud.')
flags.DEFINE_integer(
    'gce_num_local_ssds', 0,
    'The number of ssds that should be added to the VM. Note '
    'that this is currently only supported in certain zones '
    '(see https://cloud.google.com/compute/docs/local-ssd).')
flags.DEFINE_string(
    'gcloud_scopes', None, 'If set, space-separated list of '
    'scopes to apply to every created machine')
flags.DEFINE_boolean('gce_migrate_on_maintenance', True, 'If true, allow VM '
                     'migration on GCE host maintenance.')
flags.DEFINE_boolean('gce_preemptible_vms', False, 'If true, use preemptible '
                     'VMs on GCE.')
flags.DEFINE_string(
    'image_project', None,
    'The project against which all image references will'
    ' be resolved. See: '
    'https://cloud.google.com/sdk/gcloud/reference/compute/disks/create')
flags.DEFINE_string(
    'gce_network_name', None, 'The name of an already created '
    'network to use instead of creating a new one.')
flags.DEFINE_string(
    'gce_subnet_region', None, 'Region to create subnet in '
    'instead of automatically creating one in every region.')
flags.DEFINE_string(
    'gce_subnet_addr', '10.128.0.0/20', 'Address range to the '
Example 12
    'or connections does not impact the aggregate target rate for the client.')

flags.DEFINE_string(
    'nginx_conf', None,
    'The path to an Nginx config file that should be applied '
    'to the server instead of the default one.')
flags.DEFINE_integer(
    'nginx_content_size', 1024,
    'The size of the content Nginx will serve in bytes. '
    'Larger files stress the network over the VMs.')
flags.DEFINE_list(
    'nginx_load_configs', ['100:60:1:1'],
    'For each load spec in the list, wrk2 will be run once '
    'against Nginx with those parameters. ' + _FLAG_FORMAT_DESCRIPTION)
flags.DEFINE_boolean(
    'nginx_throttle', False,
    'If True, skip running the nginx_load_configs and run '
    'wrk2 once aiming to throttle the nginx server.')
flags.DEFINE_string(
    'nginx_client_machine_type', None,
    'Machine type to use for the wrk2 client if different '
    'from nginx server machine type.')
flags.DEFINE_string(
    'nginx_server_machine_type', None,
    'Machine type to use for the nginx server if different '
    'from wrk2 client machine type.')


def _ValidateLoadConfigs(load_configs):
    """Validate that each load config has all required values."""
    if not load_configs:
        return False
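
The rest of the validation is truncated here, but each load spec is a colon-separated string such as '100:60:1:1' (see the --nginx_load_configs default above). Assuming the usual wrk2 ordering of target rate, duration, threads, and connections, which _FLAG_FORMAT_DESCRIPTION presumably spells out, a parse helper might look like this:

def _ParseLoadConfig(config):
    """Splits a load spec like '100:60:1:1' into its integer fields."""
    # Assumed field order: target request rate, duration in seconds,
    # client threads, open connections.
    rate, duration, threads, connections = (int(v) for v in config.split(':'))
    return rate, duration, threads, connections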
Example 13
# Sentinel value for unspecified platform.
GCP_MIN_CPU_PLATFORM_NONE = 'none'

flags.DEFINE_string('gcloud_path', 'gcloud',
                    'The path for the gcloud utility.')
flags.DEFINE_list('additional_gcloud_flags', [],
                  'Additional flags to pass to gcloud.')
flags.DEFINE_integer(
    'gce_num_local_ssds', 0,
    'The number of ssds that should be added to the VM. Note '
    'that this is currently only supported in certain zones '
    '(see https://cloud.google.com/compute/docs/local-ssd).')
flags.DEFINE_string(
    'gcloud_scopes', None, 'If set, space-separated list of '
    'scopes to apply to every created machine')
flags.DEFINE_boolean('gce_migrate_on_maintenance', True, 'If true, allow VM '
                     'migration on GCE host maintenance.')
flags.DEFINE_boolean('gce_preemptible_vms', False, 'If true, use preemptible '
                     'VMs on GCE.')
flags.DEFINE_string(
    'image_family', None, 'The family of the image that the boot disk will be '
    'initialized with. The --image flag will take priority over this flag. See:'
    ' https://cloud.google.com/sdk/gcloud/reference/compute/instances/create')
flags.DEFINE_string(
    'image_project', None,
    'The project against which all image references will'
    ' be resolved. See: '
    'https://cloud.google.com/sdk/gcloud/reference/compute/disks/create')
flags.DEFINE_string(
    'gce_network_name', None, 'The name of an already created '
    'network to use instead of creating a new one.')
flags.DEFINE_string(
Example 14
BENCHMARK_NAME = 'aerospike_certification_tool'
BENCHMARK_CONFIG = """
aerospike_certification_tool:
  description: Runs aerospike certification tool.
  vm_groups:
    default:
      vm_spec: *default_single_core
      disk_spec: *default_500_gb
      vm_count: 1
      disk_count: 0
"""

FLAGS = flags.FLAGS
flags.DEFINE_boolean(
    'act_stop_on_complete', True,
    'Stop the benchmark when completing the current load. This can be useful '
    'when deciding the maximum sustained load for stress tests.')


def GetConfig(user_config):
  config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
  if FLAGS.data_disk_type == disk.LOCAL:
    # With local disks, coerce a disk_count of 0 to None so PKB falls back
    # to the provider's default local disk count.
    config['vm_groups']['default']['disk_count'] = (
        config['vm_groups']['default']['disk_count'] or None)
  else:
    # With remote disks, default to a single data disk when none is set.
    config['vm_groups']['default']['disk_count'] = (
        config['vm_groups']['default']['disk_count'] or 1)
  return config


def Prepare(benchmark_spec):
Example 15
    'Return=-1': operator.add,
    'Return=-2': operator.add,
    'Return=-3': operator.add,
    'Return=OK': operator.add,
    'Return=ERROR': operator.add,
    'LatencyVariance(ms)': None,
    'AverageLatency(ms)': None,  # Requires both average and # of ops.
    'Throughput(ops/sec)': operator.add,
    '95thPercentileLatency(ms)': None,  # Calculated across clients.
    '99thPercentileLatency(ms)': None,  # Calculated across clients.
    'MinLatency(ms)': min,
    'MaxLatency(ms)': max}
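
The operator table (named AGGREGATE_OPERATORS in PKB's ycsb module; the assignment is truncated above) drives the merging of per-client results: metrics mapped to a callable are folded pairwise across clients, while None entries (averages and percentiles) must be recomputed from the underlying data instead. A minimal sketch of the pairwise fold over two hypothetical per-client result dicts:

def _CombineResults(a, b):
  """Folds two per-client YCSB result dicts using the operator table."""
  combined = dict(a)
  for key, op in AGGREGATE_OPERATORS.items():
    if op is not None and key in a and key in b:
      combined[key] = op(a[key], b[key])
  return combined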


flags.DEFINE_boolean('ycsb_histogram', False, 'Include individual '
                     'histogram results from YCSB (will increase sample '
                     'count).')
flags.DEFINE_boolean('ycsb_load_samples', True, 'Include samples '
                     'from pre-populating database.')
flags.DEFINE_boolean('ycsb_include_individual_results', False,
                     'Include results from each client VM, rather than just '
                     'combined results.')
flags.DEFINE_boolean('ycsb_reload_database', True,
                     'Reload database, otherwise skip load stage. '
                     'Note, this flag is only used if the database '
                     'is already loaded.')
flags.DEFINE_integer('ycsb_client_vms', 1, 'Number of YCSB client VMs.')
flags.DEFINE_list('ycsb_workload_files', ['workloada', 'workloadb'],
                  'Path to YCSB workload file to use during *run* '
                  'stage only. Comma-separated list')
flags.DEFINE_list('ycsb_load_parameters', [],
Example 16
http://dag.wiee.rs/home-made/dstat/
"""

import functools
import logging
import os
import posixpath
import threading
import uuid

from perfkitbenchmarker import events
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util

flags.DEFINE_boolean(
    'dstat', False, 'Run dstat (http://dag.wiee.rs/home-made/dstat/) '
    'on each VM to collect system performance metrics during '
    'each benchmark run.')
flags.DEFINE_integer(
    'dstat_interval', None,
    'dstat sample collection frequency, in seconds. Only '
    'applicable when --dstat is specified.')
flags.DEFINE_string(
    'dstat_output', None, 'Output directory for dstat output. '
    'Only applicable when --dstat is specified. '
    'Default: run temporary directory.')


class _DStatCollector(object):
    """dstat collector.

    Installs and runs dstat on a collection of VMs.
Example 17
                         message='The number of VMs booted by each launcher '
                         'should be between 1 and 1000.')
flags.DEFINE_string(
    'boot_os_type', 'debian9', 'OS to boot on the VMs. '
    'Defaults to debian9. OS on launcher server VM is set '
    'using os_type flag.')
flags.DEFINE_string(
    'boot_machine_type', 'n1-standard-2', 'Machine type to boot '
    'on the VMs. Defaults to n1-standard-2. Set machine type '
    'on launcher server VM with launcher_machine_type flag.')
flags.DEFINE_string(
    'launcher_machine_type', 'n1-standard-16', 'Machine type '
    'to launch the VMs. Defaults to n1-standard-16. Set '
    'machine type on boot VMs with boot_machine_type flag.')
flags.DEFINE_boolean(
    'vms_contact_launcher', True, 'Whether launched vms '
    'attempt to contact the launcher before launcher attempts '
    'to connect to them. Defaults to True.')
flags.DEFINE_boolean(
    'use_public_ip', False, 'Whether launcher should contact '
    'boot vms using public ip instead of internal ip. Only '
    'applicable for vms_contact_launcher=False mode. '
    'Defaults to False.')

# Tag for undefined hostname, should be synced with listener_server.py script.
UNDEFINED_HOSTNAME = 'UNDEFINED'
# Tag for sequential hostname, should be synced with listener_server.py script.
SEQUENTIAL_IP = 'SEQUENTIAL_IP_{}_{}'
# remote tmp directory used for this benchmark.
_REMOTE_DIR = vm_util.VM_TMP_DIR
# boot script to use on the launcher server vms.
_BOOT_SCRIPT = 'boot_script.sh'
Example 18
# TODO(ferneyhough): change to enum
flags.DEFINE_string('managed_db_engine', None,
                    'Managed database flavor to use (mysql, postgres)')
flags.DEFINE_string('managed_db_engine_version', None,
                    'Version of the database flavor selected, e.g. 5.7')
flags.DEFINE_string(
    'managed_db_database_name', None,
    'Name of the database to create. Defaults to '
    'pkb-db-[run-uri]')
flags.DEFINE_string('managed_db_database_username', None,
                    'Database username. Defaults to '
                    'pkb-db-user-[run-uri]')
flags.DEFINE_string(
    'managed_db_database_password', None, 'Database password. Defaults to '
    'a random 10-character alpha-numeric string')
flags.DEFINE_boolean('managed_db_high_availability', False,
                     'Specifies if the database should be high availability')
flags.DEFINE_boolean('managed_db_backup_enabled', True,
                     'Whether or not to enable automated backups')
flags.DEFINE_string(
    'managed_db_backup_start_time', '07:00',
    'Time in UTC that automated backups (if enabled) '
    'will be scheduled. In the form HH:MM UTC. '
    'Defaults to 07:00 UTC')
flags.DEFINE_list(
    'managed_db_zone', None, 'zone or region to launch the database in. '
    'Defaults to the client vm\'s zone.')
flags.DEFINE_string('managed_db_machine_type', None,
                    'Machine type of the database.')
flags.DEFINE_integer('managed_db_cpus', None,
                     'Number of CPUs in the database.')
flags.DEFINE_string(
Example 19
                     lower_bound=1)

flags.DEFINE_integer('hpcg_gpus_per_node',
                     None,
                     'The number of gpus per node.',
                     lower_bound=1)

flag_util.DEFINE_integerlist(
    'hpcg_problem_size',
    flag_util.IntegerList([256, 256, 256]),
    'three dimensional problem size for each node. Must contain '
    'three integers',
    module_name=__name__)
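
On the command line an IntegerList flag is passed comma-separated, for example --hpcg_problem_size=128,128,128. A small sketch of reading the three dimensions back in code, assuming IntegerList supports indexing (as its list-like default suggests) and that FLAGS is the usual flags.FLAGS handle:

problem_size = FLAGS.hpcg_problem_size
nx, ny, nz = problem_size[0], problem_size[1], problem_size[2]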

flags.DEFINE_boolean('hpcg_run_as_root', False,
                     'If true, pass --allow-run-as-root '
                     'to mpirun.')


class HpcgParseOutputException(Exception):
    pass


class HpcgIncorrectProblemSizeException(Exception):
    pass


def GetConfig(user_config):
    """Load and return benchmark config.

  Args:
Example 20
    'characters in length.')
flags.DEFINE_string(
    'owner', getpass.getuser(), 'Owner name. '
    'Used to tag created resources and performance records.')
flags.DEFINE_enum('log_level', log_util.INFO, [log_util.DEBUG, log_util.INFO],
                  'The log level to run at.')
flags.DEFINE_enum(
    'file_log_level', log_util.DEBUG, [log_util.DEBUG, log_util.INFO],
    'Anything logged at this level or higher will be written to the log file.')
flags.DEFINE_integer(
    'duration_in_seconds', None, 'duration of benchmarks. '
    '(only valid for mesh_benchmark)')
flags.DEFINE_string(
    'static_vm_file', None, 'The file path for the Static Machine file. See '
    'static_virtual_machine.py for a description of this file.')
flags.DEFINE_boolean('version', False, 'Display the version and exit.')
flags.DEFINE_enum('scratch_disk_type', None,
                  [disk.STANDARD, disk.REMOTE_SSD, disk.PIOPS, disk.LOCAL],
                  'Type for all scratch disks. The default is standard')
flags.DEFINE_string(
    'data_disk_type', None,
    'Type for all data disks. If a provider keeps the operating system and '
    'user data on separate disks, this only affects the user data disk(s). '
    'If the provider has OS and user data on the same disk, this flag affects '
    'that disk.')
flags.DEFINE_integer('scratch_disk_size', None, 'Size, in GB, for all scratch '
                     'disks.')
flags.DEFINE_integer('data_disk_size', None,
                     'Size, in GB, for all data disks.')
flags.DEFINE_integer('scratch_disk_iops', None,
                     'IOPS for Provisioned IOPS (SSD) volumes in AWS.')
Example 21
    'Operations': operator.add,
    'RunTime(ms)': max,
    'Return=0': operator.add,
    'Return=-1': operator.add,
    'Return=-2': operator.add,
    'Return=-3': operator.add,
    'AverageLatency(ms)': None,  # Requires both average and # of ops.
    'Throughput(ops/sec)': operator.add,
    '95thPercentileLatency(ms)': None,  # Calculated across clients.
    '99thPercentileLatency(ms)': None,  # Calculated across clients.
    'MinLatency(ms)': min,
    'MaxLatency(ms)': max
}

flags.DEFINE_boolean(
    'ycsb_histogram', True, 'Include individual '
    'histogram results from YCSB (will increase sample '
    'count).')
flags.DEFINE_boolean('ycsb_load_samples', True, 'Include samples '
                     'from pre-populating database.')
flags.DEFINE_boolean(
    'ycsb_include_individual_results', False,
    'Include results from each client VM, rather than just '
    'combined results.')
flags.DEFINE_integer('ycsb_client_vms',
                     1,
                     'Number of YCSB client VMs.',
                     lower_bound=1)
flags.DEFINE_list(
    'ycsb_workload_files', [],
    'Path to YCSB workload file to use during *run* '
    'stage only. Comma-separated list')
Example 22
    'Amount of time in seconds to burn cpu on vm before '
    'starting benchmark')
flags.DEFINE_integer(
    'burn_cpu_threads', 1, 'Number of threads to use to '
    'burn cpu before starting benchmark.')
flags.DEFINE_integer(
    'background_cpu_threads', None,
    'Number of threads of background cpu usage while '
    'running a benchmark')
flags.DEFINE_integer(
    'background_network_mbits_per_sec', None,
    'Number of megabits per second of background '
    'network traffic to generate during the run phase '
    'of the benchmark')
flags.DEFINE_boolean(
    'simulate_maintenance', False,
    'Whether to simulate VM maintenance during the benchmark. '
    'This requires both benchmark and provider support.')
flags.DEFINE_integer(
    'simulate_maintenance_delay', 0,
    'The number of seconds to wait to start simulating '
    'maintenance.')
flags.DEFINE_boolean(
    'ssh_reuse_connections', True,
    'Whether to reuse SSH connections rather than '
    'reestablishing a connection for each remote command.')
# We set this to the short value of 5 seconds so that the cluster boot benchmark
# can measure a fast connection when bringing up a VM. This avoids retries that
# may not be as quick as every 5 seconds when specifying a larger value.
flags.DEFINE_integer('ssh_connect_timeout',
                     5,
                     'timeout for SSH connection.',
Example 23
    'log_level', log_util.INFO,
    [log_util.DEBUG, log_util.INFO],
    'The log level to run at.')
flags.DEFINE_enum(
    'run_stage', STAGE_ALL,
    [STAGE_ALL, STAGE_PREPARE, STAGE_RUN, STAGE_CLEANUP],
    'The stage of perfkitbenchmarker to run. By default it runs all stages.')
flags.DEFINE_list('benchmark_config_pair', None,
                  'Benchmark and its config file pair, separated by :.')
flags.DEFINE_integer('duration_in_seconds', None,
                     'duration of benchmarks. '
                     '(only valid for mesh_benchmark)')
flags.DEFINE_string('static_vm_file', None,
                    'The file path for the Static Machine file. See '
                    'static_virtual_machine.py for a description of this file.')
flags.DEFINE_boolean('version', False, 'Display the version and exit.')
flags.DEFINE_enum(
    'scratch_disk_type', disk.STANDARD,
    [disk.STANDARD, disk.REMOTE_SSD, disk.PIOPS, disk.LOCAL],
    'Type for all scratch disks. The default is standard')
flags.DEFINE_integer('scratch_disk_iops', 1500,
                     'IOPS for Provisioned IOPS (SSD) volumes in AWS.')
flags.DEFINE_integer('num_striped_disks', 1,
                     'The number of disks to stripe together to form one '
                     '"logical" scratch disk. This defaults to 1 '
                     '(except with local disks), which means no striping. '
                     'When using local disks, they default to striping '
                     'all disks together.',
                     lower_bound=1)
flags.DEFINE_bool('install_packages', True,
                  'Override for determining whether packages should be '
Example 24
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from perfkitbenchmarker import flags

flags.DEFINE_string(
    'ali_user_name', 'ubuntu',
    'This determines the user name that Perfkit will '
    'attempt to use. This must be changed in order to '
    'use any image other than ubuntu.')
flags.DEFINE_integer('ali_bandwidth_in', 100, 'Inbound Bandwidth')
flags.DEFINE_integer('ali_bandwidth_out', 100, 'Outbound Bandwidth')
flags.DEFINE_string(
    'ali_io_optimized', None,
    'IO optimized for disk in AliCloud. The default is '
    'None, which means no IO optimization. '
    '"optimized" means use IO optimization. If you '
    'choose optimized, you must specify the system disk type.')
flags.DEFINE_string(
    'ali_system_disk_type', 'cloud',
    'System disk category for AliCloud. The default is '
    '"cloud" for general cloud disk, '
    '"cloud_ssd" for cloud ssd disk, '
    '"cloud_efficiency" for efficiency cloud disk, '
    '"ephemeral_ssd" for local ssd disk')
flags.DEFINE_boolean('ali_use_vpc', True, 'Use VPC to create networks')
Example 25
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.configs import spec

FLAGS = flags.FLAGS
DEFAULT_USERNAME = '******'

_VM_SPEC_REGISTRY = {}
_VM_REGISTRY = {}


flags.DEFINE_boolean(
    'dedicated_hosts', False,
    'If True, use hosts that only have VMs from the same '
    'benchmark running on them.')
flags.DEFINE_list('vm_metadata', [], 'Metadata to add to the vm '
                  'via the provider\'s AddMetadata function. It expects '
                  'key:value pairs.')


def GetVmSpecClass(cloud):
  """Returns the VmSpec class corresponding to 'cloud'."""
  return _VM_SPEC_REGISTRY.get(cloud, BaseVmSpec)


def GetVmClass(cloud, os_type):
  """Returns the VM class corresponding to 'cloud' and 'os_type'."""
  return _VM_REGISTRY.get((cloud, os_type))
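
Both lookups assume the registries were populated when the provider modules loaded; in the real code base the providers register themselves, so the hand-registration below is only a sketch with hypothetical class names:

class GceVmSpec(BaseVmSpec):  # hypothetical provider spec class
  CLOUD = 'GCP'


class GceDebianVirtualMachine(object):  # hypothetical provider VM class
  pass


_VM_SPEC_REGISTRY['GCP'] = GceVmSpec
_VM_REGISTRY[('GCP', 'debian')] = GceDebianVirtualMachine

assert GetVmSpecClass('GCP') is GceVmSpec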
Example 26
FLAGS = flags.FLAGS
flags.DEFINE_list(
    'spec17_subset', ['intspeed', 'fpspeed', 'intrate', 'fprate'],
    'Specify which speccpu2017 tests to run. Accepts a list of '
    'benchmark suites (intspeed, fpspeed, intrate, fprate) '
    'or individual benchmark names. Defaults to all suites.')
flags.DEFINE_integer(
    'spec17_copies', None,
    'Number of copies to run for rate tests. If not set, '
    'defaults to the number of CPU cores reported by lscpu.')
flags.DEFINE_integer(
    'spec17_threads', None,
    'Number of threads to run for speed tests. If not set, '
    'defaults to the number of CPU threads reported by lscpu.')
flags.DEFINE_boolean(
    'spec17_fdo', False, 'Run with feedback directed optimization on peak. '
    'Defaults to False.')

BENCHMARK_NAME = 'speccpu2017'
BENCHMARK_CONFIG = """
speccpu2017:
  description: Runs SPEC CPU2017
  vm_groups:
    default:
      vm_spec: *default_single_core
      disk_spec: *default_500_gb
      os_type: ubuntu1604
  speccpu:
    runspec_config: pkb-crosstool-llvm-linux-x86-fdo.cfg
"""
Example 27
                     lower_bound=0)
flag_util.DEFINE_units(
    'fio_blocksize',
    None, 'The block size for fio operations. Default is given by '
    'the scenario when using --generate_scenarios. This '
    'flag does not apply when using --fio_jobfile.',
    convertible_to=units.byte)
flags.DEFINE_integer('fio_runtime',
                     600,
                     'The number of seconds to run each fio job for.',
                     lower_bound=1)
flags.DEFINE_list(
    'fio_parameters', ['randrepeat=0'],
    'Parameters to apply to all PKB generated fio jobs. Each '
    'member of the list should be of the form "param=value".')
flags.DEFINE_boolean('fio_lat_log', False,
                     'Whether to collect a latency log of the fio jobs.')
flags.DEFINE_boolean('fio_bw_log', False,
                     'Whether to collect a bandwidth log of the fio jobs.')
flags.DEFINE_boolean('fio_iops_log', False,
                     'Whether to collect an IOPS log of the fio jobs.')
flags.DEFINE_integer(
    'fio_log_avg_msec',
    1000, 'By default, this will average each log entry in the '
    'fio latency, bandwidth, and iops logs over the specified '
    'period of time in milliseconds. If set to 0, fio will '
    'log an entry for every IO that completes, this can grow '
    'very quickly in size and can cause performance overhead.',
    lower_bound=0)
flags.DEFINE_boolean('fio_hist_log', False,
                     'Whether to collect clat histogram.')
flags.DEFINE_integer(
Example 28
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import re

from perfkitbenchmarker import disk
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.vm_util import OUTPUT_STDOUT as STDOUT,\
    OUTPUT_STDERR as STDERR, OUTPUT_EXIT_CODE as EXIT_CODE

FLAGS = flags.FLAGS

flags.DEFINE_boolean('use_ceph_volumes', True,
                     'Use Ceph volumes for scratch disks')

flags.DEFINE_string(
    'ceph_secret', None,
    'Name of the Ceph Secret used by Kubernetes in order to '
    'authenticate with Ceph.')

flags.DEFINE_string('rbd_pool', 'rbd', 'Name of RBD pool for Ceph volumes.')

flags.DEFINE_list(
    'ceph_monitors', [], 'IP addresses and ports of Ceph Monitors. '
    'Must be provided when scratch disk is required. '
    'Example: "127.0.0.1:6789,192.168.1.1:6789"')


class CephDisk(disk.BaseDisk):
Example 29
flags.DEFINE_string(
    'owner', getpass.getuser(), 'Owner name. '
    'Used to tag created resources and performance records.')
flags.DEFINE_enum('log_level', log_util.INFO, log_util.LOG_LEVELS.keys(),
                  'The log level to run at.')
flags.DEFINE_enum(
    'file_log_level', log_util.DEBUG, log_util.LOG_LEVELS.keys(),
    'Anything logged at this level or higher will be written to the log file.')
flags.DEFINE_integer(
    'duration_in_seconds', None, 'duration of benchmarks. '
    '(only valid for mesh_benchmark)')
flags.DEFINE_string(
    'static_vm_file', None, 'The file path for the Static Machine file. See '
    'static_virtual_machine.py for a description of this file.')
flags.DEFINE_boolean('version',
                     False,
                     'Display the version and exit.',
                     allow_override_cpp=True)
flags.DEFINE_boolean('time_commands', False, 'Times each command issued.')
flags.DEFINE_enum('scratch_disk_type', None,
                  [disk.STANDARD, disk.REMOTE_SSD, disk.PIOPS, disk.LOCAL],
                  'Type for all scratch disks. The default is standard')
flags.DEFINE_string(
    'data_disk_type', None,
    'Type for all data disks. If a provider keeps the operating system and '
    'user data on separate disks, this only affects the user data disk(s). '
    'If the provider has OS and user data on the same disk, this flag affects '
    'that disk.')
flags.DEFINE_integer('scratch_disk_size', None, 'Size, in GB, for all scratch '
                     'disks.')
flags.DEFINE_integer('data_disk_size', None,
                     'Size, in GB, for all data disks.')
Example 30
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags

FLAGS = flags.FLAGS

flags.DEFINE_string(
    'aws_credentials_local_path', os.path.join('~', '.aws'),
    'Path where the AWS credential files can be found on the local machine.')

flags.DEFINE_string(
    'aws_credentials_remote_path', '.aws',
    'Path where the AWS credential files will be written on remote machines.')

flags.DEFINE_boolean(
    'aws_credentials_overwrite', False,
    'When set, if an AWS credential file already exists at the destination '
    'specified by --aws_credentials_remote_path, it will be overwritten during '
    'AWS credential file installation.')
flags.DEFINE_string('aws_s3_region', None, 'Region for the S3 bucket')


def _GetLocalPath():
    """Gets the expanded local path of the credential files.

    Returns:
      string. Path to the credential files on the local machine.
    """
    return os.path.expanduser(FLAGS.aws_credentials_local_path)


def GetCredentials(credentials_file_name='credentials'):