'If testing GSI/LSI, use the primary keyname '
                    'of the index you want to test.')
flags.DEFINE_enum('aws_dynamodb_attributetype',
                  'S', ['S', 'N', 'B'],
                  'The type of attribute, defaults to S (String). '
                  'Alternates are N (Number) and B (Binary).')
flags.DEFINE_integer('aws_dynamodb_read_capacity',
                     5,
                     'Set RCU for the DynamoDB table.')
flags.DEFINE_integer('aws_dynamodb_write_capacity',
                     5,
                     'Set WCU for the DynamoDB table.')
flags.DEFINE_integer('aws_dynamodb_lsi_count',
                     0, 'Number of Local Secondary Indexes. Must be 0-5.')
flags.register_validator('aws_dynamodb_lsi_count',
                         lambda value: 0 <= value <= 5,
                         message='--aws_dynamodb_lsi_count must be from 0-5')
flags.register_validator('aws_dynamodb_use_sort',
                         lambda sort: sort or not FLAGS.aws_dynamodb_lsi_count,
                         message='--aws_dynamodb_lsi_count requires sort key.')
flags.DEFINE_integer('aws_dynamodb_gsi_count',
                     0, 'Number of Global Secondary Indexes. Must be 0-5.')
flags.register_validator('aws_dynamodb_gsi_count',
                         lambda value: 0 <= value <= 5,
                         message='--aws_dynamodb_gsi_count must be from 0-5')
flags.DEFINE_boolean('aws_dynamodb_ycsb_consistentReads',
                     False,
                     'Consistent reads cost twice as much as eventual reads. '
                     'Defaults to False, which uses eventual reads.')

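These snippets all follow the same absl-style flags pattern. As a minimal, self-contained sketch (the flag name here is hypothetical, and it assumes perfkitbenchmarker's flags module forwards to absl.flags):

from absl import flags

FLAGS = flags.FLAGS

flags.DEFINE_integer('demo_index_count', 0, 'Hypothetical index count, 0-5.')
# register_validator checks the flag's current value immediately and re-checks
# on every assignment; a failing check raises flags.IllegalFlagValueError.
flags.register_validator('demo_index_count',
                         lambda value: 0 <= value <= 5,
                         message='--demo_index_count must be from 0-5')
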
Example #2
          zone: eastus
"""

GPU = 'gpu'
CPU = 'cpu'
IMAGENET_SHAPE = '3,299,299'

MODELS = [
    'alexnet', 'googlenet', 'inception-bn', 'inception-resnet-v2',
    'inception-v3', 'inception-v4', 'lenet', 'mlp', 'mobilenet', 'resnet-v1',
    'resnet', 'resnext', 'vgg'
]
flags.DEFINE_list('mx_models', ['inception-v3', 'vgg', 'alexnet', 'resnet'],
                  'The networks to train.')
flags.register_validator(
    'mx_models', lambda models: models and set(models).issubset(MODELS),
    'Invalid models list. mx_models must be a subset of ' + ', '.join(MODELS))
flags.DEFINE_integer('mx_batch_size', None, 'The batch size for SGD training.')
flags.DEFINE_integer('mx_num_epochs', 80,
                     'The maximum number of epochs to train.')
flags.DEFINE_enum('mx_device', GPU, [CPU, GPU],
                  'Device to use for computation: cpu or gpu')
flags.DEFINE_integer(
    'mx_num_layers', None, 'Number of layers in the neural '
    'network, required by some networks such as resnet')

DEFAULT_BATCH_SIZE = 64
DEFAULT = 'default'
DEFAULT_BATCH_SIZES_BY_MODEL = {
    'vgg': {
        16: 32
Example #3
    if option == MEASUREMENTS_NONE and len(options_list) != 1:
      raise flags.ValidationError(
          '%s: Cannot combine with other --%s options' % (
              option, MEASUREMENTS_FLAG_NAME))
  return True


flags.DEFINE_list(
    MEASUREMENTS_FLAG_NAME, MEASUREMENTS_END_TO_END_RUNTIME,
    'Comma-separated list of values from <%s> that selects which timing '
    'measurements to enable. Measurements will be included as samples in the '
    'benchmark results. %s' % (
        '|'.join(MEASUREMENTS_ALL),
        ' '.join(['%s: %s' % (option, description) for option, description in
                  MEASUREMENTS_ALL.items()])))
flags.register_validator(
    MEASUREMENTS_FLAG_NAME, ValidateMeasurementsFlag)


def _GenerateIntervalSamples(interval, include_timestamps):
  """Generates Samples for a single interval timed by IntervalTimer.Measure.

  Args:
    interval: A (name, start_time, stop_time) tuple from a call to
      IntervalTimer.Measure.
    include_timestamps: A Boolean that controls whether Samples containing the
      start and stop timestamps are added to the generated list.

  Returns:
    A list of 0 to 3 Samples as specified by the args. When included, the
    Samples appear in the order of runtime, start timestamp, stop timestamp.
  """
Example #4
    'benchmark running on them.')
flags.DEFINE_integer(
    'num_vms_per_host', None,
    'The number of VMs per dedicated host. If None, VMs will be packed on a '
    'single host until no more can be packed at which point a new host will '
    'be created.')
flags.DEFINE_integer(
    'num_cpus_override', None,
    'Rather than detecting the number of CPUs present on the machine, use this '
    'value if set. Some benchmarks will use this number to automatically '
    'scale their configurations; this can be used as a method to control '
    'benchmark scaling. It will also change the num_cpus metadata '
    'published along with the benchmark data.')
flags.DEFINE_list(VM_METADATA, [], 'Metadata to add to the VM. It expects '
                  'key:value pairs.')
flags.register_validator(VM_METADATA, ValidateVmMetadataFlag)
flags.DEFINE_bool(
    'skip_firewall_rules', False,
    'If set, this run will not create firewall rules. This is useful if the '
    'user project already has all of the firewall rules in place and/or '
    'creating new ones is expensive')

# Note: If adding a gpu type here, be sure to add it to
# the flag definition in pkb.py too.
VALID_GPU_TYPES = ['k80', 'p100', 'v100', 'p4', 'p4-vws', 't4']


def GetVmSpecClass(cloud):
    """Returns the VmSpec class corresponding to 'cloud'."""
    return spec.GetSpecClass(BaseVmSpec, CLOUD=cloud)
Example #5
                    'a random 10-character alpha-numeric string')
flags.DEFINE_boolean('managed_db_high_availability', False,
                     'Specifies if the database should be high availability')
flags.DEFINE_boolean('managed_db_backup_enabled', True,
                     'Whether or not to enable automated backups')
flags.DEFINE_string('managed_db_backup_start_time', '07:00',
                    'Time in UTC that automated backups (if enabled) '
                    'will be scheduled. In the form HH:MM UTC. '
                    'Defaults to 07:00 UTC')
flags.DEFINE_string('managed_db_zone', None,
                    'zone or region to launch the database in. '
                    'Defaults to the client vm\'s zone.')

BACKUP_TIME_REGULAR_EXPRESSION = r'^\d\d:\d\d$'
flags.register_validator(
    'managed_db_backup_start_time',
    lambda value: re.search(BACKUP_TIME_REGULAR_EXPRESSION, value) is not None,
    message='--managed_db_backup_start_time must be in the form HH:MM')
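For example, '07:00' passes this check while '7:00' or '0700' do not: the pattern requires exactly two digits, a colon, and two more digits.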

MYSQL = 'mysql'
POSTGRES = 'postgres'
AURORA_POSTGRES = 'aurora-postgresql'

FLAGS = flags.FLAGS

# TODO: Implement DEFAULT BACKUP_START_TIME for instances.


class ManagedRelationalDbPropertyNotSet(Exception):
  pass

Example #6
from perfkitbenchmarker.linux_packages import gluster

FLAGS = flags.FLAGS
BENCHMARKS = ['VDI', 'DATABASE', 'SWBUILD', 'VDA']

flags.DEFINE_string(
    'specsfs2014_config', None,
    'This flag can be used to specify an alternate SPEC config file to use. '
    'If this option is specified, none of the other benchmark specific flags '
    'which operate on the config file will be used (since the default config '
    'file will be replaced by this one).')
flags.DEFINE_list('specsfs2014_benchmarks', BENCHMARKS,
                  'The SPEC SFS 2014 benchmarks to run.')
flags.register_validator(
    'specsfs2014_benchmarks',
    lambda benchmarks: benchmarks and set(benchmarks).issubset(BENCHMARKS),
    'Invalid benchmarks list. specsfs2014_benchmarks must be a subset of ' +
    ', '.join(BENCHMARKS))
flag_util.DEFINE_integerlist(
    'specsfs2014_load', [1],
    'The starting load in units of SPEC "business metrics". The meaning of '
    'business metric varies depending on the SPEC benchmark (e.g. VDI has '
    'load measured in virtual desktops).',
    module_name=__name__)
flags.DEFINE_integer('specsfs2014_incr_load',
                     1,
                     'The amount to increment "load" by for each run.',
                     lower_bound=1)
flags.DEFINE_integer(
    'specsfs2014_num_runs',
    1, 'The total number of SPEC runs. The load for the nth run is '
Example #7
    'aws_dynamodb_sortkey', 'sort_key', 'The sort key of the DynamoDB table. '
    'This switches to the primary key if using sort. '
    'If testing GSI/LSI, use the primary keyname '
    'of the index you want to test.')
flags.DEFINE_enum(
    'aws_dynamodb_attributetype', 'S', ['S', 'N', 'B'],
    'The type of attribute, defaults to S (String). '
    'Alternates are N (Number) and B (Binary).')
flags.DEFINE_integer('aws_dynamodb_read_capacity', 5,
                     'Set RCU for the DynamoDB table.')
flags.DEFINE_integer('aws_dynamodb_write_capacity', 5,
                     'Set WCU for the DynamoDB table.')
flags.DEFINE_integer('aws_dynamodb_lsi_count', 0,
                     'Number of Local Secondary Indexes. Must be 0-5.')
flags.register_validator('aws_dynamodb_lsi_count',
                         lambda value: 0 <= value <= 5,
                         message='--aws_dynamodb_lsi_count must be from 0-5')
flags.register_validator('aws_dynamodb_use_sort',
                         lambda sort: sort or not FLAGS.aws_dynamodb_lsi_count,
                         message='--aws_dynamodb_lsi_count requires sort key.')
flags.DEFINE_integer('aws_dynamodb_gsi_count', 0,
                     'Number of Global Secondary Indexes. Must be 0-5.')
flags.register_validator('aws_dynamodb_gsi_count',
                         lambda value: 0 <= value <= 5,
                         message='--aws_dynamodb_gsi_count must be from 0-5')
flags.DEFINE_boolean(
    'aws_dynamodb_ycsb_consistentReads', False,
    'Consistent reads cost twice as much as eventual reads. '
    'Defaults to False, which uses eventual reads.')
flags.DEFINE_integer(
    'aws_dynamodb_connectMax', 50,
Example #8
            return False

        if packet_size < 0:
            return False

        # verify the ip type
        if ip_type not in [
                vm_util.IpAddressSubset.EXTERNAL,
                vm_util.IpAddressSubset.INTERNAL
        ]:
            return False

    return True


flags.register_validator('ntttcp_config_list', NtttcpConfigListValidator,
                         'malformed config list')


def ParseConfigList():
    """Get the list of configs for the test from the flags."""
    if not FLAGS.ntttcp_config_list:
        # config is the empty string.
        return [
            NtttcpConf(udp=FLAGS.ntttcp_udp,
                       threads=FLAGS.ntttcp_threads,
                       time_s=FLAGS.ntttcp_time,
                       ip_type=FLAGS.ip_addresses,
                       packet_size=FLAGS.ntttcp_packet_size)
        ]

    conf_list = []
Example #9
      vm_count: 1
      os_type: debian9
    clients:
      vm_spec:
        GCP:
          machine_type: n1-standard-2
          boot_disk_type: pd-ssd
      os_type: debian9
      vm_count: 1
"""

FLAGS = flags.FLAGS
flags.DEFINE_integer('boots_per_launcher', 1, 'Number of VMs to boot per '
                     'launcher server VM. Defaults to 1.')
flags.register_validator('boots_per_launcher',
                         lambda value: 1 <= value <= 1000,
                         message='The number of VMs booted by each launcher '
                         'should be between 1 and 1000.')
flags.DEFINE_string('boot_os_type', 'debian9', 'OS to boot on the VMs. '
                    'Defaults to debian9. OS on launcher server VM is set '
                    'using os_type flag.')
flags.DEFINE_string('boot_machine_type', 'n1-standard-2', 'Machine type to '
                    'boot on the VMs. Defaults to n1-standard-2. Set machine '
                    'type on launcher server VM with launcher_machine_type '
                    'flag.')
flags.DEFINE_string('launcher_machine_type', 'n1-standard-16', 'Machine type '
                    'for the launcher server VMs. Defaults to n1-standard-16. '
                    'Set machine type on boot VMs with boot_machine_type '
                    'flag.')

# remote tmp directory used for this benchmark.
_REMOTE_DIR = vm_util.VM_TMP_DIR
# boot script to use on the launcher server vms.
_BOOT_SCRIPT = 'boot_script.sh'
Example #10
                     'Number of seconds to run the test.')
flags.DEFINE_boolean('stress_ng_calc_geomean', True,
                     'Whether to calculate geomean or not.')
flags.DEFINE_list(
    'stress_ng_custom_stressors', [],
    'List of stressors to run against. Default combines the cpu, '
    'cpu-cache, and memory suites.')

ALL_WORKLOADS = ['small', 'medium', 'large']
flags.DEFINE_list(
    'stress_ng_thread_workloads', ['large'],
    'List of thread workload sizes to run against. Options are '
    'small (1 thread total), medium (1 thread per 2 cpus), and '
    'large (1 thread per cpu).')
flags.register_validator(
    'stress_ng_thread_workloads',
    lambda workloads: workloads and set(workloads).issubset(ALL_WORKLOADS))


def _GeoMeanOverflow(iterable):
  """Returns the geometric mean.

  See https://en.wikipedia.org/wiki/Geometric_mean#Relationship_with_logarithms

  Args:
    iterable: a list of positive floats to take the geometric mean of.

  Returns: The geometric mean of the list.
  """
  a = numpy.log(iterable)
  return numpy.exp(a.sum() / len(a))
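The log-then-exp form above avoids the overflow a naive running product would hit for long lists of large samples. A quick check of the identity with made-up values:

import numpy

# exp(mean(log(x))) is the geometric mean: for [2, 8] it is sqrt(16) == 4.
values = [2.0, 8.0]
print(numpy.exp(numpy.log(values).sum() / len(values)))  # -> 4.0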
Example #11
import json
import logging
import os
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import netperf
from six.moves import range

# ALL_TESTS must be defined before the validator below, since validators
# run against the flag's default value as soon as they are registered.
ALL_TESTS = ['TCP_STREAM', 'TCP_MAERTS']

flags.DEFINE_list('bidirectional_network_tests',
                  ['TCP_STREAM', 'TCP_MAERTS', 'TCP_MAERTS'],
                  'The network tests to run.')
flags.register_validator(
    'bidirectional_network_tests',
    lambda benchmarks: benchmarks and set(benchmarks).issubset(ALL_TESTS))

flags.DEFINE_integer('bidirectional_network_test_length', 60,
                     'bidirectional_network test length, in seconds',
                     lower_bound=1)
flags.DEFINE_integer('bidirectional_stream_num_streams', 8,
                     'Number of netperf processes to run.',
                     lower_bound=1)

FLAGS = flags.FLAGS

BENCHMARK_NAME = 'bidirectional_network'
BENCHMARK_CONFIG = """
Example #12
        if option == MEASUREMENTS_NONE and len(options_list) != 1:
            raise flags.ValidationError(
                '%s: Cannot combine with other --%s options' %
                (option, MEASUREMENTS_FLAG_NAME))
    return True


flags.DEFINE_list(
    MEASUREMENTS_FLAG_NAME, MEASUREMENTS_END_TO_END_RUNTIME,
    'Comma-separated list of values from <%s> that selects which timing '
    'measurements to enable. Measurements will be included as samples in the '
    'benchmark results. %s' % ('|'.join(MEASUREMENTS_ALL), ' '.join([
        '%s: %s' % (option, description)
        for option, description in MEASUREMENTS_ALL.items()
    ])))
flags.register_validator(MEASUREMENTS_FLAG_NAME, ValidateMeasurementsFlag)


def _GenerateIntervalSamples(interval, include_timestamps):
    """Generates Samples for a single interval timed by IntervalTimer.Measure.

  Args:
    interval: A (name, start_time, stop_time) tuple from a call to
      IntervalTimer.Measure.
    include_timestamps: A Boolean that controls whether Samples containing the
      start and stop timestamps are added to the generated list.

  Returns:
    A list of 0 to 3 Samples as specified by the args. When included, the
    Samples appear in the order of runtime, start timestamp, stop timestamp.
  """
def _ValidateLoadConfigs(load_configs):
  """Validate that each load config has all required values."""
  if not load_configs:
    return False
  for config in load_configs:
    config_values = config.split(':')
    if len(config_values) != 4:
      return False
    for value in config_values:
      if not (value.isdigit() and int(value) > 0):
        return False
  return True


flags.register_validator(
    'nginx_load_configs', _ValidateLoadConfigs,
    'Malformed load config. ' + _FLAG_FORMAT_DESCRIPTION)
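To make the validator's contract concrete, a few illustrative inputs (the values are invented; the real field meanings are given by _FLAG_FORMAT_DESCRIPTION in the full module):

# Each config must be exactly four colon-separated positive integers.
assert _ValidateLoadConfigs(['100:10:2:64'])
# A wrong field count or a non-positive field is rejected.
assert not _ValidateLoadConfigs(['100:10:2'])
assert not _ValidateLoadConfigs(['100:0:2:64'])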

BENCHMARK_NAME = 'nginx'
BENCHMARK_CONFIG = """
nginx:
  description: Benchmarks Nginx server performance.
  vm_groups:
    clients:
      vm_spec: *default_single_core
      vm_count: null
    server:
      vm_spec: *default_single_core
"""

Example #14
# The names of the benchmarks.
HPCC_BENCHMARKS = sorted(HPCC_METRIC_MAP)


flags.DEFINE_enum(
    'hpcc_math_library', HPCC_MATH_LIBRARY_OPEN_BLAS,
    [HPCC_MATH_LIBRARY_OPEN_BLAS, HPCC_MATH_LIBRARY_MKL],
    'The math library to use when compiling hpcc: openblas or mkl. '
    'The default is openblas.')
flags.DEFINE_list(
    'hpcc_benchmarks', [], 'A list of benchmarks in HPCC to run. If none are '
    'specified (the default), then all of the benchmarks are run. In 1.5.0, '
    'the benchmarks may include the following: %s' % ', '.join(HPCC_BENCHMARKS))
flags.register_validator(
    'hpcc_benchmarks',
    lambda hpcc_benchmarks: set(hpcc_benchmarks).issubset(set(HPCC_BENCHMARKS)))
FLAGS = flags.FLAGS


def _LimitBenchmarksToRun(vm, selected_hpcc_benchmarks):
  """Limits the benchmarks to run.

  This function copies hpcc.c to the local machine, comments out code that runs
  benchmarks not listed in selected_hpcc_benchmarks, and then copies hpcc.c back
  to the remote machine.

  Args:
    vm: The machine where hpcc.c was installed.
    selected_hpcc_benchmarks: A set of benchmarks to run.
  """
Example #15
          boot_disk_size: 200
        Azure:
          machine_type: Standard_NC6
          zone: eastus
"""

GPU = 'gpu'
CPU = 'cpu'

MODELS = ['alexnet', 'googlenet', 'inception-bn', 'inception-resnet-v2',
          'inception-v3', 'inception-v4', 'lenet', 'mlp', 'mobilenet',
          'resnet-v1', 'resnet', 'resnext', 'vgg']
flags.DEFINE_list('mx_models', ['inception-v3', 'vgg', 'alexnet', 'resnet'],
                  'The networks to train.')
flags.register_validator('mx_models',
                         lambda models: models and set(models).issubset(MODELS),
                         'Invalid models list. mx_models must be a subset of '
                         + ', '.join(MODELS))
flags.DEFINE_integer('mx_batch_size', None, 'The batch size for SGD training.')
flags.DEFINE_integer('mx_num_epochs', 80,
                     'The maximum number of epochs to train.')
flags.DEFINE_enum('mx_device', GPU, [CPU, GPU],
                  'Device to use for computation: cpu or gpu')
flags.DEFINE_integer('mx_num_layers', None, 'Number of layers in the neural '
                     'network, required by some networks such as resnet')
flags.DEFINE_enum('mx_precision', 'float32', ['float16', 'float32'],
                  'Precision')
flags.DEFINE_enum('mx_key_value_store', 'device',
                  ['local', 'device', 'nccl', 'dist_sync', 'dist_device_sync',
                   'dist_async'], 'Key-Value store types.')
flags.DEFINE_string('mx_image_shape', None,
                    'The image shape that feeds into the network.')
Example #16
                    'The AWS cache node type to use for elasticache clusters.')
flags.DEFINE_string('aws_elasticache_failover_zone',
                    None,
                    'AWS elasticache failover zone')
flags.DEFINE_string('aws_efs_token', None,
                    'The creation token used to create the EFS resource. '
                    'If the file system already exists, it will use that '
                    'instead of creating a new one.')
flags.DEFINE_boolean('aws_delete_file_system', True,
                     'Whether to delete the EFS file system.')
flags.DEFINE_list('eks_zones', [],
                  'DEPRECATED: Set container_cluster.vm_spec.AWS.zone instead. '
                  'The single region or multiple zones into which the EKS '
                  'cluster will be deployed. If a region is passed, zones will '
                  'be decided by EKS. All zones must be from the same region.')
flags.register_validator('eks_zones',
                         util.EksZonesValidator)
flags.DEFINE_enum('efs_throughput_mode', 'provisioned',
                  ['provisioned', 'bursting'],
                  'The throughput mode to use for EFS.')
flags.DEFINE_float('efs_provisioned_throughput', 1024.0,
                   'The throughput limit of EFS (in MiB/s) when run in '
                   'provisioned mode.')
flags.DEFINE_boolean('provision_athena', False,
                     'Whether to provision the Athena database.')
flags.DEFINE_boolean('teardown_athena', True,
                     'Whether to teardown the Athena database.')
flags.DEFINE_string(
    'athena_output_location_prefix', 'athena-cli-results',
    'Prefix of the S3 bucket name for Athena Query Output. Suffix will be the '
    'region and the run URI, and the bucket will be dynamically created and '
    'deleted during the test.')
Example #17
                     'Whether to calculate geomean or not.')
flags.DEFINE_list(
    'stress_ng_custom_stressors', DEFAULT_STRESSORS,
    'List of stressors to run against. Default combines the cpu, '
    'cpu-cache, and memory suites.')
flags.DEFINE_list('stress_ng_cpu_methods', [],
                  'List of cpu methods to run with. By default none are ran.')

ALL_WORKLOADS = ['small', 'medium', 'large']
flags.DEFINE_list(
    'stress_ng_thread_workloads', ['large'],
    'List of thread workload sizes to run against. Options are '
    'small (1 thread total), medium (1 thread per 2 cpus), and '
    'large (1 thread per cpu).')
flags.register_validator(
    'stress_ng_thread_workloads',
    lambda workloads: workloads and set(workloads).issubset(ALL_WORKLOADS))

ALL_VERSIONS = ['0.05.23', '0.09.25']
flags.DEFINE_enum(
    'stress_ng_version', '0.09.25', ALL_VERSIONS,
    'Stress-ng version to use. Default is 0.09.25, which '
    'is the default package on Ubuntu 18.04.')


def _GeoMeanOverflow(iterable):
    """Returns the geometric mean.

  See https://en.wikipedia.org/wiki/Geometric_mean#Relationship_with_logarithms

  Args:
Example #18
flags.DEFINE_string('aws_preprovisioned_data_bucket', None,
                    'AWS bucket where pre-provisioned data has been copied.')
flags.DEFINE_string('cache_node_type',
                    'cache.m4.large',
                    'The AWS cache node type to use for elasticache clusters.')
flags.DEFINE_string('aws_elasticache_failover_zone',
                    None,
                    'AWS elasticache failover zone')
flags.DEFINE_string('aws_efs_token', None,
                    'The creation token used to create the EFS resource. '
                    'If the file system already exists, it will use that '
                    'instead of creating a new one.')
flags.DEFINE_boolean('aws_delete_file_system', True,
                     'Whether to delete the EFS file system.')
flags.DEFINE_list('eks_zones', ['us-east-1a', 'us-east-1c'],
                  'The zones into which the EKS cluster will be deployed. '
                  'There must be at least two zones and all zones must be '
                  'from the same region.')
flags.register_validator('eks_zones',
                         util.EksZonesValidator)
flags.DEFINE_boolean('eks_verify_ssl', True,
                     'Whether to verify the ssl certificate when communicating '
                     'with the EKS service. This requires SNI support which is '
                     'not available in the SSL modules of Python < 2.7.9.')
flags.DEFINE_enum('efs_throughput_mode', 'provisioned',
                  ['provisioned', 'bursting'],
                  'The throughput mode to use for EFS.')
flags.DEFINE_float('efs_provisioned_throughput', 1024.0,
                   'The throughput limit of EFS (in MiB/s) when run in '
                   'provisioned mode.')
Example #19
                  'Device to use for computation: cpu or gpu')
flags.DEFINE_enum('tf_data_format', 'NCHW', ['NCHW', 'NHWC'], '''Data layout to
                  use: NHWC (TF native) or NCHW (cuDNN native).''')
flags.DEFINE_boolean('tf_use_nccl', True,
                     'Whether to use nccl all-reduce primitives where possible')
flags.DEFINE_boolean('tf_distortions', True,
                     '''Enable/disable distortions during image preprocessing.
                     These include bbox and color distortions.''')


def LocalParameterDeviceValidator(value):
  if FLAGS.tf_device == CPU:
    return value == CPU
  return True

flags.register_validator('tf_local_parameter_device',
                         LocalParameterDeviceValidator)
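In effect, when --tf_device=cpu the local parameter device must also be cpu; on GPU runs either parameter device is accepted.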


DEFAULT_BATCH_SIZE = 64
DEFAULT_BATCH_SIZES_BY_MODEL = {
    'vgg16': 32,
    'alexnet': 512,
    'resnet152': 32,
}


class TFParseOutputException(Exception):
  pass


def GetConfig(user_config):
Example #20
    if time_s < 1:
      return False

    if packet_size < 0:
      return False

    # verify the ip type
    if ip_type not in [
        vm_util.IpAddressSubset.EXTERNAL, vm_util.IpAddressSubset.INTERNAL
    ]:
      return False

  return True


flags.register_validator('ntttcp_config_list', NtttcpConfigListValidator,
                         'malformed config list')


def ParseConfigList():
  """Get the list of configs for the test from the flags."""
  if not FLAGS.ntttcp_config_list:
    # config is the empty string.
    return [
        NtttcpConf(
            udp=FLAGS.ntttcp_udp,
            threads=FLAGS.ntttcp_threads,
            time_s=FLAGS.ntttcp_time,
            ip_type=FLAGS.ip_addresses,
            packet_size=FLAGS.ntttcp_packet_size)
    ]
Example #21
      block_size = int(config_vals[2])
    except ValueError:
      return False

    if is_random_access not in TRUE_VALS + FALSE_VALS:
      return False

    if is_read not in TRUE_VALS + FALSE_VALS:
      return False

    if block_size <= 0:
      return False
  return True


flags.register_validator('diskspd_config_list', DiskspdConfigListValidator,
                         'malformed config list')


def ParseConfigList():
  """Get the list of configs for the test from the flags."""
  conf_list = []
  for config in FLAGS.diskspd_config_list:
    confs = config.split(':')

    conf_list.append(
        DiskspdConf(
            access_pattern='r' if (confs[0] in TRUE_VALS) else 's',
            write_ratio=0 if (confs[1] in TRUE_VALS) else 100,
            block_size=int(confs[2])))

  return conf_list
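As a usage sketch, assuming the TRUE_VALS list defined elsewhere in this module contains 'True', an entry like 'True:True:4096' parses into a random-access, pure-read configuration with 4096-byte blocks:

# Hypothetical flag value and its parsed result:
#   FLAGS.diskspd_config_list = ['True:True:4096']
#   ParseConfigList() -> [DiskspdConf(access_pattern='r', write_ratio=0,
#                                     block_size=4096)]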
Example #22
GPU = 'gpu'
CPU = 'cpu'
NCHW = 'NCHW'
NHWC = 'NHWC'
PID_PREFIX = 'TF_PS_PID'
MODELS = ['vgg11', 'vgg16', 'vgg19', 'lenet', 'googlenet', 'overfeat',
          'alexnet', 'trivial', 'inception3', 'inception4', 'resnet50',
          'resnet101', 'resnet152']

flags.DEFINE_boolean('tf_forward_only', False, '''whether to use forward-only
                     or training mode for benchmarking''')
flags.DEFINE_list('tf_models', ['inception3', 'vgg16', 'alexnet', 'resnet50'],
                  'names of the models to run')
flags.register_validator('tf_models',
                         lambda models: models and set(models).issubset(MODELS),
                         'Invalid models list. tf_models must be a subset of '
                         + ', '.join(MODELS))
flags.DEFINE_enum('tf_data_name', 'imagenet', ['imagenet', 'flowers'],
                  'Name of dataset: imagenet or flowers.')
flags.DEFINE_integer('tf_batch_size', None, 'batch size per compute device. '
                     'If not provided, the suggested batch size is used for '
                     'the given model')
flags.DEFINE_enum('tf_variable_update', 'parameter_server',
                  ['parameter_server', 'replicated',
                   'distributed_replicated', 'independent'],
                  '''The method for managing variables: parameter_server,
                  replicated, distributed_replicated, independent''')
flags.DEFINE_enum('tf_local_parameter_device', CPU, [CPU, GPU],
                  '''Device to use as parameter server: cpu or gpu. For
                  distributed training, it can affect where caching of
                  variables happens.''')
Example #23
flags.DEFINE_boolean('managed_db_high_availability', False,
                     'Specifies if the database should be high availability')
flags.DEFINE_boolean('managed_db_backup_enabled', True,
                     'Whether or not to enable automated backups')
flags.DEFINE_string(
    'managed_db_backup_start_time', '07:00',
    'Time in UTC that automated backups (if enabled) '
    'will be scheduled. In the form HH:MM UTC. '
    'Defaults to 07:00 UTC')
flags.DEFINE_string(
    'managed_db_zone', None, 'zone or region to launch the database in. '
    'Defaults to the client vm\'s zone.')

BACKUP_TIME_REGULAR_EXPRESSION = r'^\d\d:\d\d$'
flags.register_validator(
    'managed_db_backup_start_time',
    lambda value: re.search(BACKUP_TIME_REGULAR_EXPRESSION, value) is not None,
    message='--managed_db_backup_start_time must be in the form HH:MM')

MYSQL = 'mysql'
POSTGRES = 'postgres'
AURORA_POSTGRES = 'aurora-postgresql'

_MANAGED_RELATIONAL_DB_REGISTRY = {}
FLAGS = flags.FLAGS

# TODO: Implement DEFAULT BACKUP_START_TIME for instances.


class ManagedRelationalDbPropertyNotSet(Exception):
    pass
Example #24
def _ValidateLoadConfigs(load_configs):
    """Validate that each load config has all required values."""
    if not load_configs:
        return False
    for config in load_configs:
        config_values = config.split(':')
        if len(config_values) != 4:
            return False
        for value in config_values:
            if not (value.isdigit() and int(value) > 0):
                return False
    return True


flags.register_validator('nginx_load_configs', _ValidateLoadConfigs,
                         'Malformed load config. ' + _FLAG_FORMAT_DESCRIPTION)

BENCHMARK_NAME = 'nginx'
BENCHMARK_CONFIG = """
nginx:
  description: Benchmarks Nginx server performance.
  vm_groups:
    clients:
      vm_spec: *default_single_core
      vm_count: null
    server:
      vm_spec: *default_single_core
"""


def GetConfig(user_config):
Example #25
# The names of the benchmarks.
HPCC_BENCHMARKS = sorted(HPCC_METRIC_MAP)


flags.DEFINE_enum(
    'hpcc_math_library', HPCC_MATH_LIBRARY_OPEN_BLAS, [
        HPCC_MATH_LIBRARY_OPEN_BLAS, HPCC_MATH_LIBRARY_MKL,
        HPCC_MATH_LIBRARY_AMD_BLIS
    ], 'The math library to use when compiling hpcc: openblas, mkl, or '
    'amdblis. The default is openblas.')
flags.DEFINE_list(
    'hpcc_benchmarks', [], 'A list of benchmarks in HPCC to run. If none are '
    'specified (the default), then all of the benchmarks are run. In 1.5.0, '
    'the benchmarks may include the following: %s' % ', '.join(HPCC_BENCHMARKS))
flags.register_validator(
    'hpcc_benchmarks',
    lambda hpcc_benchmarks: set(hpcc_benchmarks).issubset(set(HPCC_BENCHMARKS)))
FLAGS = flags.FLAGS


def _LimitBenchmarksToRun(vm, selected_hpcc_benchmarks):
  """Limits the benchmarks to run.

  This function copies hpcc.c to the local machine, comments out code that runs
  benchmarks not listed in selected_hpcc_benchmarks, and then copies hpcc.c back
  to the remote machine.

  Args:
    vm: The machine where hpcc.c was installed.
    selected_hpcc_benchmarks: A set of benchmarks to run.
  """
      block_size = int(config_vals[2])
    except ValueError:
      return False

    if is_random_access not in TRUE_VALS + FALSE_VALS:
      return False

    if is_read not in TRUE_VALS + FALSE_VALS:
      return False

    if block_size <= 0:
      return False
  return True


flags.register_validator('diskspd_config_list', DiskspdConfigListValidator,
                         'malformed config list')


def ParseConfigList():
  """Get the list of configs for the test from the flags."""
  conf_list = []

  if FLAGS.diskspd_config_list is None:
    return [
        DiskspdConf(
            access_pattern=FLAGS.diskspd_access_pattern,
            write_ratio=FLAGS.diskspd_write_read_ratio,
            block_size=FLAGS.diskspd_block_size)
    ]

  for config in FLAGS.diskspd_config_list:
Example #27
                             'Number of netperf processes to run. Netperf '
                             'will run once for each value in the list.',
                             module_name=__name__)
flags.DEFINE_integer('netperf_thinktime', 0,
                     'Time in nanoseconds to do work for each request.')
flags.DEFINE_integer('netperf_thinktime_array_size', 0,
                     'The size of the array to traverse for thinktime.')
flags.DEFINE_integer('netperf_thinktime_run_length', 0,
                     'The number of contiguous numbers to sum at a time in the '
                     'thinktime array.')

ALL_BENCHMARKS = ['TCP_RR', 'TCP_CRR', 'TCP_STREAM', 'UDP_RR']
flags.DEFINE_list('netperf_benchmarks', ALL_BENCHMARKS,
                  'The netperf benchmark(s) to run.')
flags.register_validator(
    'netperf_benchmarks',
    lambda benchmarks: benchmarks and set(benchmarks).issubset(ALL_BENCHMARKS))

FLAGS = flags.FLAGS

BENCHMARK_NAME = 'netperf'
BENCHMARK_CONFIG = """
netperf:
  description: Run TCP_RR, TCP_CRR, UDP_RR and TCP_STREAM
  vm_groups:
    vm_1:
      vm_spec: *default_single_core
    vm_2:
      vm_spec: *default_single_core
"""
from perfkitbenchmarker.linux_packages import gluster

FLAGS = flags.FLAGS
BENCHMARKS = ['VDI', 'DATABASE', 'SWBUILD', 'VDA']

flags.DEFINE_string(
    'specsfs2014_config', None,
    'This flag can be used to specify an alternate SPEC config file to use. '
    'If this option is specified, none of the other benchmark specific flags '
    'which operate on the config file will be used (since the default config '
    'file will be replaced by this one).')
flags.DEFINE_list('specsfs2014_benchmarks', BENCHMARKS,
                  'The SPEC SFS 2014 benchmarks to run.')
flags.register_validator(
    'specsfs2014_benchmarks',
    lambda benchmarks: benchmarks and set(benchmarks).issubset(BENCHMARKS),
    'Invalid benchmarks list. specsfs2014_benchmarks must be a subset of ' +
    ', '.join(BENCHMARKS))
flag_util.DEFINE_integerlist(
    'specsfs2014_load', [1],
    'The starting load in units of SPEC "business metrics". The meaning of '
    'business metric varies depending on the SPEC benchmark (e.g. VDI has '
    'load measured in virtual desktops).', module_name=__name__)
flags.DEFINE_integer(
    'specsfs2014_incr_load', 1,
    'The amount to increment "load" by for each run.',
    lower_bound=1)
flags.DEFINE_integer(
    'specsfs2014_num_runs', 1,
    'The total number of SPEC runs. The load for the nth run is '
    '"load" + n * "specsfs_incr_load".',