Code Example #1
        GCP:
          machine_type: n1-standard-4
          boot_disk_size: 500
        AWS:
          machine_type: m4.xlarge
      vm_count: 2
"""

TERAGEN = 'teragen'
TERASORT = 'terasort'
TERAVALIDATE = 'teravalidate'

flags.DEFINE_integer('terasort_num_rows', 10000,
                     'Number of 100-byte rows to generate.')
flags.DEFINE_string(
    'terasort_unsorted_dir', 'tera_gen_data', 'Location of '
    'the unsorted data. TeraGen writes here, and TeraSort '
    'reads from here.')

flags.DEFINE_string(
    'terasort_data_base', 'terasort_data/',
    'The benchmark will append to this to create three '
    'directories: one for the generated, unsorted data, '
    'one for the sorted data, and one for the validate '
    'data.  If using a static cluster or if using object '
    'storage buckets, you must clean up.')
flags.DEFINE_bool(
    'terasort_append_timestamp', True, 'Append a timestamp to '
    'the directories given by terasort_unsorted_dir, '
    'terasort_sorted_dir, and terasort_validate_dir.')

FLAGS = flags.FLAGS
Code Example #2
"""Module containing redis installation and cleanup functions."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from perfkitbenchmarker import flags
from perfkitbenchmarker import linux_packages
from six.moves import range

flags.DEFINE_integer('redis_total_num_processes',
                     1,
                     'Total number of redis server processes.',
                     lower_bound=1)
flags.DEFINE_boolean('redis_enable_aof', False,
                     'Enable append-only file (AOF) with appendfsync always.')
flags.DEFINE_string('redis_server_version', '5.0.5',
                    'Version of redis server to use.')

REDIS_FIRST_PORT = 6379
REDIS_PID_FILE = 'redis.pid'
FLAGS = flags.FLAGS
REDIS_GIT = 'https://github.com/antirez/redis.git'


def _GetRedisTarName():
    return 'redis-%s.tar.gz' % FLAGS.redis_server_version


def GetRedisDir():
    return '%s/redis' % linux_packages.INSTALL_DIR
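The two helpers above only compute a tarball name and a clone path. As a hedged sketch of how they might be combined (the _Install function and its commands are illustrative assumptions, not the module's actual code; Redis tags its releases, so the version flag can double as a git ref):

def _Install(vm):
    """Hypothetical install step built on the helpers above."""
    vm.Install('build_tools')
    # GetRedisDir() resolves under linux_packages.INSTALL_DIR.
    vm.RemoteCommand('git clone %s %s' % (REDIS_GIT, GetRedisDir()))
    vm.RemoteCommand('cd %s && git checkout %s && make' %
                     (GetRedisDir(), FLAGS.redis_server_version))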

Code Example #3
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import stages
from perfkitbenchmarker.linux_packages import build_tools

BASE_MODE = 'base'
PEAK_MODE = 'peak'
ALL_MODE = 'all'

FLAGS = flags.FLAGS

flags.DEFINE_string(
    'runspec_tar', None,
    'Used by the PKB speccpu benchmarks. Name of the .tgz file to use. '
    'Defaults to None. ')
flags.DEFINE_string(
    'runspec_config', None,
    'Used by the PKB speccpu benchmarks. Name of the cfg file to use as the '
    'SPEC CPU config file provided to the runspec binary via its --config '
    'flag. If the benchmark is run using an .iso file, then the '
    'cfg file must be placed in the local PKB data directory and will be '
    'copied to the remote machine prior to executing runspec/runcpu. Defaults '
    'to None. '
    'See README.md for instructions if running with a repackaged .tgz file.')
flags.DEFINE_string(
    'runspec_build_tool_version', None,
    'Version of gcc/g++/gfortran. This should match runspec_config. Note: if '
    'neither runspec_config nor runspec_build_tool_version is set, the test '
    'installs gcc/g++/gfortran-4.7, since that matches the default config '
    'version. '
Code Example #4
        AWS:
          machine_type: p3dn.24xlarge
          zone: us-east-1a
          boot_disk_size: 105
          image: ami-0a4a0d42e3b855a2c
        Azure:
          machine_type: Standard_ND40s_v2
          zone: eastus
          boot_disk_size: 105
"""

flags.DEFINE_enum('mlperf_benchmark', 'resnet',
                  ['resnet', 'transformer', 'mask', 'gnmt', 'ssd', 'minigo'],
                  'MLPerf benchmark test to run.')
flags.DEFINE_string(
    'mlperf_gcs_resnet_checkpoint',
    'gs://cloud-tpu-artifacts/resnet/resnet-nhwc-2018-02-07/model.ckpt-112603',
    'A ResNet backbone trained on the ImageNet dataset.')
flags.DEFINE_string(
    'mlperf_transformer_decode_dir', '', 'Transformer decode directory')
flags.DEFINE_string('wmt_data_dir',
                    'gs://pkb-sgpyc-us-west1/mlperf_v0.6_nv_transformer/',
                    'Directory where the wmt dataset is stored')
flags.DEFINE_string('coco_data_dir', 'gs://pkb-sgpyc-us-west1/coco2017/',
                    'Directory where the coco dataset is stored')
flags.DEFINE_string('gnmt_data_dir',
                    'gs://pkb-sgpyc-us-west1/mlperf_v0.6_nv_gnmt/',
                    'Directory where the nv v0.6 WMT dataset is stored')
flags.DEFINE_string('minigo_model_dir', None,
                    'Directory on GCS to copy minigo source data from. Files '
                    'will be copied from subdirectories of src_dir '
                    'corresponding to the board size.')
Code Example #5
flags.DEFINE_enum(
    'tf_local_parameter_device', CPU, [CPU, GPU],
    '''Device to use as parameter server: cpu or gpu. For
                  distributed training, it can affect where caching of
                  variables happens.''')
flags.DEFINE_enum('tf_device', GPU, [CPU, GPU],
                  'Device to use for computation: cpu or gpu')
flags.DEFINE_enum(
    'tf_data_format', NCHW, [NCHW, NHWC], '''Data layout to
                  use: NHWC (TF native) or NCHW (cuDNN native).''')
flags.DEFINE_boolean(
    'tf_distortions', True,
    '''Enable/disable distortions during image preprocessing.
                     These include bbox and color distortions.''')
flags.DEFINE_boolean('tf_distributed', False, 'Run TensorFlow distributed')
flags.DEFINE_string('tf_distributed_port', '2222',
                    'The port to use in TensorFlow distributed job')
flags.DEFINE_enum(
    'tf_precision', FP32, [FP16, FP32],
    'Use 16-bit floats for certain tensors instead of 32-bit '
    'floats. This is currently experimental.')
flags.DEFINE_string(
    'tf_benchmark_args', None,
    'Arguments (as a string) to pass to tf_cnn_benchmarks. '
    'This can be used to run a benchmark with arbitrary '
    'parameters. Arguments will be parsed and added to the '
    'sample metadata. For example, '
    '--tf_benchmark_args="--nodistortions --optimizer=sgd '
    'will run tf_cnn_benchmarks.py '
    '--nodistortions --optimizer=sgd '
    'and put the following in the metadata: '
    '{\'nodistortions\': \'True\', \'optimizer\': \'sgd\'}. '
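The help string is cut off in this excerpt, but it already states the contract: arguments are parsed and added to the sample metadata. A minimal sketch of that parsing behavior (the function name is hypothetical; PKB's actual parser may differ):

def _ParseBenchmarkArgs(benchmark_args):
    """Parses '--key=value' and bare '--flag' tokens into a metadata dict."""
    metadata = {}
    for arg in benchmark_args.split():
        arg = arg.lstrip('-')
        if '=' in arg:
            key, value = arg.split('=', 1)
        else:
            key, value = arg, 'True'  # a bare flag is recorded as 'True'
        metadata[key] = value
    return metadata

# _ParseBenchmarkArgs('--nodistortions --optimizer=sgd')
# -> {'nodistortions': 'True', 'optimizer': 'sgd'}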
Code Example #6
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing TensorFlow models installation and cleanup functions."""
from perfkitbenchmarker import flags

FLAGS = flags.FLAGS
TF_MODELS_GIT = 'https://github.com/tensorflow/models.git'

flags.DEFINE_string('tensorflow_models_commit_hash',
                    '57e075203f8fba8d85e6b74f17f63d0a07da233a',
                    'git commit hash of desired TensorFlow models commit.')


def Install(vm):
    """Installs TensorFlow models on the VM."""
    vm.InstallPackages('git')
    vm.RemoteCommand('git clone {}'.format(TF_MODELS_GIT), should_log=True)
    vm.RemoteCommand('cd models && git checkout {}'.format(
        FLAGS.tensorflow_models_commit_hash))


def Uninstall(vm):
    """Uninstalls TensorFlow models on the VM."""
    vm.RemoteCommand('rm -rf models', should_log=True)
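Because Install pins a specific commit, a caller can cheaply confirm the checkout took effect. A hedged usage sketch (the helper is hypothetical; vm.RemoteCommand returns a (stdout, stderr) tuple on PKB VMs):

def _VerifyModelsCommit(vm):
    stdout, _ = vm.RemoteCommand('cd models && git rev-parse HEAD')
    assert stdout.strip() == FLAGS.tensorflow_models_commit_hash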
Code Example #7
"""Run NCCL benchmarks."""

import re
import time
import numpy as np
from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util

flags.DEFINE_integer('nccl_np', 16, 'Number of processes to run')
flags.DEFINE_integer('nccl_slots', 8,
                     'Launch n processes per node on all allocated nodes')
flags.DEFINE_string(
    'nccl_cuda_visible_devices', None, 'GPU identifiers are '
    'given as integer indices or as UUID strings.')
flags.DEFINE_list('nccl_extra_params', None,
                  'Environment variables (NAME=VALUE) to export for the run.')
flags.DEFINE_string('nccl_minbytes', '8', 'Minimum size to start with')
flags.DEFINE_string('nccl_maxbytes', '256M', 'Maximum size to end at')
flags.DEFINE_integer('nccl_stepfactor', 2,
                     'Multiplication factor between sizes')
flags.DEFINE_integer('nccl_ngpus', 1, 'Number of gpus per thread.')
flags.DEFINE_boolean('nccl_check', False, 'Check correctness of results.')
flags.DEFINE_integer('nccl_nthreads', 1, 'Number of threads per process')
flags.DEFINE_integer('nccl_num_runs',
                     10,
                     'The number of consecutive runs.',
                     lower_bound=1)
flags.DEFINE_integer('nccl_seconds_between_runs', 10,
                     'Seconds to sleep between consecutive runs.')
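A hedged sketch of how these flags might be assembled into an nccl-tests invocation (illustrative only: it assumes FLAGS = flags.FLAGS as in the other modules, and the -b/-e/-f/-g/-c/-t options belong to nccl-tests' all_reduce_perf; PKB's real command construction may differ):

def _AllReducePerfCommand():
    return ('mpirun -np {np} -N {slots} all_reduce_perf '
            '-b {minbytes} -e {maxbytes} -f {stepfactor} '
            '-g {ngpus} -c {check} -t {nthreads}').format(
                np=FLAGS.nccl_np, slots=FLAGS.nccl_slots,
                minbytes=FLAGS.nccl_minbytes, maxbytes=FLAGS.nccl_maxbytes,
                stepfactor=FLAGS.nccl_stepfactor, ngpus=FLAGS.nccl_ngpus,
                check=int(FLAGS.nccl_check), nthreads=FLAGS.nccl_nthreads)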
Code Example #8
    return lock


copy_reg.pickle(thread.LockType, PickleLock)

SUPPORTED = 'strict'
NOT_EXCLUDED = 'permissive'
SKIP_CHECK = 'none'

FLAGS = flags.FLAGS

flags.DEFINE_enum('cloud', providers.GCP, providers.VALID_CLOUDS,
                  'Name of the cloud to use.')
flags.DEFINE_string(
    'scratch_dir', None,
    'Base name for all scratch disk directories in the VM. '
    'Upon creation, these directories will have numbers '
    'appended to them (for example /scratch0, /scratch1, etc).')
flags.DEFINE_enum(
    'benchmark_compatibility_checking', SUPPORTED,
    [SUPPORTED, NOT_EXCLUDED, SKIP_CHECK],
    'Method used to check compatibility between the benchmark '
    ' and the cloud.  ' + SUPPORTED + ' runs the benchmark only'
    ' if the cloud provider has declared it supported. ' + NOT_EXCLUDED +
    ' runs the benchmark unless it has been'
    ' declared not supported by the cloud provider. ' + SKIP_CHECK +
    ' does not do the compatibility'
    ' check.')


class BenchmarkSpec(object):
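The excerpt breaks off at the BenchmarkSpec class, but the three enum values above describe a simple decision. A hedged sketch of that logic (a hypothetical helper, not the class's actual method):

def _BenchmarkAllowed(name, declared_supported, declared_unsupported):
    mode = FLAGS.benchmark_compatibility_checking
    if mode == SKIP_CHECK:
        return True  # no compatibility check at all
    if mode == SUPPORTED:
        return name in declared_supported  # strict: must be declared supported
    # NOT_EXCLUDED (permissive): run unless the provider excludes it.
    return name not in declared_unsupported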
Code Example #9
    worker_group:
      vm_spec:
        GCP:
          machine_type: n1-standard-4
        AWS:
          machine_type: m5.xlarge
      disk_spec:
        GCP:
          disk_size: 1000
          disk_type: pd-standard
        AWS:
          disk_size: 1000
          disk_type: gp2
    worker_count: 2
"""
flags.DEFINE_string('dpb_sparksql_data', None,
                    'The dataset to run Spark SQL queries against.')
flags.DEFINE_enum('dpb_sparksql_query', 'tpcds_2_4', ['tpcds_2_4', 'tpch'],
                  'Which query suite to run on dpb_sparksql_data.')
flags.DEFINE_list(
    'dpb_sparksql_order', [],
    'The names (numbers) of the queries to run in order. '
    'If omitted, all queries are run in lexicographic order.')
flags.DEFINE_string(
    'spark_bigquery_connector',
    'gs://spark-lib/bigquery/spark-bigquery-latest.jar',
    'The Spark BigQuery Connector jar to pass to the Spark Job')
flags.DEFINE_list(
    'bigquery_tables', [],
    'A list of BigQuery tables to load as Temporary Spark SQL views instead '
    'of reading from external Hive tables.')
flags.DEFINE_string(
Code Example #10
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for working with Rackspace Cloud Platform resources."""

import os
import re

from perfkitbenchmarker import errors
from perfkitbenchmarker import flags

flags.DEFINE_boolean(
    'boot_from_cbs_volume', False,
    'When this flag is included, the instance will use a remote disk '
    'as its boot disk, if machine_type supports it.')

flags.DEFINE_string('nova_path', 'nova',
                    'The path for the rackspace-novaclient tool.')

flags.DEFINE_string('neutron_path', 'neutron',
                    'The path for the rackspace-neutronclient tool.')

flags.DEFINE_list('additional_rackspace_flags', [],
                  'Additional flags to pass to Rackspace.')

FLAGS = flags.FLAGS

PROPERTY_VALUE_ROW_REGEX = r'\|\s+(:?\S+\s\S+|\S+)\s+\|\s+(.*?)\s+\|'
PROP_VAL_PATTERN = re.compile(PROPERTY_VALUE_ROW_REGEX)


def ParseNovaTable(output):
    """Returns a dict with key/values returned from a Nova CLI formatted table.
Code Example #11
flags.DEFINE_enum('storage', benchmark_spec_class.GCP,
                  [benchmark_spec_class.GCP, benchmark_spec_class.AWS,
                   benchmark_spec_class.AZURE],
                  'storage provider (GCP/AZURE/AWS) to use.')

flags.DEFINE_enum('object_storage_scenario', 'all',
                  ['all', 'cli', 'api_data', 'api_namespace'],
                  'select all, or one particular scenario to run: \n'
                  'ALL: runs all scenarios. This is the default. \n'
                  'cli: runs the command line only scenario. \n'
                  'api_data: runs API based benchmarking for data paths. \n'
                  'api_namespace: runs API based benchmarking for namespace '
                  'operations.')

flags.DEFINE_string('object_storage_credential_file', None,
                    'Directory of credential file.')

flags.DEFINE_string('boto_file_location', None,
                    'The location of the boto file.')

FLAGS = flags.FLAGS

# Use a scratch disk here to simulate what most users would do when they
# use CLI tools to interact with the storage provider.
BENCHMARK_INFO = {'name': 'object_storage_service',
                  'description':
                  'Object/blob storage service benchmarks. Specify '
                  '--object_storage_scenario '
                  'to select a set of sub-benchmarks to run. default is all.',
                  'scratch_disk': True,
                  'num_machines': 1}
Code Example #12
"""Module containing TensorFlow Serving installation functions.

"""
import posixpath
from perfkitbenchmarker import flags
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import vm_util

VM_TMP_DIR = vm_util.VM_TMP_DIR
TF_SERVING_BASE_DIRECTORY = posixpath.join(linux_packages.INSTALL_DIR,
                                           'serving')

FLAGS = flags.FLAGS

# Supported versions include TF Serving 1.
flags.DEFINE_string('tf_serving_branch', 'r1.15', 'GitHub branch to pull from')


def InstallTensorFlowServingAPI(vm):
    """Installs TF Serving API on the vm.

    Currently this is only useful so that the clients can run python
    scripts that import tensorflow_serving. The server vms make no use
    of it.

    Args:
      vm: VM to operate on.
    """

    pip_package_output_dir = posixpath.join(VM_TMP_DIR,
                                            'tf_serving_pip_package')
Code Example #13
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from perfkitbenchmarker import flags

flags.DEFINE_string('CS_API_URL', os.environ.get('CS_API_URL'),
                    'API endpoint for Cloudstack.')

flags.DEFINE_string('CS_API_KEY', os.environ.get('CS_API_KEY'),
                    'Key for API authentication')

flags.DEFINE_string('CS_API_SECRET', os.environ.get('CS_API_SECRET'),
                    'Secret for API authentication')

flags.DEFINE_string('cs_network_offering',
                    'DefaultIsolatedNetworkOfferingForVpcNetworksNoLB',
                    'Name of the network offering')

flags.DEFINE_string('cs_vpc_offering', 'Default VPC offering',
                    'Name of the VPC offering')

flags.DEFINE_boolean('cs_use_vpc', True, 'Use VPC to create networks')
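Because the CS_* flags default to environment variables, a missing credential would otherwise surface only when the first API call fails. A hedged sketch of an early check (a hypothetical helper, not part of the module):

FLAGS = flags.FLAGS

def _CheckCloudstackCredentials():
    for name in ('CS_API_URL', 'CS_API_KEY', 'CS_API_SECRET'):
        if not getattr(FLAGS, name):
            raise ValueError('%s must be set via flag or environment.' % name)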
Code Example #14
    'trivial', 'inception3', 'inception4', 'resnet50', 'resnet101', 'resnet152'
]
FP16 = 'float16'
FP32 = 'float32'

flags.DEFINE_boolean(
    'tf_forward_only', False, '''whether to use forward-only or
                     training for benchmarking''')
flags.DEFINE_list('tf_models',
                  ['inception3', 'vgg16', 'alexnet', 'resnet50', 'resnet152'],
                  'name of the models to run')
flags.register_validator(
    'tf_models', lambda models: models and set(models).issubset(MODELS),
    'Invalid models list. tf_models must be a subset of ' + ', '.join(MODELS))
flags.DEFINE_string(
    'tf_data_dir', None, 'Path to dataset in TFRecord format (aka Example '
    'protobufs). If not specified, synthetic data will be '
    'used.')
flags.DEFINE_string('tf_data_module', 'tensorflow/ILSVRC2012',
                    'Data path in preprovisioned data bucket.')
flags.DEFINE_integer('tf_num_files_train', 1024,
                     'The number of files for training')
flags.DEFINE_integer('tf_num_files_val', 128,
                     'The number of files for validation')
flags.DEFINE_enum('tf_data_name', 'imagenet', ['imagenet', 'flowers'],
                  'Name of dataset: imagenet or flowers.')
flags.DEFINE_list(
    'tf_batch_sizes', None, 'batch sizes per compute device. '
    'If not provided, the suggested batch size is used for '
    'the given model')
flags.DEFINE_enum(
    'tf_variable_update', 'parameter_server', [
Code Example #15
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing flags applicable across benchmark run on AWS."""

from perfkitbenchmarker import flags
from perfkitbenchmarker.providers.aws import util

flags.DEFINE_string(
    'aws_user_name', '', 'This determines the user name that Perfkit will '
    'attempt to use. Defaults are OS specific.')
flags.DEFINE_integer('aws_provisioned_iops', None,
                     'IOPS for Provisioned IOPS (SSD) volumes in AWS.')

flags.DEFINE_string('aws_dax_node_type', 'dax.r4.large',
                    'The node type used for creating AWS DAX cluster.')
flags.DEFINE_integer('aws_dax_replication_factor', 3,
                     'The replication factor of AWS DAX cluster.')
flags.DEFINE_string(
    'aws_emr_loguri', None,
    'The log-uri parameter to pass to AWS when creating a '
    'cluster.  If not set, a bucket will be created.')
flags.DEFINE_integer('aws_emr_job_wait_time', 18000,
                     'The time to wait for an EMR job to finish, in seconds')
flags.DEFINE_boolean('aws_spot_instances', False,
Code Example #16
import json
import logging
import os
import pipes
import posixpath
import subprocess

from perfkitbenchmarker import data
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.packages import hbase
from perfkitbenchmarker.packages import ycsb

FLAGS = flags.FLAGS

flags.DEFINE_string('google_bigtable_endpoint', 'bigtable.googleapis.com',
                    'Google API endpoint for Cloud Bigtable.')
flags.DEFINE_string(
    'google_bigtable_admin_endpoint', 'bigtabletableadmin.googleapis.com',
    'Google API endpoint for Cloud Bigtable table '
    'administration.')
flags.DEFINE_string('google_bigtable_zone_name', 'us-central1-b',
                    'Bigtable zone.')
flags.DEFINE_string('google_bigtable_cluster_name', None,
                    'Bigtable cluster name.')
flags.DEFINE_string(
    'google_bigtable_alpn_jar_url',
    'http://central.maven.org/maven2/org/mortbay/jetty/alpn/'
    'alpn-boot/7.1.3.v20150130/alpn-boot-7.1.3.v20150130.jar',
    'URL for the ALPN boot JAR, required for HTTP2')
flags.DEFINE_string(
    'google_bigtable_hbase_jar_url',
Code Example #17
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing TensorFlow installation and cleanup functions."""
import posixpath
from perfkitbenchmarker import flags
from perfkitbenchmarker.linux_packages import cuda_toolkit

FLAGS = flags.FLAGS
flags.DEFINE_string(
    'tf_cpu_pip_package',
    'https://anaconda.org/intel/tensorflow/1.12.0/download/'
    'tensorflow-1.12.0-cp27-cp27mu-linux_x86_64.whl',
    'TensorFlow CPU pip package to install. By default, PKB '
    'will install an Intel-optimized CPU build when using '
    'CPUs.')
flags.DEFINE_string(
    'tf_gpu_pip_package', 'tensorflow-gpu==1.12.0',
    'TensorFlow GPU pip package to install. By default, PKB '
    'will install tensorflow-gpu==1.12 when using GPUs.')
flags.DEFINE_string(
    't2t_pip_package', 'tensor2tensor==1.7',
    'Tensor2Tensor pip package to install. By default, PKB '
    'will install tensor2tensor==1.7.')
flags.DEFINE_string(
    'tf_cnn_benchmarks_branch', 'cnn_tf_v1.12_compatible',
    'TensorFlow CNN benchmarks branch that is compatible with '
    'a TensorFlow version.')
Code Example #18
import pprint
import sys
import time
import urllib
import uuid

from perfkitbenchmarker import flags
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import log_util
from perfkitbenchmarker import version
from perfkitbenchmarker import vm_util

FLAGS = flags.FLAGS

flags.DEFINE_string(
    'product_name',
    'PerfKitBenchmarker',
    'The product name to use when publishing results.')

flags.DEFINE_boolean(
    'official',
    False,
    'A boolean indicating whether results are official or not. The '
    'default is False. Official test results are treated and queried '
    'differently from non-official test results.')

flags.DEFINE_boolean(
    'hostname_metadata',
    False,
    'A boolean indicating whether to publish VM hostnames as part of sample '
    'metadata.')
Code Example #19
        GCP:
          disk_size: 1000
          disk_type: pd-standard
        AWS:
          disk_size: 1000
          disk_type: gp2
    worker_count: 2
"""

BENCHMARK_NAMES = {'tpcds_2_4': 'TPC-DS', 'tpch': 'TPC-H'}

SPARK_SQL = dpb_service.BaseDpbService.SPARKSQL_JOB_TYPE
PYSPARK = dpb_service.BaseDpbService.PYSPARK_JOB_TYPE

flags.DEFINE_string('dpb_sparksql_data', None,
                    'The HCFS based dataset to run Spark SQL query '
                    'against')
flags.DEFINE_enum(
    'dpb_sparksql_job_type', PYSPARK, [PYSPARK, SPARK_SQL],
    'How to trigger the query. Either with the spark-sql CLI '
    'or with a PySpark harness inside PKB.')
flags.DEFINE_bool(
    'dpb_sparksql_create_hive_tables', False,
    'Whether to load dpb_sparksql_data into external hive tables '
    'or not.')
flags.DEFINE_string(
    'dpb_sparksql_data_format', None,
    "Format of data to load. Assumed to be 'parquet' for HCFS "
    "and 'bigquery' for bigquery if unspecified.")
flags.DEFINE_enum('dpb_sparksql_query', 'tpcds_2_4', BENCHMARK_NAMES.keys(),
                  'Which query suite to run on dpb_sparksql_data.')
Code Example #20
hpcc:
  description: Runs HPCC. Specify the number of VMs with --num_vms
  vm_groups:
    default:
      vm_spec: *default_single_core
      vm_count: null
"""

SECONDS_PER_HOUR = 60 * 60

flags.DEFINE_integer(
    'memory_size_mb', None,
    'The amount of memory in MB on each machine to use. By '
    'default it will use the entire system\'s memory.')
flags.DEFINE_string(
    'hpcc_binary', None,
    'The path of prebuilt hpcc binary to use. If not provided, '
    'this benchmark builds its own using OpenBLAS.')
flags.DEFINE_list(
    'hpcc_mpi_env', [],
    'Comma separated list containing environment variables '
    'to use with mpirun command. e.g. '
    'MKL_DEBUG_CPU_TYPE=7,MKL_ENABLE_INSTRUCTIONS=AVX512')
flags.DEFINE_integer(
    'hpcc_timeout_hours', 4,
    'The number of hours to wait for the HPCC binary to '
    'complete before timing out and assuming it failed.')


def GetConfig(user_config):
    return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
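The comma-separated hpcc_mpi_env entries have to reach the MPI ranks somehow. A hedged sketch of one plausible expansion into Open MPI's -x arguments (illustrative; it assumes FLAGS = flags.FLAGS, and PKB's actual command building may differ):

def _MpiEnvArgs():
    return ' '.join('-x %s' % kv for kv in FLAGS.hpcc_mpi_env)

# With --hpcc_mpi_env=MKL_DEBUG_CPU_TYPE=7,MKL_ENABLE_INSTRUCTIONS=AVX512:
# _MpiEnvArgs() -> '-x MKL_DEBUG_CPU_TYPE=7 -x MKL_ENABLE_INSTRUCTIONS=AVX512'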
Code Example #21
six.moves.copyreg.pickle(six.moves._thread.LockType, PickleLock)

SUPPORTED = 'strict'
NOT_EXCLUDED = 'permissive'
SKIP_CHECK = 'none'
# GCP labels only allow hyphens (-), underscores (_), lowercase characters,
# numbers, and international characters.
# Metadata allows all characters and numbers.
METADATA_TIME_FORMAT = '%Y%m%dt%H%M%Sz'
FLAGS = flags.FLAGS

flags.DEFINE_enum('cloud', providers.GCP, providers.VALID_CLOUDS,
                  'Name of the cloud to use.')
flags.DEFINE_string('scratch_dir', None,
                    'Base name for all scratch disk directories in the VM. '
                    'Upon creation, these directories will have numbers '
                    'appended to them (for example /scratch0, /scratch1, etc).')
flags.DEFINE_string('startup_script', None,
                    'Script to run right after vm boot.')
flags.DEFINE_string('postrun_script', None,
                    'Script to run right after run stage.')
# pyformat: disable
flags.DEFINE_enum('benchmark_compatibility_checking', SUPPORTED,
                  [SUPPORTED, NOT_EXCLUDED, SKIP_CHECK],
                  'Method used to check compatibility between the benchmark '
                  ' and the cloud.  ' + SUPPORTED + ' runs the benchmark only'
                  ' if the cloud provider has declared it supported. ' +
                  NOT_EXCLUDED + ' runs the benchmark unless it has been'
                  ' declared not supported by the cloud provider. ' + SKIP_CHECK
                  + ' does not do the compatibility'
                  ' check.')
Code Example #22
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing flags applicable across benchmark run on GCP."""

from perfkitbenchmarker import flags

# Sentinel value for unspecified platform.
GCP_MIN_CPU_PLATFORM_NONE = 'none'

flags.DEFINE_string('gcloud_path', 'gcloud',
                    'The path for the gcloud utility.')
flags.DEFINE_list('additional_gcloud_flags', [],
                  'Additional flags to pass to gcloud.')
flags.DEFINE_integer(
    'gce_num_local_ssds', 0,
    'The number of ssds that should be added to the VM. Note '
    'that this is currently only supported in certain zones '
    '(see https://cloud.google.com/compute/docs/local-ssd).')
flags.DEFINE_string(
    'gcloud_scopes', None, 'If set, space-separated list of '
    'scopes to apply to every created machine')
flags.DEFINE_boolean('gce_migrate_on_maintenance', True, 'If true, allow VM '
                     'migration on GCE host maintenance.')
flags.DEFINE_boolean('gce_preemptible_vms', False, 'If true, use preemptible '
                     'VMs on GCE.')
flags.DEFINE_string(
Code Example #23
File: publisher.py Project: lmroz/PerfKitBenchmarker
import logging
import operator
import sys
import time
import uuid

from perfkitbenchmarker import disk
from perfkitbenchmarker import flags
from perfkitbenchmarker import version
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.sample import Sample

FLAGS = flags.FLAGS

flags.DEFINE_string(
    'product_name',
    'PerfKitBenchmarker',
    'The product name to use when publishing results.')

flags.DEFINE_boolean(
    'official',
    False,
    'A boolean indicating whether results are official or not. The '
    'default is False. Official test results are treated and queried '
    'differently from non-official test results.')

flags.DEFINE_string(
    'json_path',
    None,
    'A path to write newline-delimited JSON results. '
    'Default: write to a run-specific temporary directory.')
flags.DEFINE_boolean(
Code Example #24
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util

from perfkitbenchmarker.linux_packages import gluster

FLAGS = flags.FLAGS
BENCHMARKS = ['VDI', 'DATABASE', 'SWBUILD', 'VDA']

flags.DEFINE_string(
    'specsfs2014_config', None,
    'This flag can be used to specify an alternate SPEC config file to use. '
    'If this option is specified, none of the other benchmark specific flags '
    'which operate on the config file will be used (since the default config '
    'file will be replaced by this one).')
flags.DEFINE_list('specsfs2014_benchmarks', BENCHMARKS,
                  'The SPEC SFS 2014 benchmarks to run.')
flags.register_validator(
    'specsfs2014_benchmarks',
    lambda benchmarks: benchmarks and set(benchmarks).issubset(BENCHMARKS),
    'Invalid benchmarks list. specsfs2014_benchmarks must be a subset of ' +
    ', '.join(BENCHMARKS))
flag_util.DEFINE_integerlist(
    'specsfs2014_load', [1],
    'The starting load in units of SPEC "business metrics". The meaning of '
    'business metric varies depending on the SPEC benchmark (e.g. VDI has '
    'load measured in virtual desktops).',
    module_name=__name__)
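The registered validator rejects empty lists and unknown names at flag-parsing time. Illustrating the predicate it registers:

predicate = lambda benchmarks: benchmarks and set(benchmarks).issubset(BENCHMARKS)
predicate(['VDI', 'SWBUILD'])  # truthy: both are known benchmarks
predicate(['VDI', 'BOGUS'])    # False: parsing would raise a validation error
predicate([])                  # falsy: an empty list is rejected too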
Code Example #25
import six
import yaml

if six.PY2:
  import functools32 as functools
else:
  import functools

FLAGS = flags.FLAGS
CONFIG_CONSTANTS = 'default_config_constants.yaml'
FLAGS_KEY = 'flags'
IMPORT_REGEX = re.compile('^#import (.*)')

flags.DEFINE_string('benchmark_config_file', None,
                    'The file path to the user config file which will '
                    'override benchmark defaults. This should either be '
                    'a path relative to the current working directory, '
                    'an absolute path, or just the name of a file in the '
                    'configs/ directory.')
flags.DEFINE_multi_string(
    'config_override', None,
    'This flag can be used to override any config value. It is applied after '
    'the user config (specified via --benchmark_config_file), so it has '
    'a higher priority than that config. The value of the flag should be '
    'fully.qualified.key=value (e.g. --config_override=cluster_boot.vm_groups.'
    'default.vm_count=4).')


class _ConcatenatedFiles(object):
  """Class that presents several files as a single object.

  The class exposes a single method (read) which is all that yaml
Code Example #26
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from perfkitbenchmarker import flags

flags.DEFINE_string('rack_path',
                    default='rack',
                    help='The path for the rack CLI binary.')

flags.DEFINE_string('rackspace_region',
                    default='IAD',
                    help='A string indicating which Rackspace region to use.')

flags.DEFINE_string('rack_profile',
                    default=None,
                    help='A string indicating which RackCLI profile to use. '
                    'If none is specified, the default profile is used '
                    '(see https://developer.rackspace.com/docs/'
                    'rack-cli/configuration/#config-file)')

flags.DEFINE_boolean(
    'rackspace_boot_from_cbs_volume', False,
Code Example #27
File: pkb.py Project: mcurtiss/PerfKitBenchmarker
from perfkitbenchmarker.publisher import SampleCollector

LOG_FILE_NAME = 'pkb.log'
COMPLETION_STATUS_FILE_NAME = 'completion_statuses.json'
REQUIRED_INFO = ['scratch_disk', 'num_machines']
REQUIRED_EXECUTABLES = frozenset(['ssh', 'ssh-keygen', 'scp', 'openssl'])
FLAGS = flags.FLAGS

flags.DEFINE_list('ssh_options', [], 'Additional options to pass to ssh.')
flags.DEFINE_list(
    'benchmarks', [benchmark_sets.STANDARD_SET],
    'Benchmarks and/or benchmark sets that should be run. The '
    'default is the standard set. For more information about '
    'benchmarks and benchmark sets, see the README and '
    'benchmark_sets.py.')
flags.DEFINE_string('archive_bucket', None,
                    'Archive results to the given S3/GCS bucket.')
flags.DEFINE_string(
    'project', None, 'GCP project ID under which '
    'to create the virtual machines')
flags.DEFINE_list(
    'zones', [], 'A list of zones within which to run PerfKitBenchmarker. '
    'This is specific to the cloud provider you are running on. '
    'If multiple zones are given, PerfKitBenchmarker will create 1 VM in '
    'each zone, until enough VMs are created as specified in each '
    'benchmark. The order in which this flag is applied to VMs is '
    'undefined.')
flags.DEFINE_list(
    'extra_zones', [],
    'Zones that will be appended to the "zones" list. This is functionally '
    'the same, but allows flag matrices to have two zone axes.')
# TODO(user): note that this is currently very GCE specific. Need to create a
Code Example #28
# Modes for --fio_target_mode
AGAINST_FILE_WITH_FILL_MODE = 'against_file_with_fill'
AGAINST_FILE_WITHOUT_FILL_MODE = 'against_file_without_fill'
AGAINST_DEVICE_WITH_FILL_MODE = 'against_device_with_fill'
AGAINST_DEVICE_WITHOUT_FILL_MODE = 'against_device_without_fill'
AGAINST_DEVICE_MODES = {
    AGAINST_DEVICE_WITH_FILL_MODE, AGAINST_DEVICE_WITHOUT_FILL_MODE
}
FILL_TARGET_MODES = {
    AGAINST_DEVICE_WITH_FILL_MODE, AGAINST_FILE_WITH_FILL_MODE
}

flags.DEFINE_string(
    'fio_jobfile', None,
    'Job file that fio will use. If not given, use a job file '
    'bundled with PKB. Cannot use with '
    '--fio_generate_scenarios.')
flags.DEFINE_list(
    'fio_generate_scenarios', [],
    'Generate a job file with the given scenarios. Special '
    'scenario \'all\' generates all scenarios. Available '
    'scenarios are sequential_write, sequential_read, '
    'random_write, and random_read. Cannot use with '
    '--fio_jobfile.')
flags.DEFINE_enum(
    'fio_target_mode', AGAINST_FILE_WITHOUT_FILL_MODE, [
        AGAINST_DEVICE_WITH_FILL_MODE, AGAINST_DEVICE_WITHOUT_FILL_MODE,
        AGAINST_FILE_WITH_FILL_MODE, AGAINST_FILE_WITHOUT_FILL_MODE
    ], 'Whether to run against a raw device or a file, and whether '
    'to prefill.')
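A hedged sketch of how the two mode sets above might be consulted (hypothetical variable names, assuming FLAGS = flags.FLAGS as in the other modules):

mode = FLAGS.fio_target_mode
against_device = mode in AGAINST_DEVICE_MODES  # raw device rather than a file
needs_fill = mode in FILL_TARGET_MODES         # prefill the target before measuring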
Code Example #29
File: disk.py Project: philipz/PerfKitBenchmarker
import abc
import logging

from perfkitbenchmarker import flags
from perfkitbenchmarker import resource
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.configs import spec

flags.DEFINE_boolean('nfs_timeout_hard', True,
                     'Whether to use a hard or soft NFS mount.')
flags.DEFINE_integer('nfs_rsize', 1048576, 'NFS read size.')
flags.DEFINE_integer('nfs_wsize', 1048576, 'NFS write size.')
flags.DEFINE_integer('nfs_timeout', 60, 'NFS timeout.')
flags.DEFINE_integer('nfs_retries', 2, 'NFS Retries.')
flags.DEFINE_string('nfs_ip_address', None,
                    'If specified, PKB will target this ip address when '
                    'mounting NFS "disks" rather than provisioning an NFS '
                    'Service for the corresponding cloud.')
flags.DEFINE_string('nfs_directory', None,
                    'Directory to mount if using a StaticNfsService. This '
                    'corresponds to the "VOLUME_NAME" of other NfsService '
                    'classes.')
flags.DEFINE_string('smb_version', '3.0', 'SMB version.')
flags.DEFINE_list('mount_options', [],
                  'Additional arguments to supply when mounting.')
flags.DEFINE_list('fstab_options', [],
                  'Additional arguments to supply to fstab.')

FLAGS = flags.FLAGS


# These are the (deprecated) old disk type names
Code Example #30
"""Runs the YCSB benchmark against managed Redis services.

Spins up a cloud redis instance, runs YCSB against it, then spins it down.
"""

import logging
from perfkitbenchmarker import cloud_redis
from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import ycsb

FLAGS = flags.FLAGS
flags.DEFINE_string('redis_region',
                    'us-central1',
                    'The region to spin up cloud redis in')

BENCHMARK_NAME = 'cloud_redis_ycsb'

BENCHMARK_CONFIG = """
cloud_redis_ycsb:
  description: Run YCSB against cloud redis
  cloud_redis:
    redis_version: REDIS_3_2
  vm_groups:
    clients:
      vm_spec: *default_single_core
      vm_count: 2
"""

CLOUD_REDIS_CLASS_NAME = 'CloudRedis'