def testSerializeRangeNegativeStep(self):
  ser = flag_util.IntegerListSerializer()
  # Keep this in the old-style format. However, we should never get this
  # tuple from the parser, because the step will always have the correct
  # sign.
  il = flag_util.IntegerList([(5, 2, 1)])
  self.assertEqual(ser.serialize(il), '5-2-1')

  # Previously serialized as 5-2--1; moved to the new format.
  il = flag_util.IntegerList([(5, 2, -1)])
  self.assertEqual(ser.serialize(il), '5:2:-1')

  # First or second value < 0.
  il = flag_util.IntegerList([(5, -2, -1)])
  self.assertEqual(ser.serialize(il), '5:-2:-1')

  il = flag_util.IntegerList([(-5, 2, 1)])
  self.assertEqual(ser.serialize(il), '-5:2:1')
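# A hedged round-trip sketch (not from the original suite): it assumes
# flag_util.IntegerListParser exposes a parse() method symmetric with
# IntegerListSerializer.serialize(). The colon format exists precisely so
# that negative bounds and steps survive a serialize/parse cycle, which the
# ambiguous old dash form ('5-2--1') could not guarantee.
def testSerializeParseRoundTripNegativeStep(self):
  ser = flag_util.IntegerListSerializer()
  parser = flag_util.IntegerListParser()  # parse() is assumed here
  il = flag_util.IntegerList([(5, 2, -1)])
  # (5, 2, -1) expands to 5, 4, 3, 2; both directions should agree.
  self.assertEqual(list(parser.parse(ser.serialize(il))), list(il))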
def testRun(self, run_single_iteration_mock, query_autoboost_policy_mock,
            query_gpu_clock_speed_mock, query_number_of_gpus_mock,
            get_driver_version_mock, get_gpu_type, get_peer_to_peer_topology):
  get_gpu_type.return_value = 'k80'
  get_driver_version_mock.return_value = '123.45'
  query_number_of_gpus_mock.return_value = 8
  query_gpu_clock_speed_mock.return_value = [100, 200]
  query_autoboost_policy_mock.return_value = {
      'autoboost': True,
      'autoboost_default': True,
  }
  benchmark_spec = mock.MagicMock()
  problem_sizes = [2, 3, 4]
  stencil2d_benchmark.FLAGS.stencil2d_problem_sizes = (
      flag_util.IntegerList(problem_sizes))

  expected_calls = [
      mock.call(mock.ANY, size, mock.ANY, mock.ANY, mock.ANY)
      for size in problem_sizes
  ]

  stencil2d_benchmark.Run(benchmark_spec)
  run_single_iteration_mock.assert_has_calls(expected_calls, any_order=True)
def testSerialize(self):
  ser = flag_util.IntegerListSerializer()
  il = flag_util.IntegerList([1, (2, 5), 9])

  self.assertEqual(ser.serialize(il), '1,2-5,9')
  self.assertEqual(str(il), '1,2-5,9')
  # Previously this was
  # <perfkitbenchmarker.flag_util.IntegerList object at ...>.
  self.assertEqual(repr(il), 'IntegerList([1,2-5,9])')
def testRun(self, run_single_iteration_mock, cuda_toolkit_mock,
            get_driver_version_mock):
  get_driver_version_mock.return_value = '123.45'
  cuda_toolkit_mock.return_value = 8
  benchmark_spec = mock.MagicMock()
  problem_sizes = [2, 3, 4]
  stencil2d_benchmark.FLAGS.stencil2d_problem_sizes = (
      flag_util.IntegerList(problem_sizes))

  expected_calls = [call(ANY, size, ANY, ANY, ANY) for size in problem_sizes]

  stencil2d_benchmark.Run(benchmark_spec)
  run_single_iteration_mock.assert_has_calls(expected_calls, any_order=True)
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import netperf

flags.DEFINE_integer('netperf_max_iter', None,
                     'Maximum number of iterations to run during '
                     'confidence interval estimation. If unset, '
                     'a single iteration will be run.',
                     lower_bound=3, upper_bound=30)
flags.DEFINE_integer('netperf_test_length', 60,
                     'netperf test length, in seconds',
                     lower_bound=1)
flags.DEFINE_bool('netperf_enable_histograms', True,
                  'Determines whether latency histograms are '
                  'collected/reported. Only for *RR benchmarks')
flag_util.DEFINE_integerlist('netperf_num_streams',
                             flag_util.IntegerList([1]),
                             'Number of netperf processes to run. Netperf '
                             'will run once for each value in the list.',
                             module_name=__name__)
flags.DEFINE_integer('netperf_thinktime', 0,
                     'Time in nanoseconds to do work for each request.')
flags.DEFINE_integer('netperf_thinktime_array_size', 0,
                     'The size of the array to traverse for thinktime.')
flags.DEFINE_integer('netperf_thinktime_run_length', 0,
                     'The number of contiguous numbers to sum at a time in '
                     'the thinktime array.')

ALL_BENCHMARKS = ['TCP_RR', 'TCP_CRR', 'TCP_STREAM', 'UDP_RR']
flags.DEFINE_list('netperf_benchmarks', ALL_BENCHMARKS,
                  'The netperf benchmark(s) to run.')

flags.register_validator(
    [AGAINST_DEVICE_WITH_FILL_MODE,
     AGAINST_DEVICE_WITHOUT_FILL_MODE,
     AGAINST_FILE_WITH_FILL_MODE,
     AGAINST_FILE_WITHOUT_FILL_MODE],
    'Whether to run against a raw device or a file, and whether '
    'to prefill.')
flags.DEFINE_string('fio_fill_size', '100%',
                    'The amount of device to fill in prepare stage. '
                    'The valid value can either be an integer, which '
                    'represents the number of bytes to fill or a '
                    'percentage, which represents the percentage '
                    'of the device. A filesystem will be unmounted before '
                    'filling and remounted afterwards. Only valid when '
                    '--fio_target_mode is against_device_with_fill or '
                    'against_file_with_fill.')
flag_util.DEFINE_integerlist('fio_io_depths',
                             flag_util.IntegerList([1]),
                             'IO queue depths to run on. Can specify a single '
                             'number, like --fio_io_depths=1, a range, like '
                             '--fio_io_depths=1-4, or a list, like '
                             '--fio_io_depths=1-4,6-8',
                             on_nonincreasing=flag_util.IntegerListParser.WARN)
flag_util.DEFINE_integerlist('fio_num_jobs',
                             flag_util.IntegerList([1]),
                             'Number of concurrent fio jobs to run.',
                             on_nonincreasing=flag_util.IntegerListParser.WARN)
flags.DEFINE_integer('fio_working_set_size', None,
                     'The size of the working set, in GB. If not given, use '
                     'the full size of the device. If using '
                     '--fio_generate_scenarios and not running against a raw '
                     'device, you must pass --fio_working_set_size.',
                     lower_bound=0)
flag_util.DEFINE_units('fio_blocksize', None,
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing CUDA toolkit 8 installation and cleanup functions."""

from perfkitbenchmarker import regex_util
from perfkitbenchmarker import flags
from perfkitbenchmarker import flag_util

TESLA_K80_MAX_CLOCK_SPEEDS = [2505, 875]

flag_util.DEFINE_integerlist(
    'gpu_clock_speeds',
    flag_util.IntegerList(TESLA_K80_MAX_CLOCK_SPEEDS),
    'desired gpu clock speeds in the form '
    '[memory clock, graphics clock]')

FLAGS = flags.FLAGS

# TODO: Test the CUDA Ubuntu 14.04 installer, and if everything works ok,
# automatically install the correct package depending on the OS image.
CUDA_TOOLKIT_UBUNTU = 'cuda-repo-ubuntu1604_8.0.61-1_amd64.deb'
CUDA_TOOLKIT_UBUNTU_URL = ('http://developer.download.nvidia.com/compute/cuda'
                           '/repos/ubuntu1604/x86_64/%s' % CUDA_TOOLKIT_UBUNTU)
CUDA_TOOLKIT_INSTALL_DIR = '/usr/local/cuda'
EXTRACT_CLOCK_SPEEDS_REGEX = r'(\d*).*,\s*(\d*)'


class UnsupportedClockSpeedException(Exception):
  pass
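# Hedged usage sketch for EXTRACT_CLOCK_SPEEDS_REGEX: the pattern captures
# two integers from a comma-separated clock-speed line. The sample input
# below is an assumption for illustration, not captured nvidia-smi output.
#
#   import re
#   match = re.search(EXTRACT_CLOCK_SPEEDS_REGEX, '2505 MHz, 875 MHz')
#   match.groups()  # -> ('2505', '875'), i.e. (memory clock, graphics clock)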
    Azure:
      machine_type: Standard_NC6
      zone: eastus
      boot_disk_size: 200
  vm_count: null
"""

flags.DEFINE_integer(
    'hpcg_runtime', 60, 'hpcg runtime in seconds', lower_bound=1)
flags.DEFINE_integer(
    'hpcg_gpus_per_node', None, 'The number of gpus per node.', lower_bound=1)
flag_util.DEFINE_integerlist(
    'hpcg_problem_size', flag_util.IntegerList([256, 256, 256]),
    'three dimensional problem size for each node. Must contain '
    'three integers', module_name=__name__)


class HpcgParseOutputException(Exception):
  pass


class HpcgIncorrectProblemSizeException(Exception):
  pass


def GetConfig(user_config):
  """Load and return benchmark config.
def testIter(self):
  il = flag_util.IntegerList([1, (2, 5), 9])
  self.assertEqual(list(il), [1, 2, 3, 4, 5, 9])
def testRangeGetItem(self):
  il = flag_util.IntegerList([1, (2, 5), 9])
  self.assertEqual(il[1], 2)
  self.assertEqual(il[2], 3)
  self.assertEqual(il[5], 9)
    30, 'number of iterations to run', lower_bound=1)
flags.DEFINE_enum('gpu_pcie_bandwidth_mode', 'quick', ['quick', 'range'],
                  'bandwidth test mode to use. '
                  'If range is selected, provide desired range '
                  'in flag gpu_pcie_bandwidth_transfer_sizes. '
                  'Additionally, if range is selected, the resulting '
                  'bandwidth will be averaged over all provided transfer '
                  'sizes.')
flag_util.DEFINE_integerlist(
    'gpu_pcie_bandwidth_transfer_sizes',
    flag_util.IntegerList(
        [DEFAULT_RANGE_START, DEFAULT_RANGE_END, DEFAULT_RANGE_STEP]),
    'range of transfer sizes to use in bytes. '
    'Only used if gpu_pcie_bandwidth_mode is set to range')

FLAGS = flags.FLAGS

BENCHMARK_NAME = 'gpu_pcie_bandwidth'
BENCHMARK_CONFIG = """
gpu_pcie_bandwidth:
  description: Runs NVIDIA's CUDA bandwidth test.
  vm_groups:
    default:
      vm_spec:
        GCP:
          image: ubuntu-1604-xenial-v20161115
          image_project: ubuntu-os-cloud
def testNotEqFalse(self):
  il = flag_util.IntegerList([1, 2, 3])
  self.assertTrue([1] != il)
def testNotEqTrue(self):
  il = flag_util.IntegerList([1, 2, 3])
  self.assertNotEqual([1], il)
def testEqFalse(self):
  il = flag_util.IntegerList([1, 2, 3])
  self.assertFalse([1] == il)
def testSerializeRangeNegativeNumbers(self):
  ser = flag_util.IntegerListSerializer()
  il = flag_util.IntegerList([(-5, 3)])
  self.assertEqual(ser.serialize(il), '-5:3')

  il = flag_util.IntegerList([(4, -2)])
  self.assertEqual(ser.serialize(il), '4:-2')
def testSimpleGetItem(self):
  il = flag_util.IntegerList([1, 2, 3])
  self.assertEqual(il[0], 1)
  self.assertEqual(il[1], 2)
  self.assertEqual(il[2], 3)
def testOutOfRangeIndexError(self):
  il = flag_util.IntegerList([1, 2, 3])
  with self.assertRaises(IndexError):
    il[4]
      zone: eastus
  vm_count: null
"""

flags.DEFINE_integer('hpcg_runtime', 60, 'hpcg runtime in seconds',
                     lower_bound=1)
flags.DEFINE_integer('hpcg_gpus_per_node', None,
                     'The number of gpus per node.', lower_bound=1)
flag_util.DEFINE_integerlist(
    'hpcg_problem_size', flag_util.IntegerList([256, 256, 256]),
    'three dimensional problem size for each node. Must contain '
    'three integers')


class HpcgParseOutputException(Exception):
  pass


class HpcgIncorrectProblemSizeException(Exception):
  pass


def GetConfig(user_config):
  """Load and return benchmark config.
def testRangeWithStepGetItem(self):
  il = flag_util.IntegerList([1, (2, 7, 2), 9])
  self.assertEqual(il[1], 2)
  self.assertEqual(il[2], 4)
  self.assertEqual(il[3], 6)
  self.assertEqual(il[4], 9)
import os

from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import hpc_util
from perfkitbenchmarker import vm_util
from perfkitbenchmarker import flag_util
from perfkitbenchmarker.linux_packages import shoc_benchmark_suite
from perfkitbenchmarker.linux_packages import cuda_toolkit

flags.DEFINE_integer(
    'stencil2d_iterations', 5, 'number of iterations to run', lower_bound=1)
flag_util.DEFINE_integerlist(
    'stencil2d_problem_sizes', flag_util.IntegerList([4096]),
    'problem sizes to run. Can specify a single '
    'number, like --stencil2d_problem_sizes=4096 '
    'or a list like --stencil2d_problem_sizes='
    '1024,4096',
    on_nonincreasing=flag_util.IntegerListParser.WARN,
    module_name=__name__)

FLAGS = flags.FLAGS

MACHINEFILE = 'machinefile'
BENCHMARK_NAME = 'stencil2d'
BENCHMARK_VERSION = '0.25'
BENCHMARK_CONFIG = """
stencil2d:
  description: Runs Stencil2D from SHOC Benchmark Suite.\
      Specify the number of VMs with --num_vms
  vm_groups:
def testIterWithStep(self):
  il = flag_util.IntegerList([1, (2, 6, 2), 9])
  self.assertEqual(list(il), [1, 2, 4, 6, 9])
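# A clarifying sketch (comments only, not from the original suite): range
# tuples in an IntegerList include their upper bound when the step lands on
# it exactly, unlike Python's builtin range(), which always excludes it:
#
#   list(flag_util.IntegerList([(2, 6, 2)]))  # -> [2, 4, 6]
#   list(range(2, 6, 2))                      # -> [2, 4]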
def testSerialize(self):
  ser = flag_util.IntegerListSerializer()
  il = flag_util.IntegerList([1, (2, 5), 9])
  self.assertEqual(ser.serialize(il), '1,2-5,9')
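# Hedged inverse sketch: assuming flag_util.IntegerListParser exposes a
# parse() method (the counterpart of serialize() used above), the same
# compact string notation should expand back into the full sequence:
#
#   parsed = flag_util.IntegerListParser().parse('1,2-5,9')
#   list(parsed)  # -> [1, 2, 3, 4, 5, 9]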
    None,
    'Maximum number of iterations to run during '
    'confidence interval estimation. If unset, '
    'a single iteration will be run.',
    lower_bound=3, upper_bound=30)
flags.DEFINE_integer('netperf_test_length', 60,
                     'netperf test length, in seconds',
                     lower_bound=1)
flags.DEFINE_bool(
    'netperf_enable_histograms', True,
    'Determines whether latency histograms are '
    'collected/reported. Only for *RR benchmarks')
flag_util.DEFINE_integerlist(
    'netperf_num_streams', flag_util.IntegerList([1]),
    'Number of netperf processes to run. Netperf '
    'will run once for each value in the list.')
flags.DEFINE_integer('netperf_thinktime', 0,
                     'Time in nanoseconds to do work for each request.')
flags.DEFINE_integer('netperf_thinktime_array_size', 0,
                     'The size of the array to traverse for thinktime.')
flags.DEFINE_integer(
    'netperf_thinktime_run_length', 0,
    'The number of contiguous numbers to sum at a time in the '
    'thinktime array.')

ALL_BENCHMARKS = ['TCP_RR', 'TCP_CRR', 'TCP_STREAM', 'UDP_RR']
flags.DEFINE_list('netperf_benchmarks', ALL_BENCHMARKS,
                  'The netperf benchmark(s) to run.')

flags.register_validator(
def testSerializeWithStep(self):
  ser = flag_util.IntegerListSerializer()
  il = flag_util.IntegerList([1, (2, 5, 2), 9])
  self.assertEqual(ser.serialize(il), '1,2-5-2,9')
flags.DEFINE_string('sysbench_testname', 'oltp_read_write',
                    'The built in oltp lua script to run')
flags.DEFINE_integer('sysbench_tables', 4,
                     'The number of tables used in sysbench oltp.lua tests')
flags.DEFINE_integer('sysbench_table_size', 100000,
                     'The number of rows of each table used in the oltp tests')
flags.DEFINE_integer('sysbench_warmup_seconds', 120,
                     'The duration of the warmup run in which results are '
                     'discarded, in seconds.')
flags.DEFINE_integer('sysbench_run_seconds', 480,
                     'The duration of the actual run in which results are '
                     'collected, in seconds.')
flag_util.DEFINE_integerlist(
    'sysbench_thread_counts',
    flag_util.IntegerList([1, 2, 4, 8, 16, 32, 64]),
    'array of thread counts passed to sysbench, one at a time')
flags.DEFINE_integer('sysbench_latency_percentile', 100,
                     'The latency percentile we ask sysbench to compute.')
flags.DEFINE_integer('sysbench_report_interval', 2,
                     'The interval, in seconds, we ask sysbench to report '
                     'results.')

BENCHMARK_NAME = 'sysbench'
BENCHMARK_CONFIG = """
sysbench:
  description: Sysbench OLTP benchmarks.
  managed_relational_db:
    engine: mysql
    vm_spec:
      GCP:
def testSimpleLength(self):
  il = flag_util.IntegerList([1, 2, 3])
  self.assertEqual(len(il), 3)
    'fio_target_mode', AGAINST_FILE_WITHOUT_FILL_MODE,
    [AGAINST_DEVICE_WITH_FILL_MODE,
     AGAINST_DEVICE_WITHOUT_FILL_MODE,
     AGAINST_FILE_WITH_FILL_MODE,
     AGAINST_FILE_WITHOUT_FILL_MODE],
    'Whether to run against a raw device or a file, and whether '
    'to prefill.')
flags.DEFINE_string(
    'fio_fill_size', '100%',
    'The amount of device to fill in prepare stage. '
    'The valid value can either be an integer, which '
    'represents the number of bytes to fill or a '
    'percentage, which represents the percentage '
    'of the device. A filesystem will be unmounted before '
    'filling and remounted afterwards. Only valid when '
    '--fio_target_mode is against_device_with_fill or '
    'against_file_with_fill.')
flag_util.DEFINE_integerlist(
    'fio_io_depths', flag_util.IntegerList([1]),
    'IO queue depths to run on. Can specify a single '
    'number, like --fio_io_depths=1, a range, like '
    '--fio_io_depths=1-4, or a list, like '
    '--fio_io_depths=1-4,6-8',
    on_nonincreasing=flag_util.IntegerListParser.WARN,
    module_name=__name__)
flag_util.DEFINE_integerlist(
    'fio_num_jobs', flag_util.IntegerList([1]),
    'Number of concurrent fio jobs to run.',
    on_nonincreasing=flag_util.IntegerListParser.WARN,
    module_name=__name__)
flags.DEFINE_integer(
    'fio_working_set_size', None,
    'The size of the working set, in GB. If not given, use '
    'the full size of the device. If using '
def testRangeLength(self):
  il = flag_util.IntegerList([1, (2, 5), 9])
  self.assertEqual(len(il), 6)
flags.DEFINE_integer(
    'sysbench_table_size', 100000,
    'The number of rows of each table used in the oltp tests')
flags.DEFINE_integer('sysbench_scale', 100,
                     'Scale parameter as used by TPCC benchmark.')
flags.DEFINE_integer(
    'sysbench_warmup_seconds', 10,
    'The duration of the warmup run in which results are '
    'discarded, in seconds.')
flags.DEFINE_integer(
    'sysbench_run_seconds', 10,
    'The duration of the actual run in which results are '
    'collected, in seconds.')
flag_util.DEFINE_integerlist(
    'sysbench_thread_counts', flag_util.IntegerList([64]),
    'array of thread counts passed to sysbench, one at a time',
    module_name=__name__)
flags.DEFINE_integer('sysbench_latency_percentile', 100,
                     'The latency percentile we ask sysbench to compute.')
flags.DEFINE_integer(
    'sysbench_report_interval', 2,
    'The interval, in seconds, we ask sysbench to report '
    'results.')
flags.DEFINE_integer(
    'sysbench_pre_failover_seconds', 0,
    'If non zero, then after the sysbench workload is '
    'complete, a failover test will be performed. '
    'When a failover test is run, the database will be driven '
    'using the last entry in sysbench_thread_counts. After '
    'sysbench_pre_failover_seconds, a failover will be '
def testRangeLengthWithStep(self):
  il = flag_util.IntegerList([1, (2, 7, 2), 9])
  self.assertEqual(len(il), 5)
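# Worked check of the length above: [1, (2, 7, 2), 9] expands to 1, then the
# step-2 range 2, 4, 6 (8 would overshoot the inclusive bound 7), then 9,
# giving 1 + 3 + 1 = 5 elements in total.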