def RunIt(repeat_number=None, minutes_since_start=None, total_repeats=None):
  """Run the actual fio command on the VM and save the results.

  Args:
    repeat_number: if given, our number in a sequence of repetitions.
    minutes_since_start: if given, minutes since the start of repetition.
    total_repeats: if given, the total number of repetitions to do.
  """
  # NOTE(review): vm, fio_command, job_file_string and samples are free
  # variables from an enclosing scope that is not visible in this chunk.
  if repeat_number:
    logging.info('**** Repetition number %s of %s ****',
                 repeat_number, total_repeats)
  # stderr was captured but never used; bind it to _ instead of a dead local.
  stdout, _ = vm.RobustRemoteCommand(fio_command, should_log=True)
  if repeat_number:
    # Tag every sample from this run with its position in the sequence.
    base_metadata = {
        'repeat_number': repeat_number,
        'minutes_since_start': minutes_since_start
    }
  else:
    base_metadata = None
  samples.extend(
      fio.ParseResults(job_file_string,
                       json.loads(stdout),
                       base_metadata=base_metadata))
def _ParseFioJson(fio_json):
  """Parse fio json output.

  Args:
    fio_json: string. Json output from fio command.

  Returns:
    A list of sample.Sample object.
  """
  samples = []
  for job in json.loads(fio_json)['jobs']:
    cmd = job['fio_command']
    # Get rid of ./fio.
    cmd = ' '.join(cmd.split()[1:])
    additional_metadata = {'cmd': cmd}
    # Remove ssmw suffix from job name.
    try:
      job['jobname'] = regex_util.Substitute(
          STEADY_STATE_MEASUREMENT_WINDOW, '', job['jobname'])
      additional_metadata['steady_state'] = True
    except regex_util.NoMatchError:
      additional_metadata['steady_state'] = False

    # Mock fio_json to reuse fio parser.
    mock_json = {'jobs': [job]}
    # str(...) is the idiomatic spelling of .__str__().
    new_samples = fio.ParseResults(
        str(fio.FioParametersToJob(cmd)), mock_json)
    for s in new_samples:
      s.metadata.update(additional_metadata)
    samples += new_samples
  return samples
def Run(benchmark_spec):
  """Spawn fio and gather the results.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  vm = benchmark_spec.vms[0]
  logging.info('FIO running on %s', vm)
  disk = vm.scratch_disks[0]
  mount_point = disk.mount_point
  # Build (or load) the fio job file locally, then push it to the VM.
  job_file_string = GetOrGenerateJobFileString(
      FLAGS.fio_jobfile,
      FLAGS.fio_generate_scenarios,
      AgainstDevice(),
      disk,
      FLAGS.fio_io_depths,
      FLAGS.fio_num_jobs,
      FLAGS.fio_working_set_size,
      FLAGS.fio_blocksize,
      FLAGS.fio_runtime,
      FLAGS.fio_parameters)
  job_file_path = vm_util.PrependTempDir(LOCAL_JOB_FILE_NAME)
  with open(job_file_path, 'w') as job_file:
    job_file.write(job_file_string)
    logging.info('Wrote fio job file at %s', job_file_path)
  vm.PushFile(job_file_path, REMOTE_JOB_FILE_PATH)
  # Target either the raw device or the mounted filesystem.
  if AgainstDevice():
    fio_command = 'sudo %s --output-format=json --filename=%s %s' % (
        fio.FIO_PATH, disk.GetDevicePath(), REMOTE_JOB_FILE_PATH)
  else:
    fio_command = 'sudo %s --output-format=json --directory=%s %s' % (
        fio.FIO_PATH, mount_point, REMOTE_JOB_FILE_PATH)
  collect_logs = any([FLAGS.fio_lat_log, FLAGS.fio_bw_log, FLAGS.fio_iops_log])
  if collect_logs:
    fio_command = ' '.join([fio_command, GetLogFlags()])
  # TODO(user): This only gives results at the end of a job run
  #      so the program pauses here with no feedback to the user.
  #      This is a pretty lousy experience.
  logging.info('FIO Results:')
  # stderr was captured but never used; bind it to _ instead of a dead local.
  stdout, _ = vm.RobustRemoteCommand(fio_command, should_log=True)
  samples = fio.ParseResults(job_file_string, json.loads(stdout))
  if collect_logs:
    vm.PullFile(vm_util.GetTempDir(), '%s_*.log' % PKB_FIO_LOG_FILE_NAME)
  return samples
def RunSimulatedDatabase(vm):
  """Spawn fio to simulate database and gather the results.

  Args:
    vm: The vm that synthetic_storage_workloads_benchmark will be run upon.

  Returns:
    A list of sample.Sample objects
  """
  # Integer division: keeps --size=%dk formatted from an int, not a float,
  # under Python 3 (behavior is identical since %d truncates anyway).
  test_size = min(vm.total_memory_kb // 10, 1000000)
  iodepth_list = FLAGS.iodepth_list or DEFAULT_DATABASE_SIMULATION_IODEPTH_LIST
  results = []
  for depth in iodepth_list:
    cmd = ('--filesize=10g '
           '--directory=%s '
           '--ioengine=libaio '
           '--filename=fio_test_file '
           '--overwrite=1 '
           '--invalidate=0 '
           '--direct=1 '
           '--randrepeat=0 '
           '--iodepth=%s '
           '--size=%dk '
           '--blocksize=4k ') % (vm.GetScratchDir(), depth, test_size)
    if FLAGS.maxjobs:
      cmd += '--max-jobs=%s ' % FLAGS.maxjobs
    # Three stonewalled phases: random write, random read, 90/10 mixed.
    cmd += ('--name=random_write '
            '--rw=randwrite '
            '--end_fsync=1 '
            '--name=random_read '
            '--stonewall '
            '--rw=randread '
            '--name=mixed_randrw '
            '--stonewall '
            '--rw=randrw '
            '--rwmixread=90 '
            '--rwmixwrite=10 '
            '--end_fsync=1 ')
    logging.info('FIO Results for simulated %s, iodepth %s', DATABASE, depth)
    res, _ = vm.RemoteCommand('%s %s' % (fio.FIO_CMD_PREFIX, cmd),
                              should_log=True)
    instance, _, _ = vm.RemoteHostCommandWithReturnCode(
        'cat /etc/kubenode')
    instance = instance.strip()
    results.extend(
        fio.ParseResults(fio.FioParametersToJob(cmd),
                         json.loads(res),
                         instance=instance))
  UpdateWorkloadMetadata(results)
  return results
def testParseResultsBaseMetadata(self):
  """ParseResults must merge base_metadata into every sample's metadata."""
  with mock.patch(fio.__name__ + '.ParseJobFile',
                  return_value={
                      'sequential_write': {},
                      'sequential_read': {},
                      'random_write_test': {},
                      'random_read_test': {},
                      'random_read_test_parallel': {}
                  }):
    results = fio.ParseResults('', self.result_contents,
                               base_metadata=BASE_METADATA)
    for result in results:
      # assertDictContainsSubset is deprecated (Python 3.2+); the merge
      # equality below asserts the same subset relationship.
      self.assertEqual(result.metadata,
                       {**result.metadata, **BASE_METADATA})
def _RunFio(vm, fio_params, metadata):
  """Execute fio remotely and turn its JSON output into samples.

  Args:
    vm: Virtual machine to run fio on.
    fio_params: fio parameters used to create the fio command to run.
    metadata: Metadata to add to the results.

  Returns:
    A list of sample.Sample objects
  """
  # Run fio under sudo and capture only stdout (the JSON report).
  output, _ = vm.RemoteCommand(
      'sudo {0} {1}'.format(fio.GetFioExec(), fio_params))
  # Reconstruct the job file from the parameters so the parser can
  # associate results with job definitions.
  job_file = fio.FioParametersToJob(fio_params)
  parsed = fio.ParseResults(
      job_file,
      json.loads(output),
      base_metadata=metadata,
      skip_latency_individual_stats=True)
  return parsed
def RunSimulatedStreaming(vm):
  """Spawn fio to simulate streaming and gather the results.

  Args:
    vm: The vm that synthetic_storage_workloads_benchmark will be run upon.

  Returns:
    A list of sample.Sample objects
  """
  # Integer division: keeps --size=%dk formatted from an int, not a float,
  # under Python 3 (behavior is identical since %d truncates anyway).
  test_size = min(vm.total_memory_kb // 10, 1000000)
  iodepth_list = FLAGS.iodepth_list or DEFAULT_STREAMING_SIMULATION_IODEPTH_LIST
  results = []
  for depth in iodepth_list:
    cmd = (
        '--filesize=10g '
        '--directory=%s '
        '--ioengine=libaio '
        '--overwrite=0 '
        '--invalidate=1 '
        '--direct=1 '
        '--randrepeat=0 '
        '--iodepth=%s '
        '--blocksize=1m '
        '--size=%dk '
        '--filename=fio_test_file ') % (vm.GetScratchDir(), depth, test_size)
    if FLAGS.maxjobs:
      cmd += '--max-jobs=%s ' % FLAGS.maxjobs
    # Two stonewalled phases: sequential write then sequential read.
    cmd += (
        '--name=sequential_write '
        '--rw=write '
        '--end_fsync=1 '
        '--name=sequential_read '
        '--stonewall '
        '--rw=read ')
    logging.info('FIO Results for simulated %s', STREAMING)
    res, _ = vm.RemoteCommand('%s %s' % (fio.FIO_CMD_PREFIX, cmd),
                              should_log=True)
    results.extend(
        fio.ParseResults(fio.FioParametersToJob(cmd), json.loads(res)))
  UpdateWorkloadMetadata(results)
  return results
def testParseHistogramMultipleJobs(self):
  """Checks histogram parsing against recorded fio artifacts on disk.

  Loads a real job file, its JSON result, and per-job histogram logs from
  the 'hist' test-data directory, then compares the parsed read and write
  histograms against golden JSON files.
  """
  hist_dir = os.path.join(self.data_dir, 'hist')
  job_file = _ReadFileToString(
      os.path.join(hist_dir, 'pkb-7fb0c9d8-0_fio.job'))
  fio_json_result = json.loads(
      _ReadFileToString(os.path.join(hist_dir, 'pkb-7fb0c9d8-0_fio.json')))
  log_file_base = 'pkb_fio_avg_1506559526.49'
  single_bin_vals = [
      float(f) for f in _ReadFileToString(
          os.path.join(hist_dir, 'bin_vals')).split()
  ]
  # each hist file has its own bin_vals, but they're all the same
  bin_vals = [
      single_bin_vals, single_bin_vals, single_bin_vals, single_bin_vals
  ]

  # redirect open to the hist subdirectory
  def OpenTestFile(filename):
    return open(os.path.join(hist_dir, os.path.basename(filename)))

  # Patch the fio module's open() so ParseResults reads the fixture logs
  # instead of files at their original recorded paths.
  with mock.patch(fio.__name__ + '.open',
                  new=mock.MagicMock(side_effect=OpenTestFile),
                  create=True):
    results = fio.ParseResults(job_file, fio_json_result, None,
                               log_file_base, bin_vals)

  actual_read_hist = _ExtractHistogramFromMetric(
      results,
      'rand_16k_read_100%-io-depth-1-num-jobs-2:16384:read:histogram')
  expected_read_hist = json.loads(
      _ReadFileToString(os.path.join(hist_dir, 'expected_read.json')))
  self.assertEqual(expected_read_hist, actual_read_hist)

  actual_write_hist = _ExtractHistogramFromMetric(
      results,
      'rand_16k_write_100%-io-depth-1-num-jobs-2:16384:write:histogram')
  expected_write_hist = json.loads(
      _ReadFileToString(os.path.join(hist_dir, 'expected_write.json')))
  self.assertEqual(expected_write_hist, actual_write_hist)
def RunSimulatedLogging(vm):
  """Spawn fio to simulate logging and gather the results.

  Args:
    vm: The vm that synthetic_storage_workloads_benchmark will be run upon.

  Returns:
    A list of sample.Sample objects
  """
  test_size = vm.total_memory_kb
  cmd = ('--filesize=10g '
         '--directory=%s '
         '--ioengine=libaio '
         '--filename=fio_test_file '
         '--invalidate=1 '
         '--randrepeat=0 '
         '--direct=0 '
         '--size=%dk '
         '--iodepth=%d ') % (vm.GetScratchDir(), test_size, DEFAULT_IODEPTH)
  if FLAGS.maxjobs:
    cmd += '--max-jobs=%s ' % FLAGS.maxjobs
  # Three stonewalled phases; the random-read phase uses a tenth of the
  # working set. Integer division keeps %d formatting from an int under
  # Python 3 (same value either way since %d truncates).
  cmd += ('--name=sequential_write '
          '--overwrite=0 '
          '--rw=write '
          '--end_fsync=1 '
          '--name=random_read '
          '--size=%dk '
          '--stonewall '
          '--rw=randread '
          '--name=sequential_read '
          '--stonewall '
          '--rw=read ') % (test_size // 10)
  logging.info('FIO Results for simulated %s', LOGGING)
  res, _ = vm.RemoteCommand('%s %s' % (fio.FIO_CMD_PREFIX, cmd),
                            should_log=True)
  instance, _, _ = vm.RemoteHostCommandWithReturnCode('cat /etc/kubenode')
  instance = instance.strip()
  results = fio.ParseResults(fio.FioParametersToJob(cmd),
                             json.loads(res),
                             instance=instance)
  UpdateWorkloadMetadata(results)
  return results
def RunWithExec(benchmark_spec, exec_path, remote_job_file_path,
                job_file_contents):
  """Spawn fio and gather the results.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
    exec_path: string path to the fio executable.
    remote_job_file_path: path, on the vm, to the location of the job file.
    job_file_contents: string contents of the fio job file.

  Returns:
    A list of sample.Sample objects.
  """
  vm = benchmark_spec.vms[0]
  logging.info('FIO running on %s', vm)
  disk = vm.scratch_disks[0]
  mount_point = disk.mount_point
  # Build (or load) the fio job file locally, then push it to the VM.
  job_file_string = GetOrGenerateJobFileString(
      FLAGS.fio_jobfile, FLAGS.fio_generate_scenarios, AgainstDevice(), disk,
      FLAGS.fio_io_depths, FLAGS.fio_num_jobs, FLAGS.fio_working_set_size,
      FLAGS.fio_blocksize, FLAGS.fio_runtime, FLAGS.fio_parameters,
      job_file_contents)
  job_file_path = vm_util.PrependTempDir(vm.name + LOCAL_JOB_FILE_SUFFIX)
  with open(job_file_path, 'w') as job_file:
    job_file.write(job_file_string)
    logging.info('Wrote fio job file at %s', job_file_path)
    logging.info(job_file_string)
  vm.PushFile(job_file_path, remote_job_file_path)
  # Target either the raw device or the mounted filesystem.
  if AgainstDevice():
    fio_command = '%s --output-format=json --filename=%s %s' % (
        exec_path, disk.GetDevicePath(), remote_job_file_path)
  else:
    fio_command = '%s --output-format=json --directory=%s %s' % (
        exec_path, mount_point, remote_job_file_path)
  collect_logs = any([
      FLAGS.fio_lat_log, FLAGS.fio_bw_log, FLAGS.fio_iops_log,
      FLAGS.fio_hist_log
  ])
  log_file_base = ''
  if collect_logs:
    # Timestamped base name keeps log files from successive runs distinct.
    log_file_base = '%s_%s' % (PKB_FIO_LOG_FILE_NAME, str(time.time()))
    fio_command = ' '.join([fio_command, GetLogFlags(log_file_base)])
  # TODO(user): This only gives results at the end of a job run
  #      so the program pauses here with no feedback to the user.
  #      This is a pretty lousy experience.
  logging.info('FIO Results:')
  stdout, _ = vm.RobustRemoteCommand(fio_command, should_log=True)
  bin_vals = []
  if collect_logs:
    vm.PullFile(vm_util.GetTempDir(), '%s*.log' % log_file_base)
    if FLAGS.fio_hist_log:
      # Count the clat histogram logs on the VM; fio numbers them from 1.
      num_logs = int(
          vm.RemoteCommand('ls %s_clat_hist.*.log | wc -l' % log_file_base)[0])
      bin_vals += [
          fio.ComputeHistogramBinVals(
              vm, '%s_clat_hist.%s.log' % (log_file_base, idx + 1))
          for idx in range(num_logs)
      ]
  samples = fio.ParseResults(job_file_string,
                             json.loads(stdout),
                             log_file_base=log_file_base,
                             bin_vals=bin_vals)
  return samples
def testParseFioResults(self):
  """Golden test: ParseResults must emit exactly these samples.

  ParseJobFile is mocked out so only the JSON-result parsing path is
  exercised; the expected values correspond to self.result_contents.
  """
  with mock.patch(fio.__name__ + '.ParseJobFile',
                  return_value={
                      'sequential_write': {},
                      'sequential_read': {},
                      'random_write_test': {},
                      'random_read_test': {},
                      'random_read_test_parallel': {}
                  }):
    result = fio.ParseResults('', self.result_contents)
    # Each entry is (metric, value, unit, metadata), splatted into
    # sample.Sample below.
    expected_result = [
        ['sequential_write:write:bandwidth', 68118, 'KB/s',
         {'bw_max': 74454, 'bw_agg': 63936.8, 'bw_min': 19225,
          'bw_dev': 20346.28, 'bw_mean': 63936.8,
          'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency', 477734.84, 'usec',
         {'max': 869891, 'stddev': 92609.34, 'min': 189263,
          'mean': 477734.84, 'p60': 444416, 'p1': 387072, 'p99.9': 872448,
          'p70': 448512, 'p5': 440320, 'p90': 610304, 'p99.95': 872448,
          'p80': 452608, 'p95': 724992, 'p10': 440320, 'p99.5': 847872,
          'p99': 823296, 'p20': 440320, 'p99.99': 872448, 'p30': 444416,
          'p50': 444416, 'p40': 444416, 'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:min', 189263, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:max', 869891, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:mean', 477734.84, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:stddev', 92609.34, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p1', 387072, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p5', 440320, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p10', 440320, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p20', 440320, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p30', 444416, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p40', 444416, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p50', 444416, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p60', 444416, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p70', 448512, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p80', 452608, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p90', 610304, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p95', 724992, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p99', 823296, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p99.5', 847872, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p99.9', 872448, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p99.95', 872448, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:latency:p99.99', 872448, 'usec',
         {'fio_job': 'sequential_write'}],
        ['sequential_write:write:iops', 133, '',
         {'fio_job': 'sequential_write'}],
        ['sequential_read:read:bandwidth', 129836, 'KB/s',
         {'bw_max': 162491, 'bw_agg': 130255.2, 'bw_min': 115250,
          'bw_dev': 18551.37, 'bw_mean': 130255.2,
          'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency', 250667.06, 'usec',
         {'max': 528542, 'stddev': 70403.40, 'min': 24198,
          'mean': 250667.06, 'p60': 268288, 'p1': 59136, 'p99.9': 528384,
          'p70': 272384, 'p5': 116224, 'p90': 292864, 'p99.95': 528384,
          'p80': 280576, 'p95': 366592, 'p10': 164864, 'p99.5': 489472,
          'p99': 473088, 'p20': 199680, 'p99.99': 528384, 'p30': 246784,
          'p50': 264192, 'p40': 257024, 'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:min', 24198, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:max', 528542, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:mean', 250667.06, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:stddev', 70403.40, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p1', 59136, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p5', 116224, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p10', 164864, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p20', 199680, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p30', 246784, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p40', 257024, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p50', 264192, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p60', 268288, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p70', 272384, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p80', 280576, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p90', 292864, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p95', 366592, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p99', 473088, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p99.5', 489472, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p99.9', 528384, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p99.95', 528384, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:latency:p99.99', 528384, 'usec',
         {'fio_job': 'sequential_read'}],
        ['sequential_read:read:iops', 253, '',
         {'fio_job': 'sequential_read'}],
        ['random_write_test:write:bandwidth', 6443, 'KB/s',
         {'bw_max': 7104, 'bw_agg': 6446.55, 'bw_min': 5896,
          'bw_dev': 336.21, 'bw_mean': 6446.55,
          'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency', 587.02, 'usec',
         {'max': 81806, 'stddev': 897.93, 'min': 1, 'mean': 587.02,
          'p60': 524, 'p1': 446, 'p99.9': 3216, 'p70': 532, 'p5': 462,
          'p90': 636, 'p99.95': 4128, 'p80': 564, 'p95': 1064, 'p10': 470,
          'p99.5': 1736, 'p99': 1688, 'p20': 482, 'p99.99': 81408,
          'p30': 494, 'p50': 510, 'p40': 502,
          'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:min', 1, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:max', 81806, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:mean', 587.02, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:stddev', 897.93, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p1', 446, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p5', 462, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p10', 470, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p20', 482, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p30', 494, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p40', 502, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p50', 510, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p60', 524, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p70', 532, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p80', 564, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p90', 636, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p95', 1064, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p99', 1688, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p99.5', 1736, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p99.9', 3216, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p99.95', 4128, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:latency:p99.99', 81408, 'usec',
         {'fio_job': 'random_write_test'}],
        ['random_write_test:write:iops', 1610, '',
         {'fio_job': 'random_write_test'}],
        ['random_read_test:read:bandwidth', 1269, 'KB/s',
         {'bw_max': 1745, 'bw_agg': 1275.52, 'bw_min': 330,
          'bw_dev': 201.59, 'bw_mean': 1275.52,
          'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency', 3117.62, 'usec',
         {'max': 352736, 'stddev': 5114.37, 'min': 0, 'mean': 3117.62,
          'p60': 3312, 'p1': 524, 'p99.9': 6880, 'p70': 3344, 'p5': 588,
          'p90': 3408, 'p99.95': 11840, 'p80': 3376, 'p95': 3440,
          'p10': 2544, 'p99.5': 4128, 'p99': 3728, 'p20': 3152,
          'p99.99': 354304, 'p30': 3216, 'p50': 3280, 'p40': 3248,
          'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:min', 0, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:max', 352736, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:mean', 3117.62, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:stddev', 5114.37, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p1', 524, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p5', 588, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p10', 2544, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p20', 3152, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p30', 3216, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p40', 3248, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p50', 3280, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p60', 3312, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p70', 3344, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p80', 3376, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p90', 3408, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p95', 3440, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p99', 3728, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p99.5', 4128, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p99.9', 6880, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p99.95', 11840, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:latency:p99.99', 354304, 'usec',
         {'fio_job': 'random_read_test'}],
        ['random_read_test:read:iops', 317, '',
         {'fio_job': 'random_read_test'}],
        ['random_read_test_parallel:read:bandwidth', 1292, 'KB/s',
         {'bw_max': 1693, 'bw_agg': 1284.71, 'bw_min': 795,
          'bw_dev': 88.67, 'bw_mean': 1284.71,
          'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency', 198030.44, 'usec',
         {'max': 400078, 'stddev': 21709.40, 'min': 0, 'mean': 198030.44,
          'p60': 199680, 'p1': 65280, 'p99.9': 370688, 'p70': 203776,
          'p5': 189440, 'p90': 205824, 'p99.95': 387072, 'p80': 203776,
          'p95': 209920, 'p10': 189440, 'p99.5': 257024, 'p99': 209920,
          'p20': 193536, 'p99.99': 399360, 'p30': 197632, 'p50': 199680,
          'p40': 197632, 'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:min', 0, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:max', 400078, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:mean', 198030.44, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:stddev', 21709.40, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p1', 65280, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p5', 189440, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p10', 189440, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p20', 193536, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p30', 197632, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p40', 197632, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p50', 199680, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p60', 199680, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p70', 203776, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p80', 203776, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p90', 205824, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p95', 209920, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p99', 209920, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p99.5', 257024, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p99.9', 370688, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p99.95', 387072, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:latency:p99.99', 399360, 'usec',
         {'fio_job': 'random_read_test_parallel'}],
        ['random_read_test_parallel:read:iops', 323, '',
         {'fio_job': 'random_read_test_parallel'}]
    ]
    expected_result = [
        sample.Sample(*sample_tuple) for sample_tuple in expected_result
    ]
    # Timestamps are generated at parse time, so compare up to timestamp.
    self.assertSampleListsEqualUpToTimestamp(result, expected_result)
def RunWithExec(vm, exec_path, remote_job_file_path, job_file_contents):
  """Spawn fio and gather the results.

  Args:
    vm: vm to run the benchmark on.
    exec_path: string path to the fio executable.
    remote_job_file_path: path, on the vm, to the location of the job file.
    job_file_contents: string contents of the fio job file.

  Returns:
    A list of sample.Sample objects.
  """
  logging.info('FIO running on %s', vm)
  disk = vm.scratch_disks[0]
  mount_point = disk.mount_point
  if FLAGS.fio_write_against_multiple_clients:
    # Give each client VM its own subdirectory so they don't collide.
    mount_point = '%s/%s' % (disk.mount_point, vm.name)
    logging.info('FIO mount point changed to %s', mount_point)

  # Build (or load) the fio job file locally, then push it to the VM.
  job_file_string = GetOrGenerateJobFileString(
      FLAGS.fio_jobfile, FLAGS.fio_generate_scenarios, AgainstDevice(), disk,
      FLAGS.fio_io_depths, FLAGS.fio_num_jobs, FLAGS.fio_working_set_size,
      FLAGS.fio_blocksize, FLAGS.fio_runtime, _DIRECT_IO.value,
      FLAGS.fio_parameters, job_file_contents)
  job_file_path = vm_util.PrependTempDir(vm.name + LOCAL_JOB_FILE_SUFFIX)
  with open(job_file_path, 'w') as job_file:
    job_file.write(job_file_string)
    logging.info('Wrote fio job file at %s', job_file_path)
    logging.info(job_file_string)
  vm.PushFile(job_file_path, remote_job_file_path)

  # Target either the raw device or the mounted filesystem.
  if AgainstDevice():
    fio_command = (
        f'{exec_path} --output-format=json '
        f'--random_generator={FLAGS.fio_rng} '
        f'--filename={disk.GetDevicePath()} {remote_job_file_path}')
  else:
    fio_command = (f'{exec_path} --output-format=json '
                   f'--random_generator={FLAGS.fio_rng} '
                   f'--directory={mount_point} {remote_job_file_path}')
  collect_logs = any([
      FLAGS.fio_lat_log, FLAGS.fio_bw_log, FLAGS.fio_iops_log,
      FLAGS.fio_hist_log
  ])
  log_file_base = ''
  if collect_logs:
    # Timestamped base name keeps log files from successive runs distinct.
    log_file_base = '%s_%s' % (PKB_FIO_LOG_FILE_NAME, str(time.time()))
    fio_command = ' '.join([fio_command, GetLogFlags(log_file_base)])
  # TODO(user): This only gives results at the end of a job run
  #      so the program pauses here with no feedback to the user.
  #      This is a pretty lousy experience.
  logging.info('FIO Results:')
  start_time = time.time()
  stdout, _ = vm.RobustRemoteCommand(fio_command, should_log=True,
                                     timeout=FLAGS.fio_command_timeout_sec)
  end_time = time.time()
  bin_vals = []
  if collect_logs:
    vm.PullFile(vm_util.GetTempDir(), '%s*.log' % log_file_base)
    if FLAGS.fio_hist_log:
      # Count the clat histogram logs on the VM; fio numbers them from 1.
      num_logs = int(
          vm.RemoteCommand('ls %s_clat_hist.*.log | wc -l' % log_file_base)[0])
      bin_vals += [
          fio.ComputeHistogramBinVals(
              vm, '%s_clat_hist.%s.log' % (log_file_base, idx + 1))
          for idx in range(num_logs)
      ]
  samples = fio.ParseResults(job_file_string, json.loads(stdout),
                             log_file_base=log_file_base, bin_vals=bin_vals)
  # Record wall-clock run boundaries, reusing the first sample's metadata.
  # NOTE(review): assumes ParseResults returns at least one sample — an
  # empty result would raise IndexError here; confirm with the parser.
  samples.append(
      sample.Sample('start_time', start_time, 'sec', samples[0].metadata))
  samples.append(
      sample.Sample('end_time', end_time, 'sec', samples[0].metadata))
  return samples