def test_rpma_fio_bench(tool_mode, readwrite, mode, monkeypatch):
    """test all arguments variants of rpma_fio_bench.sh"""
    def run_mock(args, env):
        assert len(args) == 5
        # XXX is it always correct to assume the tool is in the current
        # working directory?
        assert args[0] == './' + TOOL_RPMA_FIO_BENCH
        assert args[1] == IP_DUMMY
        assert args[2] == tool_mode
        assert args[3] == readwrite
        assert args[4] == mode
        assert env['OUTPUT_FILE'] == output_file(ID_DUMMY, RESULT_DIR)
        return ProcessMock()
    monkeypatch.setattr(subprocess, 'run', run_mock)
    oneseries = {
        **ONESERIES_DUMMY,
        'tool': TOOL_RPMA_FIO_BENCH,
        'mode': mode,
        'tool_mode': tool_mode,
        'rw': readwrite,
        'busy_wait_polling': True
    }
    benchmark = Benchmark(oneseries)
    benchmark.run(CONFIG_DUMMY, RESULT_DIR)
    assert benchmark.is_done()

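# NOTE (assumption): the tool_mode / readwrite / mode arguments of the test
# above are presumably supplied by pytest parametrization defined elsewhere
# in the module. The exact value sets below are illustrative guesses based
# on typical rpma_fio_bench.sh usage, not taken from the source, e.g.:
#
#   @pytest.mark.parametrize('tool_mode', ['apm', 'gpspm'])
#   @pytest.mark.parametrize('readwrite', ['read', 'write', 'randread',
#                                          'randwrite', 'rw', 'randrw'])
#   @pytest.mark.parametrize('mode', ['lat', 'bw'])
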
def test_incomplete_benchmark(key, monkeypatch):
    """an incomplete benchmark definition"""
    def run_mock(_args, **_):
        assert False, "subprocess.run() should not be called"
    monkeypatch.setattr(subprocess, 'run', run_mock)
    oneseries = {**ONESERIES_DUMMY}
    oneseries.pop(key, None)
    benchmark = Benchmark(oneseries)
    with pytest.raises(ValueError):
        benchmark.run(CONFIG_DUMMY, RESULT_DIR)
    assert not benchmark.is_done()

def test_busy_wait_polling(config_busy_wait_polling, busy_wait_polling,
                           expected_busy_wait_polling, monkeypatch):
    """busy_wait_polling to BUSY_WAIT_POLLING mapping"""
    def run_mock(_args, env):
        assert env['BUSY_WAIT_POLLING'] == expected_busy_wait_polling
        return ProcessMock()
    monkeypatch.setattr(subprocess, 'run', run_mock)
    oneseries = {**ONESERIES_DUMMY, 'busy_wait_polling': busy_wait_polling}
    benchmark = Benchmark(oneseries)
    config = {**CONFIG_BIG, 'BUSY_WAIT_POLLING': config_busy_wait_polling}
    benchmark.run(config, RESULT_DIR)
    assert benchmark.is_done()

def test_filetype_pmem_no_mem_path(config_remote_job_mem_path, monkeypatch):
    """filetype=pmem when no REMOTE_JOB_MEM_PATH provided"""
    def run_mock(_args, **_):
        assert False, "subprocess.run() should not be called"
    monkeypatch.setattr(subprocess, 'run', run_mock)
    oneseries = {**ONESERIES_DUMMY, 'filetype': 'pmem'}
    benchmark = Benchmark(oneseries)
    # operate on a copy so the shared CONFIG_BIG is not mutated
    # (test_filetype's sanity check relies on its original contents)
    config = {**CONFIG_BIG}
    if config_remote_job_mem_path is None:
        config.pop('REMOTE_JOB_MEM_PATH', None)
    else:
        config['REMOTE_JOB_MEM_PATH'] = config_remote_job_mem_path
    with pytest.raises(ValueError):
        benchmark.run(config, RESULT_DIR)
    assert not benchmark.is_done()

def test_gpspm_no_busy_wait_polling(readwrite, mode, monkeypatch):
    """tool_mode=gpspm when no busy_wait_polling provided"""
    def run_mock(_args, **_):
        assert False, "subprocess.run() should not be called"
    monkeypatch.setattr(subprocess, 'run', run_mock)
    oneseries = {
        **ONESERIES_DUMMY,
        'tool': TOOL_RPMA_FIO_BENCH,
        'mode': mode,
        'tool_mode': 'gpspm',
        'rw': readwrite
    }
    benchmark = Benchmark(oneseries)
    with pytest.raises(ValueError):
        benchmark.run(CONFIG_BIG, RESULT_DIR)
    assert not benchmark.is_done()

def test_ib_read(mode, monkeypatch):
    """test all arguments variants of ib_read.sh"""
    def run_mock(args, env):
        assert len(args) == 3
        # XXX is it always correct to assume the tool is in the current
        # working directory?
        assert args[0] == './' + TOOL_IB_READ
        assert args[1] == IP_DUMMY
        assert args[2] == mode
        assert env['OUTPUT_FILE'] == output_file(ID_DUMMY, RESULT_DIR)
        return ProcessMock()
    monkeypatch.setattr(subprocess, 'run', run_mock)
    oneseries = {**ONESERIES_DUMMY, 'tool': TOOL_IB_READ, 'mode': mode}
    benchmark = Benchmark(oneseries)
    benchmark.run(CONFIG_DUMMY, RESULT_DIR)
    assert benchmark.is_done()

def test_filetype(filetype, monkeypatch):
    """filetype to REMOTE_JOB_MEM_PATH mapping"""
    # sanity check
    assert CONFIG_BIG['REMOTE_JOB_MEM_PATH'] != 'malloc'

    def run_mock(_args, env):
        if filetype == 'malloc':
            assert env['REMOTE_JOB_MEM_PATH'] == 'malloc'
        else:
            assert env['REMOTE_JOB_MEM_PATH'] == \
                CONFIG_BIG['REMOTE_JOB_MEM_PATH']
        return ProcessMock()
    monkeypatch.setattr(subprocess, 'run', run_mock)
    oneseries = {**ONESERIES_DUMMY, 'filetype': filetype}
    benchmark = Benchmark(oneseries)
    benchmark.run(CONFIG_BIG, RESULT_DIR)
    assert benchmark.is_done()

def fixture_benchmark_dummy():
    """create a very simple Benchmark instance"""
    return Benchmark({**ONESERIES_DUMMY})
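
# --- Assumed test scaffolding (sketch) -------------------------------------
# The names used throughout these tests (ProcessMock, output_file, the
# *_DUMMY constants, CONFIG_BIG, RESULT_DIR, the TOOL_* identifiers) are
# defined elsewhere in the real test module, and fixture_benchmark_dummy is
# presumably registered via @pytest.fixture(name='benchmark_dummy').
# Everything below is a minimal, hypothetical sketch for reading these
# tests in isolation; none of the concrete values come from the source.
# Benchmark itself is imported from the module under test (import not shown).

import os
import subprocess  # monkeypatched in every test above

import pytest

RESULT_DIR = '/tmp/results'           # assumed output directory
IP_DUMMY = '101.102.103.104'          # assumed server address
ID_DUMMY = 'id-dummy'                 # assumed benchmark identifier
TOOL_IB_READ = 'ib_read.sh'
TOOL_RPMA_FIO_BENCH = 'rpma_fio_bench.sh'

# assumed minimal-but-complete benchmark definition
ONESERIES_DUMMY = {'id': ID_DUMMY, 'tool': TOOL_IB_READ, 'mode': 'lat',
                   'filetype': 'malloc'}
CONFIG_DUMMY = {'server_ip': IP_DUMMY}
CONFIG_BIG = {**CONFIG_DUMMY, 'REMOTE_JOB_MEM_PATH': '/dev/dax0.0',
              'BUSY_WAIT_POLLING': True}


def output_file(id_, result_dir):
    """assumed helper: compose the benchmark's output file path"""
    return os.path.join(result_dir, 'benchmark_' + str(id_) + '.json')


class ProcessMock:
    """mimics the subprocess.CompletedProcess object run_mock returns"""
    returncode = 0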