def test_hooks(self):
    """Job runs before/after hooks in stack (LIFO) order."""
    self.mock_benchmark['path'] = 'true'
    self.job_config['hooks'] = [
        {'hook': 'first', 'options': {'a': 1}},
        {'hook': 'second', 'options': {'b': 1}},
    ]
    tracker = MagicMock()

    def fake_factory(name):
        # Route each hook name to a child of one shared tracker so the
        # relative ordering of all hook calls is recorded in one place.
        if name == 'first':
            return tracker.first
        return tracker.second

    HookFactory.create.side_effect = fake_factory
    job = Job(self.job_config, self.mock_benchmark)
    job.run()
    expected_calls = [
        call.first.before_job({'a': 1}, job),
        call.second.before_job({'b': 1}, job),
        # after_job hooks fire in reverse registration order
        call.second.after_job({'b': 1}, job),
        call.first.after_job({'a': 1}, job),
    ]
    self.assertListEqual(expected_calls, tracker.method_calls)
def test_tee_stdouterr(self):
    """tee_output prefixes echoed subprocess lines.

    With tee_output=True the job prints each subprocess stdout line
    prefixed with 'stdout:' and each stderr line with 'stderr:'.
    """
    sample = 'line 1 from echo\nthis is the second line'
    self.job_config['args'] = [sample]
    self.job_config['metrics'] = ['key']
    self.job_config['tee_output'] = True
    self.mock_benchmark['path'] = 'echo'
    self.mock_parser.parse.return_value = {'key': 'hello'}
    Job(self.job_config, self.mock_benchmark).run()
    # NOTE(review): reads sys.stdout.getvalue(), so this assumes stdout
    # was swapped for a StringIO in setUp -- confirm against the fixture.
    self.assertEqual(
        sys.stdout.getvalue(),
        'stdout: line 1 from echo\nstdout: this is the second line\n',
    )
    # Reset the captured buffer, then exercise stderr and stdout together.
    sys.stdout.truncate(0)
    sys.stdout.seek(0)
    self.mock_benchmark['path'] = 'sh'
    self.job_config['args'] = [
        '-c', 'echo "error" >&2 && echo "from stdout"'
    ]
    self.job_config['tee_output'] = True
    Job(self.job_config, self.mock_benchmark).run()
    self.assertEqual(
        sys.stdout.getvalue(), 'stdout: from stdout\nstderr: error\n'
    )
def test_run_no_binary(self):
    """Running a nonexistent benchmark binary raises OSError."""
    self.mock_benchmark["path"] = "somethingthatdoesntexist"
    self.mock_benchmark["metrics"] = []
    with self.assertRaises(OSError):
        Job(self.job_config, self.mock_benchmark).run()
def test_run_no_binary(self):
    """Running a nonexistent benchmark binary raises OSError."""
    self.mock_benchmark['path'] = 'somethingthatdoesntexist'
    self.mock_benchmark['metrics'] = []
    with self.assertRaises(OSError):
        Job(self.job_config, self.mock_benchmark).run()
def test_run_timeout(self):
    """A binary outliving the configured timeout raises TimeoutExpired."""
    self.job_config["timeout"] = 0.1
    self.job_config["args"] = ["1"]
    self.mock_benchmark["path"] = "sleep"
    with self.assertRaises(subprocess.TimeoutExpired):
        Job(self.job_config, self.mock_benchmark).run()
def test_run_parser_error(self):
    """An exception raised by the parser propagates out of job.run."""
    self.mock_benchmark["path"] = "true"
    self.mock_benchmark["metrics"] = []
    self.mock_parser.parse.side_effect = ValueError("")
    with self.assertRaises(ValueError):
        Job(self.job_config, self.mock_benchmark).run()
def test_run_timeout(self):
    """A binary outliving the configured timeout raises TimeoutExpired."""
    self.job_config['timeout'] = 0.1
    self.job_config['args'] = ['1']
    self.mock_benchmark['path'] = 'sleep'
    with self.assertRaises(subprocess.TimeoutExpired):
        Job(self.job_config, self.mock_benchmark).run()
def test_run_parser_error(self):
    """An exception raised by the parser propagates out of job.run."""
    self.mock_benchmark['path'] = 'true'
    self.mock_benchmark['metrics'] = []
    self.mock_parser.parse.side_effect = ValueError('')
    with self.assertRaises(ValueError):
        Job(self.job_config, self.mock_benchmark).run()
def test_run_fail_no_check_returncode(self):
    """A nonzero exit status is tolerated when check_returncode is False."""
    self.mock_benchmark["path"] = "sh"
    self.mock_benchmark["check_returncode"] = False
    self.job_config["args"] = ["-c", 'echo "error" >&2; exit 1']
    # Must complete without raising despite the shell exiting 1.
    Job(self.job_config, self.mock_benchmark).run()
def test_run_fail_no_check_returncode(self):
    """A nonzero exit status is tolerated when check_returncode is False."""
    self.mock_benchmark['path'] = 'sh'
    self.mock_benchmark['check_returncode'] = False
    self.job_config['args'] = ['-c', 'echo "error" >&2; exit 1']
    # Must complete without raising despite the shell exiting 1.
    Job(self.job_config, self.mock_benchmark).run()
def test_run_fail(self):
    """Exit status 1 raises CalledProcessError carrying captured output."""
    self.job_config['args'] = ['-c', 'echo "error" >&2; exit 1']
    self.mock_benchmark['path'] = 'sh'
    job = Job(self.job_config, self.mock_benchmark)
    with self.assertRaises(subprocess.CalledProcessError) as ctx:
        job.run()
    # The exception output bundles both streams with section headers.
    self.assertEqual(
        'stdout:\n\nstderr:\nerror', ctx.exception.output.rstrip()
    )
def test_run_fail(self):
    """Exit status 1 raises CalledProcessError carrying captured output."""
    self.job_config["args"] = ["-c", 'echo "error" >&2; exit 1']
    self.mock_benchmark["path"] = "sh"
    job = Job(self.job_config, self.mock_benchmark)
    with self.assertRaises(subprocess.CalledProcessError) as ctx:
        job.run()
    # The exception output bundles both streams with section headers.
    self.assertEqual(
        "stdout:\n\nstderr:\nerror", ctx.exception.output.rstrip()
    )
def test_run_timeout_is_pass(self):
    """With timeout_is_pass, a timed-out run still counts as a success.

    Output produced before the timeout is handed to the parser and the
    return code is reported as 0.
    """
    self.job_config["timeout"] = 0.1
    self.job_config["timeout_is_pass"] = True
    self.job_config["args"] = [
        "-c",
        'echo "wow" && echo "err" > /dev/stderr && sleep 2',
    ]
    self.mock_benchmark["path"] = "/bin/sh"
    self.mock_parser.parse.return_value = {"success": "True"}
    Job(self.job_config, self.mock_benchmark).run()
    self.mock_parser.parse.assert_called_with(["wow"], ["err"], 0)
def test_tee_stdouterr(self):
    """tee_output option works correctly.

    With tee_output=True, the job should print the subprocess stdout
    lines prefixed with 'stdout:' and stderr lines prefixed with
    'stderr:'.
    """
    mock_data = "line 1 from echo\nthis is the second line"
    self.job_config["args"] = [mock_data]
    self.job_config["metrics"] = ["key"]
    self.job_config["tee_output"] = True
    self.mock_benchmark["path"] = "echo"
    self.mock_parser.parse.return_value = {"key": "hello"}
    job = Job(self.job_config, self.mock_benchmark)
    # Capture stdout/stderr. Restore them in a `finally` block: the
    # original restored only on the success path, so one failing
    # assertion left the redirection in place for every later test.
    orig_stdout, orig_stderr = sys.stdout, sys.stderr
    sys.stdout = io.StringIO()
    sys.stderr = io.StringIO()
    try:
        job.run()
        expected = "stdout: line 1 from echo\nstdout: this is the second line\n"
        self.assertEqual(sys.stdout.getvalue(), expected)
        # Reset the captured buffer, then exercise stderr and stdout
        # together.
        sys.stdout.truncate(0)
        sys.stdout.seek(0)
        self.mock_benchmark["path"] = "sh"
        self.job_config["args"] = [
            "-c", 'echo "error" >&2 && echo "from stdout"'
        ]
        self.job_config["tee_output"] = True
        job = Job(self.job_config, self.mock_benchmark)
        job.run()
        expected = "stdout: from stdout\nstderr: error\n"
        self.assertEqual(sys.stdout.getvalue(), expected)
    finally:
        sys.stdout = orig_stdout
        sys.stderr = orig_stderr
def test_hooks(self):
    """Job runs before/after hooks in stack (LIFO) order."""
    self.mock_benchmark["path"] = "true"
    self.job_config["hooks"] = [
        {"hook": "first", "options": {"a": 1}},
        {"hook": "second", "options": {"b": 1}},
    ]
    tracker = MagicMock()
    # Map each hook name to a child of one shared tracker so call order
    # across both hooks is recorded in a single method_calls list.
    hooks_by_name = {"first": tracker.first, "second": tracker.second}
    HookFactory.create.side_effect = (
        lambda name: hooks_by_name.get(name, tracker.second)
    )
    job = Job(self.job_config, self.mock_benchmark)
    job.run()
    self.assertListEqual(
        [
            call.first.before_job({"a": 1}, job),
            call.second.before_job({"b": 1}, job),
            # after_job hooks fire in reverse registration order
            call.second.after_job({"b": 1}, job),
            call.first.after_job({"a": 1}, job),
        ],
        tracker.method_calls,
    )
def test_tee_output_file(self):
    """tee_output can name a file; prefixed output is written there.

    Each subprocess stdout line is written prefixed with 'stdout:' and
    each stderr line with 'stderr:'.
    """
    fd, teefile = tempfile.mkstemp()
    os.close(fd)
    # Remove the temp file even when an assertion fails (the original
    # only removed it on the success path, leaking it on failure).
    self.addCleanup(os.remove, teefile)
    self.mock_benchmark['path'] = 'sh'
    self.job_config['args'] = ['-c', 'echo "error" >&2 && echo "from stdout"']
    self.job_config['metrics'] = ['key']
    self.job_config['tee_output'] = teefile
    job = Job(self.job_config, self.mock_benchmark)
    job.run()
    expected = 'stdout: from stdout\nstderr: error\n'
    with open(teefile, 'r') as tmp:
        self.assertEqual(tmp.read(), expected)
def test_tee_output_file(self):
    """tee_output can name a file; prefixed output is written there.

    Each subprocess stdout line is written prefixed with 'stdout:' and
    each stderr line with 'stderr:'.
    """
    fd, teefile = tempfile.mkstemp()
    os.close(fd)
    # Remove the temp file even when an assertion fails (the original
    # only removed it on the success path, leaking it on failure).
    self.addCleanup(os.remove, teefile)
    self.mock_benchmark["path"] = "sh"
    self.job_config["args"] = [
        "-c", 'echo "error" >&2 && echo "from stdout"'
    ]
    self.job_config["metrics"] = ["key"]
    self.job_config["tee_output"] = teefile
    job = Job(self.job_config, self.mock_benchmark)
    job.run()
    expected = "stdout: from stdout\nstderr: error\n"
    with open(teefile, "r") as tmp:
        self.assertEqual(tmp.read(), expected)
def test_no_validate_metrics(self):
    """When validation is disabled, job leaves parsed metrics as-is.

    The '_no_validate' marker in the metrics config should make job.run
    pass through whatever the parser returns without filtering.
    """
    # Dead locals removed: the original built an unused
    # `config = defaultdict(str)` that nothing read.
    self.mock_benchmark['path'] = 'true'
    self.job_config['metrics'] = ['_no_validate', 'something']
    job = Job(self.job_config, self.mock_benchmark)
    # an empty set of metrics should stay empty
    self.mock_parser.parse.return_value = {}
    metrics = job.run()
    self.assertEqual(len(metrics.metrics_list()), 0)
    # a metric defined in the config should remain
    self.mock_parser.parse.return_value = {'something': 1}
    metrics = job.run()
    self.assertEqual(len(metrics.metrics_list()), 1)
    # extra metrics beyond those defined should all be kept
    self.mock_parser.parse.return_value = {'something': 1, 'extra': 2}
    metrics = job.run()
    self.assertEqual(len(metrics.metrics_list()), 2)
def test_run_succeed(self):
    """Echo runs and its output is parsed correctly.

    Run a job that echoes some JSON and verify it is parsed and
    exported correctly.
    """
    mock_data = '{"key": "hello"}'
    self.job_config['args'] = [mock_data]
    self.job_config['metrics'] = ['key']
    self.mock_benchmark['path'] = 'echo'
    self.mock_parser.parse.return_value = {'key': 'hello'}
    metrics = Job(self.job_config, self.mock_benchmark).run()
    # echo terminates its output with a newline, hence the trailing ''
    self.mock_parser.parse.assert_called_with([mock_data, ''], [''], 0)
    self.assertDictEqual({'key': 'hello'}, metrics.metrics())
def test_run_succeed(self):
    """Echo runs and its output is parsed correctly.

    Run a job that echoes some JSON and verify it is parsed and
    exported correctly.
    """
    payload = '{"key": "hello"}'
    self.job_config["args"] = [payload]
    self.job_config["metrics"] = ["key"]
    self.mock_benchmark["path"] = "echo"
    self.mock_parser.parse.return_value = {"key": "hello"}
    job = Job(self.job_config, self.mock_benchmark)
    result = job.run()
    # NOTE(review): this variant expects stripped output lists and a
    # plain dict return, unlike the sibling test_run_succeed which
    # expects trailing-'' lines and a metrics object -- presumably two
    # versions of the Job contract; confirm which is current.
    self.mock_parser.parse.assert_called_with([payload], [], 0)
    self.assertDictEqual({"key": "hello"}, result)