class TestNewmanExecutor(BZTestCase):
    """Integration-style tests for NewmanExecutor, driven by a stub runner script."""

    # Platform-appropriate fake "newman" binary used instead of the real tool.
    RUNNER_STUB = RESOURCES_DIR + "newman/newman" + (".bat" if is_windows() else ".sh")

    def full_run(self, config):
        """Run the full executor lifecycle (prepare/startup/check/shutdown/post_process).

        :param config: engine config dict; its "execution" entry may be a dict or a list.
        """
        self.obj = NewmanExecutor()
        self.obj.engine = EngineEmul()
        # Wire the executor's env to the engine's, matching the other test
        # harness variants in this file; without it the executor does not see
        # engine-level environment settings.
        self.obj.env = self.obj.engine.env
        self.obj.engine.config.merge(config)
        execution = config["execution"][0] if isinstance(config["execution"], list) else config["execution"]
        self.obj.execution.merge(execution)
        self.obj.prepare()
        # Replace the real command line with the stub so no actual newman is needed.
        self.obj.get_launch_cmdline = lambda *args: [TestNewmanExecutor.RUNNER_STUB] + list(args)
        self.obj.startup()
        while not self.obj.check():
            time.sleep(self.obj.engine.check_interval)
        self.obj.shutdown()
        self.obj.post_process()

    def test_flow(self):
        """Happy path: a stub run produces exactly one PASSED sample in the report file."""
        self.full_run({"execution": {"scenario": {
            "script": RESOURCES_DIR + 'functional/postman.json',
            "globals": {"a": 123},
        }}})
        self.assertTrue(os.path.exists(self.obj.report_file))
        with open(self.obj.report_file) as fds:
            samples = [json.loads(line) for line in fds.readlines()]
        self.assertEqual(1, len(samples))
        sample = samples[0]
        self.assertEqual(sample["status"], "PASSED")
        self.assertEqual(sample["test_case"], "should load")
class TestNewmanExecutor(BZTestCase):
    """Tests for NewmanExecutor using a scripted stand-in for the newman CLI."""

    # Fake runner script; extension chosen per platform.
    RUNNER_STUB = RESOURCES_DIR + "newman/newman" + (".bat" if is_windows() else ".sh")

    def full_run(self, config):
        """Drive a complete executor lifecycle for the given config."""
        executor = NewmanExecutor()
        executor.engine = EngineEmul()
        executor.env = executor.engine.env
        executor.engine.config.merge(config)
        raw_execution = config["execution"]
        if isinstance(raw_execution, list):
            raw_execution = raw_execution[0]
        executor.execution.merge(raw_execution)
        self.obj = executor
        executor.prepare()
        # Redirect the launch command to the stub runner.
        stub = TestNewmanExecutor.RUNNER_STUB
        executor.get_launch_cmdline = lambda *args: [stub] + list(args)
        executor.startup()
        while not executor.check():
            time.sleep(executor.engine.check_interval)
        executor.shutdown()
        executor.post_process()

    def test_flow(self):
        """The stub run should yield exactly one passing sample."""
        scenario = {
            "script": RESOURCES_DIR + 'functional/postman.json',
            "globals": {"a": 123},
        }
        self.full_run({"execution": {"scenario": scenario}})
        self.assertTrue(os.path.exists(self.obj.report_file))
        with open(self.obj.report_file) as fds:
            samples = [json.loads(line) for line in fds.readlines()]
        self.assertEqual(1, len(samples))
        first = samples[0]
        self.assertEqual(first["status"], "PASSED")
        self.assertEqual(first["test_case"], "should load")
def full_run(self, config):
    """Execute the whole executor lifecycle against the stub runner."""
    self.obj = NewmanExecutor()
    self.obj.engine = EngineEmul()
    self.obj.env = self.obj.engine.env
    self.obj.engine.config.merge(config)
    execution = config["execution"]
    if isinstance(execution, list):
        execution = execution[0]
    self.obj.execution.merge(execution)
    self.obj.prepare()
    # Swap in the stub runner so no real newman install is required.
    self.obj.get_launch_cmdline = lambda *cmd: [TestNewmanExecutor.RUNNER_STUB] + list(cmd)
    self.obj.startup()
    # Poll until the stubbed process reports completion.
    while True:
        if self.obj.check():
            break
        time.sleep(self.obj.engine.check_interval)
    self.obj.shutdown()
    self.obj.post_process()
def full_run(self, config):
    """Lifecycle run that stubs out both tool detection and the newman binary."""

    def fake_exec(*args, **kwargs):
        # Pretend the external command succeeded with empty stdout/stderr.
        return "", ""

    self.obj = NewmanExecutor()
    self.obj.engine = EngineEmul()
    self.obj.engine.config.merge(config)
    execution = config["execution"]
    if isinstance(execution, list):
        execution = execution[0]
    self.obj.execution.merge(execution)
    original_aec = bzt.utils.exec_and_communicate
    try:
        # Patch so prepare() never actually invokes external processes.
        bzt.utils.exec_and_communicate = fake_exec
        self.obj.prepare()
    finally:
        bzt.utils.exec_and_communicate = original_aec
    # Point the tool at the stub script instead of the real newman.
    self.obj.node.tool_path = self.RUNNER_STUB
    self.obj.startup()
    self.obj.shutdown()
    self.obj.post_process()
def full_run(self, config):
    """Run the executor lifecycle (prepare/startup/check/shutdown/post_process).

    :param config: engine config dict; "execution" may be a dict or a list.
    """
    self.obj = NewmanExecutor()
    self.obj.engine = EngineEmul()
    # Wire the executor's env to the engine's, matching the sibling full_run
    # variant in this file; without it the executor does not see engine-level
    # environment settings.
    self.obj.env = self.obj.engine.env
    self.obj.engine.config.merge(config)
    execution = config["execution"][0] if isinstance(config["execution"], list) else config["execution"]
    self.obj.execution.merge(execution)
    self.obj.prepare()
    # Replace the real command line with the stub so no actual newman is needed.
    self.obj.get_launch_cmdline = lambda *args: [TestNewmanExecutor.RUNNER_STUB] + list(args)
    self.obj.startup()
    while not self.obj.check():
        time.sleep(self.obj.engine.check_interval)
    self.obj.shutdown()
    self.obj.post_process()
class TestNewmanExecutor(BZTestCase):
    """Executor tests backed by a stub newman script and patched tool checks."""

    # Stub binary path; EXE_SUFFIX selects the per-platform extension.
    RUNNER_STUB = RESOURCES_DIR + "newman/newman" + EXE_SUFFIX

    def full_run(self, config):
        """Run prepare/startup/shutdown/post_process against the stub tool."""

        def fake_exec(*args, **kwargs):
            # Simulate a successful external call with no output.
            return "", ""

        self.obj = NewmanExecutor()
        self.obj.engine = EngineEmul()
        self.obj.engine.config.merge(config)
        execution = config["execution"]
        if isinstance(execution, list):
            execution = execution[0]
        self.obj.execution.merge(execution)
        saved = bzt.utils.exec_and_communicate
        try:
            # Keep prepare() from invoking real external processes.
            bzt.utils.exec_and_communicate = fake_exec
            self.obj.prepare()
        finally:
            bzt.utils.exec_and_communicate = saved
        # Use the stub script in place of the real newman binary.
        self.obj.node.tool_path = self.RUNNER_STUB
        self.obj.startup()
        self.obj.shutdown()
        self.obj.post_process()

    def test_flow(self):
        """A stub run produces a single PASSED sample for 'should load'."""
        scenario = {
            "script": RESOURCES_DIR + 'functional/postman.json',
            "globals": {"a": 123},
        }
        self.full_run({"execution": {"scenario": scenario}})
        self.assertTrue(os.path.exists(self.obj.report_file))
        with open(self.obj.report_file) as fds:
            samples = [json.loads(line) for line in fds.readlines()]
        self.assertEqual(1, len(samples))
        sample = samples[0]
        self.assertEqual(sample["status"], "PASSED")
        self.assertEqual(sample["test_case"], "should load")
def test_flow(self):
    """Full run through the consolidating aggregator; results must be collected."""
    obj = NewmanExecutor()
    obj.engine = EngineEmul()
    obj.env = obj.engine.env
    aggregator = ConsolidatingAggregator()
    obj.engine.aggregator = aggregator
    scenario = {
        "script": RESOURCES_DIR + 'functional/postman.json',
        "globals": {"a": 123},
    }
    obj.engine.config.merge({"scenarios": {"newman": scenario}})
    obj.execution.merge({"scenario": "newman"})
    # Mirror the engine's lifecycle ordering: aggregator wraps the executor.
    aggregator.prepare()
    obj.prepare()
    obj.startup()
    aggregator.startup()
    while not obj.check():
        aggregator.check()
        time.sleep(obj.engine.check_interval)
    obj.shutdown()
    aggregator.shutdown()
    obj.post_process()
    aggregator.post_process()
    self.assertTrue(obj.has_results())
def test_broken(self):
    """A failing request must surface as a FAILED sample carrying its assertion message."""
    obj = NewmanExecutor()
    obj.engine = EngineEmul()
    obj.env = obj.engine.env
    obj.engine.aggregator = ConsolidatingAggregator()
    # NOTE(review): this scenario uses the same script as the passing test yet
    # expects a FAILED sample — confirm a broken collection is intended here.
    scenario = {
        "script": RESOURCES_DIR + 'functional/postman.json',
        "globals": {"a": 123},
    }
    obj.engine.config.merge({"scenarios": {"newman": scenario}})
    obj.execution.merge({"scenario": "newman"})
    obj.engine.aggregator.prepare()
    obj.prepare()
    obj.startup()
    obj.engine.aggregator.startup()
    while not obj.check():
        obj.engine.aggregator.check()
        time.sleep(obj.engine.check_interval)
    obj.shutdown()
    obj.engine.aggregator.shutdown()
    obj.post_process()
    obj.engine.aggregator.post_process()
    self.assertTrue(obj.has_results())
    with open(obj.report_file) as fds:
        lines = fds.readlines()
    samples = [json.loads(each) for each in lines]
    self.assertEqual(1, len(samples))
    sample = samples[0]
    self.assertEqual(sample["status"], "FAILED")
    self.assertEqual(sample["error_msg"], "expect response be 200")