def test_pbench_payload_relpath(self):
    """Verify that enhanced pbench preserves relative script path."""
    script_path = "tests/data/pbench.src"
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config = BetterDict()
    obj.engine.config.merge({
        ScenarioExecutor.EXEC: {
            "executor": "pbench",
            # reuse script_path so the configured value and the asserted value
            # cannot drift apart (the literal was duplicated before)
            "scenario": {"script": script_path},
        },
        "provisioning": "test",
    })
    obj.execution = obj.engine.config['execution']
    obj.settings.merge({
        "path": os.path.join(os.path.dirname(__file__), '..', "phantom.sh"),
        "enhanced": True,
    })
    obj.prepare()
    # the generated config must still reference the script by its relative path
    pbench_conf = os.path.join(obj.engine.artifacts_dir, "pbench.conf")
    with open(pbench_conf) as conf_fds:
        config = conf_fds.read()
    self.assertIn(script_path, config)
def test_pbench_payload_relpath(self):
    "Verify that enhanced pbench preserves relative script path"
    script_path = "tests/data/pbench.src"
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config = BetterDict()
    execution_cfg = {
        "executor": "pbench",
        "scenario": {"script": "tests/data/pbench.src"},
    }
    obj.engine.config.merge({
        ScenarioExecutor.EXEC: execution_cfg,
        "provisioning": "test",
    })
    obj.execution = obj.engine.config['execution']
    phantom_path = os.path.join(os.path.dirname(__file__), '..', "phantom.sh")
    obj.settings.merge({"path": phantom_path, "enhanced": True})
    obj.prepare()
    # the generated pbench.conf must mention the original relative script path
    conf_path = os.path.join(obj.engine.artifacts_dir, "pbench.conf")
    with open(conf_path) as handle:
        generated = handle.read()
    self.assertIn(script_path, generated)
def test_improved_request_building(self):
    """Verify GET query parameters from the YAML scenario reach the schedule file."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config = BetterDict()
    # context manager closes the fixture deterministically — the previous
    # open(...).read() leaked the file handle; yaml.load is acceptable here
    # since the fixture is trusted test data, not external input
    with open(__dir__() + "/../yaml/phantom_improved_request.yml") as config_fds:
        obj.engine.config.merge(yaml.load(config_fds.read()))
    obj.execution = obj.engine.config['execution'][0]
    obj.settings.merge({
        "path": os.path.join(os.path.dirname(__file__), '..', "phantom.sh"),
    })
    obj.prepare()
    with open(obj.pbench.schedule_file) as fds:
        config = fds.readlines()
    # schedule lines look like "GET <url> ..."; take the URL token
    get_requests = [
        req_str.split(" ")[1]
        for req_str in config
        if req_str.startswith("GET")
    ]
    self.assertEqual(len(get_requests), 2)
    expected_params = {"get_param1": "value1", "get_param2": "value2"}
    for get_req in get_requests:
        self.assertEqual(
            dict(parse.parse_qsl(parse.urlsplit(get_req).query)),
            expected_params)
def test_diagnostics(self):
    """Run the full executor lifecycle and check error diagnostics are available."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    execution = {
        "throughput": 10,
        "hold-for": 30,
        "scenario": {
            "default-address": "http://blazedemo.com/",
            "requests": ["/"],
        },
    }
    obj.engine.config.merge({
        "provisioning": "test",
        ScenarioExecutor.EXEC: [execution],
    })
    obj.execution = obj.engine.config['execution'][0]
    obj.settings.merge({"path": RESOURCES_DIR + "pbench/phantom.sh"})
    obj.prepare()
    obj.startup()
    # poll a few times before tearing down
    for _iteration in (0, 1, 2):
        obj.check()
    obj.shutdown()
    obj.post_process()
    self.assertIsNotNone(obj.get_error_diagnostics())
def test_widget(self):
    """Check the console widget exposes progress bar, duration and target label."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config.merge({
        "provisioning": "test",
        ScenarioExecutor.EXEC: [
            {
                "throughput": 10,
                "hold-for": 30,
                "scenario": {
                    "default-address": "http://blazedemo.com/",
                    "requests": ["/"]
                }
            }
        ]})
    obj.execution = obj.engine.config['execution'][0]
    obj.settings.merge({
        "path": os.path.join(os.path.dirname(__file__), '..', "phantom.sh"),
    })
    obj.prepare()
    obj.startup()
    obj.get_widget()
    # assertIsInstance over assertTrue(isinstance(...)): same pass/fail,
    # but reports the actual type on failure
    self.assertIsInstance(obj.widget.progress, urwid.ProgressBar)
    self.assertEqual(obj.widget.duration, 30)
    self.assertEqual(obj.widget.widgets[0].text, "Target: http://blazedemo.com:80")
    obj.check()
    obj.shutdown()
def test_widget(self):
    """Prepare and start the executor, then verify its console widget state."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    scenario = {
        "default-address": "http://blazedemo.com/",
        "requests": ["/"],
    }
    obj.engine.config.merge({
        "provisioning": "test",
        ScenarioExecutor.EXEC: [{"throughput": 10, "hold-for": 30, "scenario": scenario}],
    })
    obj.execution = obj.engine.config['execution'][0]
    tool_path = os.path.join(os.path.dirname(__file__), '..', "phantom.sh")
    obj.settings.merge({"path": tool_path})
    obj.prepare()
    obj.startup()
    obj.get_widget()
    widget = obj.widget
    self.assertTrue(isinstance(widget.progress, urwid.ProgressBar))
    self.assertEqual(widget.duration, 30)
    self.assertEqual(widget.widgets[0].text, "Target: http://blazedemo.com:80")
    obj.check()
    obj.shutdown()
def test_simple(self):
    """End-to-end run: prepare, startup, check loop, shutdown, post-process."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.engine.aggregator = ConsolidatingAggregator()
    obj.engine.aggregator.add_listener(DataPointLogger())
    obj.engine.config.merge({"provisioning": "test"})
    if os.path.exists("/home/undera/Sources/phantom"):  # FIXME: not good, get rid of it
        obj.settings.merge({
            "path": "/home/undera/Sources/phantom/bin/phantom",
            "modules-path": "/home/undera/Sources/phantom/lib/phantom",
        })
    else:
        obj.settings.merge({
            "path": os.path.join(os.path.dirname(__file__), "..", "phantom.sh"),
        })
    scenario = {
        "timeout": 1,
        "default-address": "http://localhost:33",
        "headers": {"Connection": "close"},
        "requests": [
            # "/",
            {
                "url": "/api",
                "method": "POST",
                "headers": {"Content-Length": 0},
                "body": {"param": "value"},
            },
        ],
    }
    obj.execution.merge({
        "log-responses": "proto_error",
        # "iterations": 5000000,
        "concurrency": 10,
        "throughput": 1000,
        "ramp-up": "1m",
        # "steps": 5,
        "hold-for": "15",
        "scenario": scenario,
    })
    obj.engine.aggregator.prepare()
    obj.prepare()
    obj.engine.aggregator.startup()
    obj.startup()
    while not obj.check():
        logging.debug("Running...")
        obj.engine.aggregator.check()
        time.sleep(1)
    obj.shutdown()
    obj.engine.aggregator.shutdown()
    obj.post_process()
    obj.engine.aggregator.post_process()
def test_simple(self):
    """Full lifecycle smoke test against an unreachable target, with aggregation."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.engine.aggregator = ConsolidatingAggregator()
    obj.engine.aggregator.add_listener(DataPointLogger())
    obj.engine.config.merge({"provisioning": "test"})
    local_phantom = os.path.exists("/home/undera/Sources/phantom")
    if local_phantom:  # FIXME: not good, get rid of it
        settings = {
            "path": "/home/undera/Sources/phantom/bin/phantom",
            "modules-path": "/home/undera/Sources/phantom/lib/phantom",
        }
    else:
        settings = {
            "path": os.path.join(os.path.dirname(__file__), "..", "resources", "pbench", "phantom.sh"),
        }
    obj.settings.merge(settings)
    post_request = {
        "url": "/api",
        "method": "POST",
        "headers": {"Content-Length": 0},
        "body": {"param": "value"},
    }
    obj.execution.merge({
        "log-responses": "proto_error",
        # "iterations": 5000000,
        "concurrency": 10,
        "throughput": 1000,
        "ramp-up": "1m",
        # "steps": 5,
        "hold-for": "15",
        "scenario": {
            "timeout": 1,
            "default-address": "http://localhost:33",
            "headers": {"Connection": "close"},
            "requests": [
                # "/",
                post_request,
            ],
        },
    })
    obj.engine.aggregator.prepare()
    obj.prepare()
    obj.engine.aggregator.startup()
    obj.startup()
    while not obj.check():
        logging.debug("Running...")
        obj.engine.aggregator.check()
        time.sleep(1)
    obj.shutdown()
    obj.engine.aggregator.shutdown()
    obj.post_process()
    obj.engine.aggregator.post_process()
def test_install_pbench(self):
    """prepare() must fail with a clear message when the pbench tool is missing."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config = BetterDict()
    obj.settings.merge({"path": "/notexistent"})
    # obj.execution = obj.engine.config['execution'][0]
    try:
        obj.prepare()
        self.fail()
    except RuntimeError as exc:
        # assertEqual replaces the deprecated assertEquals alias
        self.assertEqual("Please install PBench tool manually", str(exc))
def test_install_pbench(self):
    """prepare() must raise ToolError with a clear message when pbench is missing."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config = BetterDict()
    obj.settings.merge({"path": "/notexistent"})
    # obj.execution = obj.engine.config['execution'][0]
    try:
        obj.prepare()
        self.fail()
    except ToolError as exc:
        # assertEqual replaces the deprecated assertEquals alias
        self.assertEqual("Please install PBench tool manually", str(exc))
def test_same_address_port(self):
    """A scenario with conflicting address/port must be rejected at prepare()."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config = BetterDict()
    # context manager closes the fixture handle — the previous
    # open(...).read() leaked it; the YAML is trusted test data
    with open(__dir__() + "/../yaml/phantom_request_same_address.yml") as config_fds:
        obj.engine.config.merge(yaml.load(config_fds.read()))
    obj.execution = obj.engine.config["execution"][0]
    obj.settings.merge({"path": os.path.join(os.path.dirname(__file__), "..", "phantom.sh")})
    try:
        obj.prepare()
        self.fail()
    except ValueError:
        pass  # expected: conflicting address/port is a configuration error
def test_pbench_payload_py3_crash(self):
    """Regression: payload generation must not crash on many requests under py3."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config = BetterDict()
    requests = ["test%d" % idx for idx in range(20)]
    obj.engine.config.merge({
        ScenarioExecutor.EXEC: {
            "executor": "pbench",
            "scenario": {"requests": requests},
        },
        "provisioning": "test",
    })
    obj.execution = obj.engine.config['execution']
    tool_path = os.path.join(os.path.dirname(__file__), "..", "resources", "pbench", "phantom.sh")
    obj.settings.merge({"path": tool_path})
    obj.prepare()
def test_pbench_script(self):
    """Preparing with an existing pbench source script must succeed."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config = BetterDict()
    scenario = {"script": __dir__() + "/../resources/pbench/pbench.src"}
    obj.engine.config.merge({
        ScenarioExecutor.EXEC: {"executor": "pbench", "scenario": scenario},
        "provisioning": "test"
    })
    obj.execution = obj.engine.config['execution']
    tool_path = os.path.join(os.path.dirname(__file__), "..", "resources", "pbench", "phantom.sh")
    obj.settings.merge({"path": tool_path})
    obj.prepare()
def test_pbench_payload_py3_crash(self):
    """Regression: generating a payload of 20 requests must not crash on py3."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config = BetterDict()
    scenario = {"requests": ["test%d" % i for i in range(20)]}
    obj.engine.config.merge({
        ScenarioExecutor.EXEC: {"executor": "pbench", "scenario": scenario},
        "provisioning": "test",
    })
    obj.execution = obj.engine.config['execution']
    obj.settings.merge({
        "path": os.path.join(os.path.dirname(__file__), '..', "phantom.sh"),
    })
    obj.prepare()
def test_pbench_script(self):
    """prepare() should accept a scenario driven by a pbench source script."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config = BetterDict()
    execution_cfg = {
        "executor": "pbench",
        "scenario": {"script": RESOURCES_DIR + "pbench/pbench.src"},
    }
    obj.engine.config.merge({
        ScenarioExecutor.EXEC: execution_cfg,
        "provisioning": "test"
    })
    obj.execution = obj.engine.config['execution']
    obj.settings.merge({"path": RESOURCES_DIR + "pbench/phantom.sh"})
    obj.prepare()
def test_pbench_script(self):
    """Scenario backed by a pbench.src script file must prepare cleanly."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config = BetterDict()
    script = __dir__() + "/../data/pbench.src"
    obj.engine.config.merge({
        ScenarioExecutor.EXEC: {
            "executor": "pbench",
            "scenario": {"script": script},
        },
        "provisioning": "test"
    })
    obj.execution = obj.engine.config['execution']
    phantom = os.path.join(os.path.dirname(__file__), '..', "phantom.sh")
    obj.settings.merge({"path": phantom})
    obj.prepare()
def test_pbench_payload_py3_crash(self):
    """Regression check: large generated request list must not break prepare()."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config = BetterDict()
    generated = ["test%d" % num for num in range(20)]
    obj.engine.config.merge({
        ScenarioExecutor.EXEC: {
            "executor": "pbench",
            "scenario": {"requests": generated},
        },
        "provisioning": "test",
    })
    obj.execution = obj.engine.config['execution']
    obj.settings.merge({"path": RESOURCES_DIR + "pbench/phantom.sh"})
    obj.prepare()
def test_same_address_port(self):
    """prepare() must reject a scenario whose requests collide on address/port."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config = BetterDict()
    # close the fixture deterministically — open(...).read() leaked the handle;
    # yaml.load is fine for trusted test data
    with open(__dir__() + "/../yaml/phantom_request_same_address.yml") as config_fds:
        obj.engine.config.merge(yaml.load(config_fds.read()))
    obj.execution = obj.engine.config['execution'][0]
    obj.settings.merge({
        "path": os.path.join(os.path.dirname(__file__), '..', "phantom.sh"),
    })
    try:
        obj.prepare()
        self.fail()
    except ValueError:
        pass  # expected: conflicting address/port is a configuration error
def test_improved_request_building(self):
    """Check that GET query params from the YAML scenario end up in the schedule file."""
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config = BetterDict()
    # context manager closes the fixture — open(...).read() leaked the handle;
    # yaml.load is acceptable on trusted test data
    with open(__dir__() + "/../yaml/phantom_improved_request.yml") as config_fds:
        obj.engine.config.merge(yaml.load(config_fds.read()))
    obj.execution = obj.engine.config['execution'][0]
    obj.settings.merge({
        "path": os.path.join(os.path.dirname(__file__), '..', "phantom.sh"),
    })
    obj.prepare()
    with open(obj.pbench.schedule_file) as fds:
        config = fds.readlines()
    # schedule lines look like "GET <url> ..."; extract the URL token
    get_requests = [req_str.split(" ")[1] for req_str in config if req_str.startswith("GET")]
    self.assertEqual(len(get_requests), 2)
    expected = {"get_param1": "value1", "get_param2": "value2"}
    for get_req in get_requests:
        self.assertEqual(dict(parse.parse_qsl(parse.urlsplit(get_req).query)), expected)