def test_schedule_rps(self): executor = PBenchExecutor() executor.engine = EngineEmul() executor.engine.config.merge({"provisioning": "test"}) rps = 9 rampup = 12 executor.execution.merge({ "throughput": rps, "ramp-up": rampup, "steps": 3, "hold-for": 0 }) obj = Scheduler(executor.get_load(), io.BytesIO(b("4 test\ntest\n")), logging.getLogger("")) cnt = 0 cur = 0 currps = 0 for item in obj.generate(): # logging.debug("Item: %s", item) if int(math.ceil(item[0])) != cur: # self.assertLessEqual(currps, rps) cur = int(math.ceil(item[0])) logging.debug("RPS: %s", currps) currps = 0 cnt += 1 currps += 1 logging.debug("RPS: %s", currps)
def test_schedule_throughput_only(self):
    executor = PBenchExecutor()
    executor.engine = EngineEmul()
    executor.execution.merge({"throughput": 5})
    obj = Scheduler(executor.get_load(), io.BytesIO(b("5 test1\ntest1\n5 test2\ntest2\n")),
                    logging.getLogger(""))
    items = list(obj.generate())
    self.assertGreater(len(items), 0)

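# The tests here build their payloads inline. A hypothetical helper (not part
# of the original suite) showing how such a length-prefixed record could be
# composed, assuming the "<byte-size> <marker>\n<data>\n" layout inferred
# from the samples above:
def make_ammo_record(marker, data):
    # e.g. make_ammo_record("test1", "test1") -> b"5 test1\ntest1\n"
    return b("%s %s\n%s\n" % (len(data), marker, data))
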
def test_schedule_empty(self):
    executor = PBenchExecutor()
    executor.engine = EngineEmul()
    # concurrency: 1, iterations: 1
    obj = Scheduler(executor.get_load(), io.BytesIO(b("4 test\ntest\n")), logging.getLogger(""))
    items = list(obj.generate())
    for item in items:
        logging.debug("Item: %s", item)
    self.assertEqual(1, len(items))

def test_schedule_concurrency_steps(self):
    executor = PBenchExecutor()
    executor.engine = EngineEmul()
    executor.execution.merge({"concurrency": 5, "ramp-up": 10, "steps": 3})
    obj = Scheduler(executor.get_load(), io.BytesIO(b("5 test1\ntest1\n5 test2\ntest2\n")),
                    logging.getLogger(""))
    items = list(obj.generate())
    self.assertEqual(8, len(items))
    self.assertEqual(-1, items[5][0])  # instance became unlimited
    self.assertEqual(Scheduler.REC_TYPE_LOOP_START, items[6][5])  # looped payload

def test_schedule_string_payload_unsupported(self):
    executor = PBenchExecutor()
    executor.engine = EngineEmul()
    # text-mode (StringIO) payloads are rejected: Scheduler expects bytes
    try:
        obj = Scheduler(executor.get_load(), StringIO("4 test\ntest\n"), logging.getLogger(""))
        for item in obj.generate():
            logging.debug("Item: %s", item)
        self.fail()
    except NotImplementedError:
        pass

def test_schedule_concurrency(self): executor = PBenchExecutor() executor.engine = EngineEmul() executor.execution.merge({"concurrency": 5, "ramp-up": 10, "hold-for": 5}) obj = Scheduler(executor.get_load(), StringIO("5 test1\ntest1\n5 test2\ntest2\n"), logging.getLogger("")) items = list(obj.generate()) logging.debug("%s", items) self.assertEqual(8, len(items)) self.assertEqual(-1, items[5][0]) # instance became unlimited self.assertEqual(1, items[6][5]) # looped payload
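# The index-based assertions in the concurrency tests above encode the layout
# of generated schedule records, as exercised here: item[0] is the scheduled
# time offset (-1 once the instance count becomes unlimited) and item[5] is
# the record type, where Scheduler.REC_TYPE_LOOP_START marks the point at
# which the payload is looped. No other fields are asserted on in this suite.
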
def check_schedule_size_estimate(self, execution):
    self.configure({
        ScenarioExecutor.EXEC: execution,
        "provisioning": "local",
    })
    load = self.obj.get_load()
    self.obj.generator = TaurusPBenchGenerator(self.obj, ROOT_LOGGER)
    self.obj.generator.generate_payload(self.obj.get_scenario())
    payload_count = len(self.obj.get_scenario().get('requests', []))
    sch = Scheduler(load, self.obj.generator.payload_file, ROOT_LOGGER)
    estimated_schedule_size = self.obj.generator._estimate_schedule_size(load, payload_count)
    ROOT_LOGGER.debug("Estimated schedule size: %s", estimated_schedule_size)
    items = list(sch.generate())
    actual_schedule_size = len(items)
    ROOT_LOGGER.debug("Actual schedule size: %s", actual_schedule_size)
    if actual_schedule_size != 0:
        error = abs(estimated_schedule_size - actual_schedule_size)
        error_rel = error / float(actual_schedule_size)
        ROOT_LOGGER.debug("Estimation error: %s", error)
        # tolerate up to 10% relative estimation error
        if error_rel >= 0.1:
            self.fail("Estimation failed (error=%s) on config %s" % (error_rel, pprint.pformat(execution)))

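# A minimal usage sketch for the helper above; the test name and execution
# values are illustrative assumptions, not taken from the original suite.
def test_schedule_size_estimate_rps(self):
    self.check_schedule_size_estimate({
        "throughput": 10,
        "ramp-up": 60,
        "hold-for": 30,
        "scenario": {"requests": ["/one", "/two"]},
    })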