def test_speed(self):
    obj = self.obj
    mock = MockReader()
    mock.buffer_scale_idx = '100.0'
    obj.add_listener(mock)

    res = {}
    # current measurements show ~25K samples/sec
    for cnt in (10000, 25000, 50000):
        for a in range(0, cnt):
            sample = (cnt, "", 1, r(1000), r(1000), r(1000), rc(), err(), '', 0)
            mock.data.append(sample)

        before = time.time()
        for point in mock.datapoints():
            pass
        after = time.time()
        res[cnt] = after - before

    ROOT_LOGGER.info("Times: %s", res)

    while mock.results:
        point = mock.results.pop(0)
        overall = point[DataPoint.CURRENT]['']
        self.assertTrue(len(overall[KPISet.PERCENTILES]) > 0)

    for point in mock.datapoints(True):
        pass
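# test_speed relies on module-level helpers r(), rc() and err() to randomize the
# sample tuples it feeds the reader. The helper names come from the test itself;
# the bodies below are a minimal sketch of what such generators could look like,
# not the actual test-support code.
import random


def r(mul=5):
    # random float in [0, mul), used for response/connect/latency times
    return random.random() * mul


def rc():
    # random HTTP-like response code: "200", "300" or "400"
    return "%s00" % (int(random.random() * 2) + 2)


def err():
    # occasionally return an error message, otherwise None
    return "Some error message" if random.random() < 0.02 else None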
def assertFilesEqual(expected, actual, replace_str="", replace_with="", python_files=False):
    def order(line):
        line = line.replace(',', ' ,')  # for imports
        line = line.replace('(', '( ')  # for
        line = line.replace(')', ' )')  # calls
        line = line.split(" ")
        line.sort()
        return ' '.join(line)

    def equal_by_content(diff):
        # todo: it doesn't show a diff for the following case; shouldn't we fix it?
        #  01: + func1()
        #  02:   func2()
        #  03: - func1()
        # (func1 moved and the order changed)
        act_lines = [line[1:] for line in diff if line.startswith('-')]
        exp_lines = [line[1:] for line in diff if line.startswith('+')]
        for pair in zip(act_lines, exp_lines):
            if order(pair[0]) != order(pair[1]):
                return False
        return True

    if isinstance(replace_str, str):
        replace_str = [replace_str]
    if isinstance(replace_with, str):
        replace_with = [replace_with]

    with open(expected) as exp, open(actual) as act:
        act_lines = act.readlines()
        exp_lines = exp.readlines()

    subs = dict(zip(replace_str, replace_with))
    subs.update({'<': '< ', '>': ' >'})  # for xml
    for key in subs:
        act_lines = [x.replace(key, subs[key]).rstrip() for x in act_lines]
        exp_lines = [x.replace(key, subs[key]).rstrip() for x in exp_lines]

    if python_files:
        act_lines = astunparse.unparse(ast.parse('\n'.join(act_lines))).split('\n')
        exp_lines = astunparse.unparse(ast.parse('\n'.join(exp_lines))).split('\n')

    diff = list(difflib.unified_diff(exp_lines, act_lines))
    if diff and not equal_by_content(diff[2:]):
        ROOT_LOGGER.info("Replacements are: %s => %s", replace_str, replace_with)
        msg = "Failed asserting that two files are equal:\n%s\nversus\n%s\nDiff is:\n\n%s"
        # here we show the full diff, even lines that are equal by content
        # todo: show only the really different lines
        raise AssertionError(msg % (actual, expected, "\n".join(diff)))
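# A hedged usage example for assertFilesEqual: comparing a generated script to a
# reference fixture while masking a varying path. Both file names below are
# hypothetical, for illustration only.
assertFilesEqual(RESOURCES_DIR + "expected_script.py", "/tmp/generated_script.py",
                 replace_str=RESOURCES_DIR, replace_with="<res_dir>",
                 python_files=True)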
def test_server_agent(self):
    obj = Monitoring()
    obj.engine = EngineEmul()
    obj.parameters.merge({
        "server-agent": [{
            "address": "127.0.0.1:4444",
            "logging": "True",
            "metrics": ["cpu", "disks"]
        }, {
            "address": "10.0.0.1",
            "metrics": ["something1", "something2"]
        }]
    })

    listener = LoggingMonListener()
    obj.add_listener(listener)

    widget = obj.get_widget()
    obj.add_listener(widget)

    crit_conf = BetterDict.from_dict({
        "condition": ">",
        "threshold": 5,
        "subject": "127.0.0.1:4444/cpu"
    })
    criteria = MonitoringCriteria(crit_conf, obj)
    obj.add_listener(criteria)

    obj.client_classes = {'server-agent': ServerAgentClientEmul}

    obj.prepare()
    obj.startup()

    for i in range(1, 10):
        obj.clients[0].socket.recv_data += b("%s\t%s\t\n" % (i, i * 10))
        obj.check()
        ROOT_LOGGER.debug("Criteria state: %s", criteria)
        time.sleep(obj.engine.check_interval)

    obj.shutdown()
    obj.post_process()

    self.assertEquals(b("test\ninterval:1\nmetrics:cpu\tdisks\nexit\n"), obj.clients[0].socket.sent_data)

    self.assertIsNotNone(obj.clients[0].logs_file)
    with open(obj.clients[0].logs_file) as serveragent_logs:
        logs_reader = csv.reader(serveragent_logs)
        logs_reader = list(logs_reader)
    self.assertEquals(['ts', 'cpu', 'disks'], logs_reader[0])
    for i in range(1, 10):
        self.assertEquals([str(i), str(i * 10)], logs_reader[i][1:])
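# test_server_agent swaps the real client for ServerAgentClientEmul, and the
# assertions above assume it replaces the network socket with an in-memory fake
# exposing sent_data/recv_data buffers. A minimal sketch of such a fake socket
# (the actual emulator lives in the test-support code; this is an assumption):
class SocketEmulSketch(object):
    def __init__(self):
        self.sent_data = b""   # everything the client wrote, for assertions
        self.recv_data = b""   # bytes the test queues up to be "received"

    def send(self, data):
        self.sent_data += data
        return len(data)

    def recv(self, buf_size):
        data, self.recv_data = self.recv_data[:buf_size], self.recv_data[buf_size:]
        return data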
def tearDown(self):
    exc, _, _ = sys.exc_info()
    if exc:
        try:
            if hasattr(self, 'obj') and isinstance(self.obj, SelfDiagnosable):
                diags = self.obj.get_error_diagnostics()
                if diags:
                    for line in diags:
                        ROOT_LOGGER.info(line)
        except BaseException:
            pass
    if self.captured_logger:
        self.captured_logger.removeHandler(self.log_recorder)
        self.log_recorder.close()
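# tearDown only pulls diagnostics when self.obj implements SelfDiagnosable; a
# rough sketch of such a mixin, assuming it simply declares the single hook the
# teardown calls (the real class ships with bzt, this is an illustration):
from abc import abstractmethod


class SelfDiagnosableSketch(object):
    @abstractmethod
    def get_error_diagnostics(self):
        """Return a list of log lines that help explain a failure, or None."""
        pass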
def tearDown(self):
    exc, _, _ = sys.exc_info()
    if exc:
        try:
            if hasattr(self, 'obj') and isinstance(self.obj, ScenarioExecutor):
                diags = self.obj.get_error_diagnostics()
                if diags:
                    for line in diags:
                        ROOT_LOGGER.info(line)
        except BaseException:
            pass
    if self.captured_logger:
        self.captured_logger.removeHandler(self.log_recorder)
        self.log_recorder.close()
    sys.stdout = self.stdout_backup
    super(BZTestCase, self).tearDown()
def test_prepare(self):
    config = json.loads(open(RESOURCES_DIR + "json/passfail.json").read())
    self.configure(config['reporting'][0])
    self.obj.prepare()
    self.assertGreater(len(self.obj.criteria), 0)

    for n in range(0, 10):
        point = random_datapoint(n)
        ROOT_LOGGER.info("%s: %s", n, point)
        self.obj.aggregated_second(point)

    try:
        self.obj.check()
    except AutomatedShutdown:
        pass

    try:
        self.obj.post_process()
    except AutomatedShutdown:
        pass
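# test_prepare feeds the reporter with random_datapoint(n), a shared test helper.
# Assuming it builds a DataPoint for second n with randomized KPIs, a reduced
# sketch could look like this (the real helper is richer; names other than
# DataPoint/KPISet are illustrative):
import random


def random_datapoint_sketch(n):
    point = DataPoint(n)
    overall = KPISet()
    overall[KPISet.SAMPLE_COUNT] = int(random.random() * 100) + 1
    overall[KPISet.SUCCESSES] = overall[KPISet.SAMPLE_COUNT]
    overall[KPISet.FAILURES] = 0
    point[DataPoint.CURRENT][''] = overall
    point[DataPoint.CUMULATIVE][''] = overall
    return point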
def test_binary_unicode_error(self):
    fd, fname = mkstemp()
    os.close(fd)
    file_handler = logging.FileHandler(fname, encoding="utf-8")
    file_handler.setLevel(logging.DEBUG)
    ROOT_LOGGER.addHandler(file_handler)

    try:
        session = Session(data={'id': 1})
        mock = BZMock(session)
        mock.mock_post['https://data.blazemeter.com/api/v4/image/1/files?signature=None'] = {"result": 1}
        binary_file = os.path.join(RESOURCES_DIR, 'gatling', 'simulations.jar')
        with open(binary_file, 'rb') as fds:
            zip_content = fds.read()
        session.upload_file("jtls_and_more.zip", zip_content)
    finally:
        ROOT_LOGGER.removeHandler(file_handler)
        file_handler.close()
        os.remove(fname)
def test_stdev_performance(self):
    start = time.time()
    self.configure(RESOURCES_DIR + "jmeter/jtl/slow-stdev.jtl")
    res = list(self.obj.datapoints(final_pass=True))
    lst_json = to_json(res)

    self.assertNotIn('"perc": {},', lst_json)

    elapsed = time.time() - start
    ROOT_LOGGER.debug("Elapsed/per datapoint: %s / %s", elapsed, elapsed / len(res))
    # self.assertLess(elapsed, len(res))  # less than 1 datapoint per sec is a no-go

    exp = [0.53060066889723, 0.39251356581014, 0.388405157629, 0.38927586980868,
           0.30511697736531, 0.21160424043633, 0.07339064994943]
    self.assertEqual(exp, [round(x[DataPoint.CURRENT][''][KPISet.STDEV_RESP_TIME], 14) for x in res])
def test_public_report(self):
    mock = BZMock()
    mock.mock_get.update({
        'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {"result": []}
    })

    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/projects': {"result": {'id': 1}},
        'https://a.blazemeter.com/api/v4/tests': {'result': {'id': 'unittest1'}},
        'https://a.blazemeter.com/api/v4/tests/unittest1/start-external': {"result": {
            'session': {'id': 'sess1', 'userId': 1, 'testId': 1},
            'master': {'id': 'master1', 'userId': 1},
            'signature': ''
        }},
        'https://a.blazemeter.com/api/v4/masters/master1/public-token': {'result': {'publicToken': 'publicToken'}},
        'https://data.blazemeter.com/submit.php?session_id=sess1&signature=&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {
            "result": {'session': {}}},
        'https://data.blazemeter.com/api/v4/image/sess1/files?signature=': {'result': True},
    })

    obj = BlazeMeterUploader()
    obj.settings['token'] = '123'
    obj.settings['browser-open'] = 'none'
    obj.settings['public-report'] = True
    obj.settings['send-monitoring'] = False
    obj.engine = EngineEmul()
    mock.apply(obj._user)
    self.sniff_log(obj.log)

    obj.prepare()
    obj.startup()
    obj.aggregated_second(random_datapoint(10))
    obj.check()
    obj.shutdown()
    obj.post_process()

    log_buff = self.log_recorder.info_buff.getvalue()
    log_line = "Public report link: https://a.blazemeter.com/app/?public-token=publicToken#/masters/master1/summary"
    self.assertIn(log_line, log_buff)

    ROOT_LOGGER.warning("\n".join([x['url'] for x in mock.requests]))
    self.assertEqual(14, len(mock.requests))
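# test_public_report assumes BZMock resolves each outgoing request against the
# mock_get/mock_post dicts keyed by full URL and records every call in
# mock.requests, which is why the exact request count can be asserted. A
# stripped-down sketch of that idea (the real BZMock lives in the test-support
# code; this class is an illustration only):
class TinyHTTPMock(object):
    def __init__(self):
        self.mock_get = {}
        self.mock_post = {}
        self.requests = []

    def request(self, method, url):
        self.requests.append({'method': method, 'url': url})
        table = self.mock_get if method == 'GET' else self.mock_post
        try:
            return table[url]  # canned JSON response for this exact URL
        except KeyError:
            raise AssertionError("Unexpected %s request: %s" % (method, url))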
def monitoring_data(self, data):
    ROOT_LOGGER.debug("Data: %s", data)