def test_new_project_existing_test(self):
    """A test already exists in the workspace: uploader still creates a project and a test under it.

    Fixed deprecated ``assertEquals`` alias -> ``assertEqual``.
    """
    obj = BlazeMeterUploader()
    mock = BZMock(obj._user)
    mock.mock_get.update({
        'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {'result': [
            {'id': 1, 'name': 'Taurus Test', 'configuration': {"type": 'external'}}
        ]},
        'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {'result': []}
    })
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/projects': {"result": {"id": 1}},
        'https://a.blazemeter.com/api/v4/tests': {"result": {"id": 1}},
    })
    obj.parameters['project'] = 'Proj name'
    obj.settings['token'] = '123'
    obj.settings['browser-open'] = 'none'
    obj.engine = EngineEmul()
    obj.prepare()
    # requests[4] must be the project creation POST, requests[6] the test creation POST
    self.assertEqual('https://a.blazemeter.com/api/v4/projects', mock.requests[4]['url'])
    self.assertEqual('POST', mock.requests[4]['method'])
    self.assertEqual('https://a.blazemeter.com/api/v4/tests', mock.requests[6]['url'])
    self.assertEqual('POST', mock.requests[6]['method'])
def test_graphite(self):
    """Graphite monitoring: logging client writes a CSV log, non-logging client does not.

    Fixed deprecated ``assertEquals`` alias -> ``assertEqual``.
    """
    obj = Monitoring()
    obj.engine = EngineEmul()
    obj.parameters.merge({
        "graphite": [{
            "address": "people.com:1066",
            "label": "Earth",
            "logging": True,
            "metrics": ["body", "brain"]
        }, {
            "address": "http://spirits.net",
            "metrics": ["transparency", "usability"]
        }]
    })
    obj.client_classes = {'graphite': GraphiteClientEmul}
    obj.prepare()
    obj.startup()
    obj.check()
    # rewind the last-check timestamp so the next check() actually polls again
    obj.clients[0]._last_check -= obj.clients[0].interval * 2
    obj.check()
    obj.clients[0]._last_check -= obj.clients[0].interval * 2
    obj.clients[0].prepared_data = "wrong data"  # malformed data must not crash check()
    obj.check()
    obj.shutdown()
    obj.post_process()
    self.assertIsNotNone(obj.clients[0].logs_file)  # "logging: True" client
    self.assertIsNone(obj.clients[1].logs_file)     # client without logging
    with open(obj.clients[0].logs_file) as graphite_logs:
        logs_reader = csv.reader(graphite_logs)
        logs_reader = list(logs_reader)
    self.assertEqual(['ts', 'body', 'brain'], logs_reader[0])
    self.assertEqual(['2'], logs_reader[1][1:])
def test_new_project_new_test(self):
    """Neither project nor test exist yet: both must be created via POST.

    Fixed deprecated ``assertEquals`` alias -> ``assertEqual``.
    """
    obj = BlazeMeterUploader()
    mock = BZMock(obj._user)
    mock.mock_get.update({
        'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {'result': []},
        'https://a.blazemeter.com/api/v4/projects?workspaceId=1': {'result': []}
    })
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/projects': {"result": {"id": 1}},
        'https://a.blazemeter.com/api/v4/tests': {"result": {"id": 1}},
    })
    obj.settings['token'] = '123'
    obj.settings['browser-open'] = 'none'
    obj.engine = EngineEmul()
    obj.prepare()
    # requests[6] is the project creation POST, requests[7] the test creation POST
    self.assertEqual('https://a.blazemeter.com/api/v4/projects', mock.requests[6]['url'])
    self.assertEqual('POST', mock.requests[6]['method'])
    self.assertEqual('https://a.blazemeter.com/api/v4/tests', mock.requests[7]['url'])
    self.assertEqual('POST', mock.requests[7]['method'])
def test_anonymous_feeding(self):
    """Anonymous (no token) run: a session is created and data is fed to data.blazemeter.com.

    Unified on non-deprecated ``assertEqual`` (block previously mixed it with ``assertEquals``).
    """
    obj = BlazeMeterUploader()
    obj.engine = EngineEmul()
    obj.browser_open = False
    mock = BZMock(obj._user)
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/sessions': {"result": {
            "signature": "sign",
            "publicTokenUrl": "publicUrl",
            "session": {"id": 1, "testId": 1, "userId": 1},
            "master": {"id": 1},
        }},
        'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {},
        'https://data.blazemeter.com/api/v4/image/1/files?signature=sign': {"result": True},
        'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=engine_health&update=1': {
            'result': {'session': {}}},
    })
    obj.prepare()
    obj.startup()
    obj.check()
    obj.shutdown()
    obj.post_process()
    self.assertEqual(1, obj._session['id'])
    self.assertEqual(6, len(mock.requests), "Requests were: %s" % mock.requests)
def test_some_errors(self):
    """Feed KPI errors of all three types and verify the serialized labels payload
    plus the note-appending PATCH calls performed during post-processing."""
    mock = BZMock()
    mock.mock_get.update({
        'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {"result": []},
        'https://a.blazemeter.com/api/v4/projects?workspaceId=1&name=Proj+name': {"result": []},
        'https://a.blazemeter.com/api/v4/sessions/1': {"result": {'id': 1, "note": "somenote"}},
        'https://a.blazemeter.com/api/v4/masters/1': {"result": {'id': 1, "note": "somenote"}},
    })
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/projects': {"result": {'id': 1}},
        'https://a.blazemeter.com/api/v4/tests': {"result": {'id': 1}},
        'https://a.blazemeter.com/api/v4/tests/1/start-external': {"result": {
            "session": {'id': 1, "testId": 1, "userId": 1},
            "master": {'id': 1},
            "signature": "sign",
        }},
        'https://data.blazemeter.com/api/v4/image/1/files?signature=sign': {"result": True},
        'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1' +
        '&pq=0&target=labels_bulk&update=1': {},
        'https://a.blazemeter.com/api/v4/sessions/1/stop': {"result": True},
        'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=engine_health&update=1': {
            'result': {'session': {}}},
    })
    mock.mock_patch.update({
        'https://a.blazemeter.com/api/v4/sessions/1': {"result": {"id": 1, "note": "somenote"}},
        'https://a.blazemeter.com/api/v4/masters/1': {"result": {"id": 1, "note": "somenote"}},
    })
    obj = BlazeMeterUploader()
    mock.apply(obj._user)
    obj.parameters['project'] = 'Proj name'
    obj.settings['token'] = '123'
    obj.settings['browser-open'] = 'none'
    obj.engine = EngineEmul()
    obj.prepare()
    obj.startup()
    obj.engine.stopping_reason = ValueError('wrong value')
    obj.aggregated_second(random_datapoint(10))
    # one error of each kind: assertion, plain error, sub-sample (embedded resource)
    obj.kpi_buffer[-1][DataPoint.CUMULATIVE][''][KPISet.ERRORS] = [
        {'msg': 'Forbidden', 'cnt': 10, 'type': KPISet.ERRTYPE_ASSERT,
         'urls': [], KPISet.RESP_CODES: '111', 'tag': None},
        {'msg': 'Allowed', 'cnt': 20, 'type': KPISet.ERRTYPE_ERROR,
         'urls': [], KPISet.RESP_CODES: '222'},
        {'msg': 'Not Found', 'cnt': 10, 'type': KPISet.ERRTYPE_SUBSAMPLE,
         'urls': {'/non': '404'}, KPISet.RESP_CODES: '404', 'tag': None},
    ]
    obj.post_process()
    obj.log.info("Requests: %s", mock.requests)

    # check for note appending in _postproc_phase3()
    tail = mock.requests[-4:]
    self.assertIn('api/v4/sessions/1', tail[0]['url'])
    self.assertIn('api/v4/sessions/1', tail[1]['url'])
    self.assertIn('api/v4/masters/1', tail[2]['url'])
    self.assertIn('api/v4/masters/1', tail[3]['url'])
    self.assertIn('ValueError: wrong value', str(tail[1]['data']))
    self.assertIn('ValueError: wrong value', str(tail[3]['data']))

    labels = mock.requests[8]['data']
    if not isinstance(labels, str):
        labels = labels.decode("utf-8")
    obj.log.info("Labels: %s", labels)
    data = json.loads(str(labels))
    self.assertEqual(1, len(data['labels']))
    total_item = data['labels'][0]
    self.assertEqual('ALL', total_item['name'])
    self.assertEqual(total_item['assertions'],
                     [{'failureMessage': 'Forbidden', 'failures': 10, 'name': 'All Assertions'}])
    self.assertEqual(total_item['errors'],
                     [{'m': 'Allowed', 'count': 20, 'rc': '222'}])
    self.assertEqual(total_item['failedEmbeddedResources'],
                     [{'url': '/non', 'count': 10, 'rc': '404', 'rm': 'Not Found'}])
def test_no_notes_for_public_reporting(self):
    """With an empty token (public reporting) no session/master note PATCHes must be sent."""
    mock = BZMock()
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/sessions/1/terminate-external': {},
        'https://data.blazemeter.com/submit.php?session_id=1&signature=None&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {},
    })
    obj = BlazeMeterUploader()
    obj.parameters['project'] = 'Proj name'
    obj.settings['token'] = ''  # public reporting
    obj.settings['browser-open'] = 'none'
    obj.engine = EngineEmul()
    mock.apply(obj._user)
    obj.prepare()
    obj._session = Session(obj._user, {'id': 1, 'testId': 1, 'userId': 1})
    obj._master = Master(obj._user, {'id': 1})
    obj.engine.stopping_reason = ValueError('wrong value')
    obj.aggregated_second(random_datapoint(10))
    obj.kpi_buffer[-1][DataPoint.CUMULATIVE][''][KPISet.ERRORS] = [
        {'msg': 'Forbidden', 'cnt': 10, 'type': KPISet.ERRTYPE_ASSERT,
         'urls': [], KPISet.RESP_CODES: '111', 'tag': ""},
        {'msg': 'Allowed', 'cnt': 20, 'type': KPISet.ERRTYPE_ERROR,
         'urls': [], KPISet.RESP_CODES: '222'},
    ]
    obj.send_monitoring = False
    obj.post_process()

    # TODO: looks like this whole block of checks is useless
    # check for note appending in _postproc_phase3()
    padding = [{'url': '', 'data': ''} for _ in range(4)]  # add template for minimal size
    tail = (padding + mock.requests)[-4:]
    self.assertNotIn('api/v4/sessions/1', tail[0]['url'])
    self.assertNotIn('api/v4/sessions/1', tail[1]['url'])
    self.assertNotIn('api/v4/masters/1', tail[2]['url'])
    self.assertNotIn('api/v4/masters/1', tail[3]['url'])
    if tail[1]['data']:
        self.assertNotIn('ValueError: wrong value', tail[1]['data'])
    if tail[3]['data']:
        self.assertNotIn('ValueError: wrong value', tail[3]['data'])
def test_exclude_problematic(self):
    """A broken module listed in "exclude" must not fail the check — it ends in NormalShutdown."""
    checker = InstallChecker()
    checker.engine = EngineEmul()
    checker.engine.config.get("modules")["err"] = "hello there"
    checker.settings["exclude"] = ["err"]
    self.assertRaises(NormalShutdown, checker.prepare)
def setUp(self):
    """Build a PipInstaller service wired to an emulated engine with an empty service config."""
    engine = EngineEmul()
    engine.config.merge({'services': {'pip-installer': []}})
    installer = PipInstaller()
    installer.engine = engine
    self.obj = installer
def setUp(self):
    """Prepare an emulated engine for converter tests."""
    super(TestConverter, self).setUp()
    self.engine = EngineEmul()
def setUp(self):
    """Instantiate the executor class under test and attach an emulated engine to it."""
    super(ExecutorTestCase, self).setUp()
    self.engine = EngineEmul()
    executor = self.EXECUTOR()
    executor.engine = self.engine
    self.obj = executor
def test_xml_report_test_duration_failed_prepare(self):
    """post_process() must survive a run where prepare() never happened (start_time is None)."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict()
    status.aggregated_second(self.__get_datapoint(ts=100))
    status.post_process()  # shouldn't raise ValueError because obj.start_time is None
def setUp(self):
    """Prepare an emulated engine for Swagger2YAML tests."""
    super(TestSwagger2YAML, self).setUp()
    self.engine = EngineEmul()
class TestSwagger2YAML(BZTestCase):
    """Conversion tests for the Swagger-to-Taurus-YAML tool.

    Improvements over the original: YAML fixture files are now opened via a
    context manager (they were opened and never closed — a file-handle leak),
    and the ten copy-pasted convert-and-compare bodies are collapsed into the
    private ``_assert_converted`` helper. Test names and behavior are unchanged.
    """

    def setUp(self):
        super(TestSwagger2YAML, self).setUp()
        self.engine = EngineEmul()

    def configure(self, options, source):
        self.tool = Swagger2YAML(options, source)
        self.clean_log()

    def _get_tmp(self, prefix='test', suffix='.yml'):
        # temp artifact path inside the emulated engine's artifacts dir
        return self.engine.create_artifact(prefix, suffix)

    def _load_yaml(self, path):
        # close the handle deterministically (original leaked open files)
        with open(path) as fhd:
            return yaml.full_load(fhd.read())

    def _assert_converted(self, source, expected, **options):
        # common flow: convert `source` with given FakeOptions, compare to `expected` fixture
        result = self._get_tmp()
        self.configure(FakeOptions(file_name=result, **options), source)
        self.tool.process()
        actual = self._load_yaml(result)
        self.assertEqual(actual, self._load_yaml(expected))

    def test_convert(self):
        self._assert_converted(RESOURCES_DIR + "/swagger/petstore.json",
                               RESOURCES_DIR + "/swagger/petstore-converted.yaml")

    def test_convert_scenarios_from_paths(self):
        self._assert_converted(RESOURCES_DIR + "/swagger/bzm-api.json",
                               RESOURCES_DIR + "/swagger/bzm-api-converted.yaml",
                               scenarios_from_paths=True)

    def test_convert_security_apikey_header(self):
        self._assert_converted(RESOURCES_DIR + "/swagger/auth-key.json",
                               RESOURCES_DIR + "/swagger/auth-key-converted.yaml")

    def test_convert_security_basic(self):
        self._assert_converted(RESOURCES_DIR + "/swagger/auth-basic.json",
                               RESOURCES_DIR + "/swagger/auth-basic-converted.yaml")

    def test_convert_security_basic_local(self):
        self._assert_converted(RESOURCES_DIR + "/swagger/auth-basic-local.json",
                               RESOURCES_DIR + "/swagger/auth-basic-local-converted.yaml")

    def test_convert_security_apikey_query(self):
        self._assert_converted(RESOURCES_DIR + "/swagger/auth-key-as-param.json",
                               RESOURCES_DIR + "/swagger/auth-key-as-param-converted.yaml")

    def test_convert_interpolation_values(self):
        self._assert_converted(RESOURCES_DIR + "/swagger/bzm-api.json",
                               RESOURCES_DIR + "/swagger/bzm-converted-values.yaml")

    def test_convert_interpolation_variables(self):
        self._assert_converted(RESOURCES_DIR + "/swagger/bzm-api.json",
                               RESOURCES_DIR + "/swagger/bzm-converted-variables.yaml",
                               parameter_interpolation=Swagger.INTERPOLATE_WITH_JMETER_VARS)

    def test_convert_interpolation_none(self):
        self._assert_converted(RESOURCES_DIR + "/swagger/bzm-api.json",
                               RESOURCES_DIR + "/swagger/bzm-converted-none.yaml",
                               parameter_interpolation=Swagger.INTERPOLATE_DISABLE)

    def test_convert_security_apikey_multiscenarios(self):
        self._assert_converted(RESOURCES_DIR + "/swagger/auth-key.json",
                               RESOURCES_DIR + "/swagger/auth-key-multiscenarios-converted.yaml",
                               scenarios_from_paths=True)
def setUp(self):
    """Create the engine emulator and resolve the local module-paths config."""
    super(TestEngine, self).setUp()
    self.obj = EngineEmul()
    self.paths = local_paths_config()
def test_check(self):
    """Full uploader lifecycle against a scripted mock: data feeding, a forced ENDED
    status (KeyboardInterrupt), and a final request-count check."""
    mock = BZMock()
    mock.mock_get.update({
        'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {"result": []},
        'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {"result": []},
        'https://a.blazemeter.com/api/v4/projects?workspaceId=1&name=Proj+name': {"result": []},
    })
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/projects': {"result": {
            "id": 1, "name": "boo", "userId": 2, "description": None,
            "created": time.time(), "updated": time.time(), "organizationId": None,
        }},
        'https://a.blazemeter.com/api/v4/tests': {"result": {'id': 1}},
        'https://a.blazemeter.com/api/v4/tests/1/start-external': {"result": {
            'session': {'id': 1, 'userId': 1, 'testId': 1},
            'master': {'id': 1, 'userId': 1},
            'signature': 'sign',
        }},
        # second response carries ENDED status, which makes check() raise KeyboardInterrupt
        'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': [
            {},
            {"result": {'session': {"statusCode": 140, 'status': 'ENDED'}}},
            {},
        ],
        # first monitoring push fails on purpose; uploader must survive it
        'https://data.blazemeter.com/api/v4/image/1/files?signature=sign': [
            IOError("monitoring push expected fail"),
            {"result": True}, {"result": True}, {"result": True}, {"result": True},
            {"result": True}, {"result": True}, {"result": True}, {"result": True},
        ],
        'https://a.blazemeter.com/api/v4/sessions/1/stop': {},
        'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=engine_health&update=1': {
            "result": {'session': {}}},
    })
    obj = BlazeMeterUploader()
    obj.parameters['project'] = 'Proj name'
    obj.settings['token'] = '123'
    obj.settings['browser-open'] = 'none'
    obj.engine = EngineEmul()
    shutil.copy(__file__, os.path.join(obj.engine.artifacts_dir, os.path.basename(__file__)))
    mock.apply(obj._user)
    obj._user.timeout = 0.1
    obj.prepare()
    obj.startup()
    for second in range(0, 31):
        obj.aggregated_second(random_datapoint(second))
    mon = [{"ts": 1, "source": "local", "cpu": 1, "mem": 2, "bytes-recv": 100, "other": 0}]
    obj.monitoring_data(mon)
    obj.check()
    for second in range(32, 65):
        obj.aggregated_second(random_datapoint(second))
    obj.last_dispatch = time.time() - 2 * obj.send_interval  # force a dispatch on next check
    self.assertRaises(KeyboardInterrupt, obj.check)
    obj.aggregated_second(random_datapoint(10))
    obj.shutdown()
    log_file = obj.engine.create_artifact('log', '.tmp')
    handler = logging.FileHandler(log_file)
    obj.engine.log.parent.addHandler(handler)
    obj.engine.config.get('modules').get('shellexec').get('env')['TAURUS_INDEX_ALL'] = 1
    obj.post_process()
    self.assertEqual(20, len(mock.requests))
    obj.engine.log.parent.removeHandler(handler)
def test_public_report(self):
    """With 'public-report' enabled the uploader requests a public token and logs the link."""
    mock = BZMock()
    mock.mock_get.update({
        'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {"result": []}
    })
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/projects': {"result": {'id': 1}},
        'https://a.blazemeter.com/api/v4/tests': {'result': {'id': 'unittest1'}},
        'https://a.blazemeter.com/api/v4/tests/unittest1/start-external': {"result": {
            'session': {'id': 'sess1', 'userId': 1, 'testId': 1},
            'master': {'id': 'master1', 'userId': 1},
            'signature': '',
        }},
        'https://a.blazemeter.com/api/v4/masters/master1/public-token': {
            'result': {'publicToken': 'publicToken'}},
        'https://data.blazemeter.com/submit.php?session_id=sess1&signature=&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {
            "result": {'session': {}}},
        'https://data.blazemeter.com/api/v4/image/sess1/files?signature=': {'result': True},
    })
    obj = BlazeMeterUploader()
    obj.settings['token'] = '123'
    obj.settings['browser-open'] = 'none'
    obj.settings['public-report'] = True
    obj.settings['send-monitoring'] = False
    obj.engine = EngineEmul()
    mock.apply(obj._user)
    self.sniff_log(obj.log)

    obj.prepare()
    obj.startup()
    obj.aggregated_second(random_datapoint(10))
    obj.check()
    obj.shutdown()
    obj.post_process()

    log_buff = self.log_recorder.info_buff.getvalue()
    log_line = "Public report link: https://a.blazemeter.com/app/?public-token=publicToken#/masters/master1/summary"
    self.assertIn(log_line, log_buff)
    ROOT_LOGGER.warning("\n".join([x['url'] for x in mock.requests]))
    self.assertEqual(14, len(mock.requests))
def test_extend_datapoints(self):
    # check reported data format conversion for test state filtering on BM side
    def get_mock(origin_func, store):
        # generate replacement for BlazemeterUploader._dpoint_serializer.get_kpi_body
        def mock_get_kpi_body(data, isfinal):
            store.append(data)  # save received data for verifying
            return origin_func(data, isfinal)  # call original get_kpi_body as well
        return mock_get_kpi_body

    mock = BZMock()
    mock.mock_get.update({
        '1': {"result": []},
        'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {"result": []},
        '3': {"result": []},
    })
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/projects': {"result": {"id": 1}},
        'https://a.blazemeter.com/api/v4/tests': {"result": {'id': 1}},
        'https://a.blazemeter.com/api/v4/tests/1/start-external': {"result": {
            'session': {'id': 1, 'userId': 1, 'testId': 1},
            'master': {'id': 1, 'userId': 1},
            'signature': 'sign',
        }},
        'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': [
            {},
            {"result": {'session': {"statusCode": 140, 'status': 'ENDED'}}},
            {},
        ],
        'https://data.blazemeter.com/api/v4/image/1/files?signature=sign': [
            IOError("monitoring push expected fail"),
            {"result": True}, {"result": True}, {"result": True}, {"result": True},
            {"result": True}, {"result": True}, {"result": True}, {"result": True},
        ],
        'https://a.blazemeter.com/api/v4/sessions/1/stop': {},
        'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=engine_health&update=1': {
            "result": {'session': {}}},
    })
    obj = BlazeMeterUploader()
    sent_data_points = []
    obj._dpoint_serializer.get_kpi_body = get_mock(
        obj._dpoint_serializer.get_kpi_body, sent_data_points)
    obj.parameters['project'] = 'Proj name'
    obj.settings['token'] = '123'
    obj.settings['browser-open'] = 'none'
    obj.engine = EngineEmul()
    aggregator = ConsolidatingAggregator()
    aggregator.engine = obj.engine
    aggregator.settings['extend-aggregation'] = True
    reader = MockReader()
    watcher = MockReader()
    reader.buffer_scale_idx = '100.0'
    # data format: t_stamp, label, conc, r_time, con_time, latency, r_code, error, trname, byte_count
    reader.data.append((1, "a", 1, 1, 1, 1, 200, None, '', 1))
    reader.data.append((2, "b", 1, 2, 2, 2, 200, 'OK', '', 2))
    reader.data.append((2, "b", 1, 3, 3, 3, 404, "Not Found", '', 3))
    reader.data.append((2, "c", 1, 4, 4, 4, 200, None, '', 4))
    reader.data.append((3, "d", 1, 5, 5, 5, 200, None, '', 5))
    reader.data.append((5, "b", 1, 6, 6, 6, 200, None, '', 6))
    reader.data.append((5, "c", 1, 7, 7, 7, 200, None, '', 7))
    original_labels = list(d[1] for d in reader.data)

    aggregator.add_underling(reader)
    aggregator.add_listener(watcher)
    obj.engine.aggregator = aggregator
    mock.apply(obj._user)
    obj._user.timeout = 0.001

    # run aggregator and uploader through the full lifecycle, in lockstep
    obj.engine.aggregator.prepare()
    obj.prepare()
    obj.engine.aggregator.startup()
    obj.startup()
    obj.engine.aggregator.check()
    obj.check()
    obj.engine.aggregator.shutdown()
    obj.shutdown()
    obj.engine.aggregator.post_process()
    obj.post_process()

    sent_data_points = sent_data_points[0] + sent_data_points[1]
    state_labels = [0, 1, 2]
    for dp in sent_data_points:
        for data in dp['cumulative'], dp['current']:
            for label in data:
                self.assertIn(label, original_labels + [''])
                self.assertIsInstance(data[label], dict)
                for key in data[label]:
                    self.assertIn(key, state_labels)
def configure(self, jtl_file):
    """Create a functional-JTL reader over the given file, backed by an emulated engine."""
    emul = EngineEmul()
    self.obj = FuncJTLReader(jtl_file, emul, ROOT_LOGGER)
def get_mqtt_sample(config):
    """Build an MQTT request from a raw config dict and return its (sampler, request) pair."""
    req = MQTTRequest(BetterDict.from_dict(config), {})
    handler = MQTTProtocolHandler({}, EngineEmul())
    sample, _ = handler.get_sampler_pair(req)
    return sample, req
def setUp(self):
    """Create a ConsolidatingAggregator attached to an emulated engine."""
    super(TestConsolidatingAggregator, self).setUp()
    aggregator = ConsolidatingAggregator()
    aggregator.engine = EngineEmul()
    self.obj = aggregator
def setUp(self):
    """Create a PassFailStatus mock attached to an emulated engine."""
    super(TestPassFailStatus, self).setUp()
    status = PassFailStatusMock()
    status.engine = EngineEmul()
    self.obj = status
def test_problematic(self):
    """A broken module config (string instead of class spec) must raise ToolError on prepare."""
    checker = InstallChecker()
    checker.engine = EngineEmul()
    checker.engine.config.get("modules")["err"] = "hello there"
    self.assertRaises(ToolError, checker.prepare)
class TestEngine(BZTestCase):
    """Engine configuration/lifecycle tests.

    Fixed deprecated ``assertEquals``/``assertNotEquals`` aliases ->
    ``assertEqual``/``assertNotEqual``; behavior is otherwise unchanged.
    """

    def setUp(self):
        super(TestEngine, self).setUp()
        self.obj = EngineEmul()
        self.paths = local_paths_config()
        # swap real Selenium tool classes for mocks; restored in tearDown
        self.tmp_selenium = bzt.modules._selenium.Selenium
        bzt.modules._selenium.Selenium = MockPythonTool
        self.tmp_apiritif_selenium = bzt.modules._apiritif.executor.Selenium
        bzt.modules._apiritif.executor.Selenium = MockPythonTool

    def tearDown(self):
        super(TestEngine, self).tearDown()
        bzt.modules._selenium.Selenium = self.tmp_selenium
        bzt.modules._apiritif.executor.Selenium = self.tmp_apiritif_selenium

    def test_find_file(self):
        self.sniff_log(self.obj.log)
        config = RESOURCES_DIR + "json/get-post.json"
        configs = [config, self.paths]
        self.obj.configure(configs)
        self.assertEqual(2, len(self.obj.file_search_paths))

        self.obj.find_file(config)
        self.assertEqual("", self.log_recorder.warn_buff.getvalue())

        self.obj.find_file("reporting.json")
        self.assertIn("Guessed location", self.log_recorder.warn_buff.getvalue())

        self.obj.find_file("definitely_missed.file")
        self.assertIn("Could not find", self.log_recorder.warn_buff.getvalue())

        self.obj.find_file("http://localhost:8000/BlazeDemo.html")
        self.assertIn("Downloading http://localhost:8000/BlazeDemo.html",
                      self.log_recorder.info_buff.getvalue())

    def test_missed_config(self):
        configs = ['definitely_missed.file']
        try:
            self.obj.configure(configs)
            self.fail()
        except TaurusConfigError as exc:
            self.assertIn('reading config file', str(exc))

    def test_configuration_smoothness(self):
        def find_ad_dict_ed(*args):
            # any plain dict inside Configuration means merging lost the BetterDict type
            if isinstance(args[0], dict) and not isinstance(args[0], BetterDict):
                raise BaseException("dict found in Configuration")

        configs = [
            RESOURCES_DIR + "json/get-post.json",
            self.paths]
        self.obj.configure(configs)
        self.assertTrue(isinstance(self.obj.config, Configuration))
        BetterDict.traverse(self.obj.config, find_ad_dict_ed)

    def test_requests(self):
        configs = [
            RESOURCES_DIR + "json/get-post.json",
            RESOURCES_DIR + "json/reporting.json",
            self.paths]
        self.obj.configure(configs)
        self.obj.prepare()
        for executor in self.obj.provisioning.executors:
            executor.env.set({"TEST_MODE": "files"})
        self.obj.run()
        self.obj.post_process()

    def test_double_exec(self):
        configs = [
            RESOURCES_DIR + "yaml/triple.yml",
            RESOURCES_DIR + "json/reporting.json",
            self.paths
        ]
        self.obj.configure(configs)
        self.obj.prepare()
        self.assertEqual(1, len(self.obj.services))
        for executor in self.obj.provisioning.executors:
            executor.env.set({"TEST_MODE": "files"})
        self.obj.run()
        self.obj.post_process()

    def test_unknown_module(self):
        configs = [
            RESOURCES_DIR + "json/gatling.json",
            self.paths
        ]
        self.obj.configure(configs)
        self.obj.config["provisioning"] = "unknown"
        self.obj.config["modules"]["unknown"] = BetterDict()
        self.assertRaises(TaurusConfigError, self.obj.prepare)

    def test_null_aggregator(self):
        self.obj.config.merge({
            "execution": [{
                "scenario": {
                    "requests": [{"url": "http://example.com/"}],
                }}],
            "settings": {
                "aggregator": None,
                "default-executor": "jmeter",
            },
            "modules": {
                "local": "bzt.modules.provisioning.Local",
                "jmeter": {"class": "tests.unit.modules.jmeter.MockJMeterExecutor",
                           "detect-plugins": False,
                           "protocol-handlers": {"http": "bzt.jmx.http.HTTPProtocolHandler"}},
            }})
        self.obj.unify_config()
        self.obj.prepare()

    def test_yaml_multi_docs(self):
        configs = [
            RESOURCES_DIR + "yaml/multi-docs.yml",
            self.paths
        ]
        self.obj.configure(configs)
        self.obj.prepare()
        self.assertEqual(len(self.obj.config["execution"]), 2)

    def test_json_format_regression(self):
        configs = [
            RESOURCES_DIR + "json/json-but-not-yaml.json"
        ]
        self.obj.configure(configs)
        self.obj.prepare()

    def test_invalid_format(self):
        configs = [
            RESOURCES_DIR + "jmeter-dist-3.0.zip"
        ]
        self.assertRaises(TaurusConfigError, lambda: self.obj.configure(configs))

    def test_included_configs(self):
        configs = [
            RESOURCES_DIR + "yaml/included-level1.yml",
        ]
        self.obj.configure(configs)
        self.assertTrue(self.obj.config["level1"])
        self.assertTrue(self.obj.config["level2"])
        self.assertTrue(self.obj.config["level3"])
        self.assertListEqual(['included-level2.yml', 'included-level3.yml'],
                             self.obj.config["included-configs"])

    def test_check_for_updates(self):
        def mock_http_client():
            return MockClient()

        self.sniff_log(self.obj.log)
        try:
            http_client = self.obj.get_http_client
            version = bzt.engine.engine.VERSION
            bzt.engine.engine.VERSION = '1.1.1'  # pretend we run an old version
            self.obj.get_http_client = mock_http_client
            self.obj._check_updates('bla-bla')
            warnings = self.log_recorder.warn_buff.getvalue()
            self.assertNotIn('Failed to check for updates', warnings)
            self.assertIn('There is newer version of Taurus', warnings)
        finally:
            self.obj.get_http_client = http_client
            bzt.engine.engine.VERSION = version

    def test_included_configs_cycle(self):
        configs = [
            RESOURCES_DIR + "yaml/included-circular1.yml",
        ]
        self.obj.configure(configs)
        self.assertTrue(self.obj.config["level1"])
        self.assertTrue(self.obj.config["level2"])
        self.assertListEqual(['included-circular2.yml', 'included-circular1.yml', 'included-circular2.yml'],
                             self.obj.config["included-configs"])

    def test_env_eval(self):
        configs = [
            RESOURCES_DIR + "yaml/env-eval.yml",
        ]
        os.environ["BZT_ENV_TEST_UNSET"] = "set"
        try:
            self.obj.configure(configs)
            self.obj.eval_env()
            self.assertEqual("success/top", self.obj.config["toplevel"])
            self.assertEqual("success/test/", self.obj.config["settings"]["artifacts-dir"])
            self.assertEqual("http://success/", self.obj.config["scenarios"]["scen1"]["default-address"])
            self.assertEqual("/success/", self.obj.config["scenarios"]["scen1"]["requests"][0])
            self.assertNotEqual("/${PATH}/", self.obj.config["scenarios"]["scen1"]["requests"][1])
            self.assertEqual("/${TEMP}/", self.obj.config["scenarios"]["scen1"]["requests"][2])
            self.assertEqual("/" + self.obj.artifacts_dir + "/",
                             self.obj.config["scenarios"]["scen1"]["requests"][3])
        finally:
            if "BZT_ENV_TEST" in os.environ:
                os.environ.pop("BZT_ENV_TEST")
            if "BZT_ENV_TEST_UNSET" in os.environ:
                os.environ.pop("BZT_ENV_TEST_UNSET")

    def test_nested_env_eval(self):
        try:
            self.obj.config.merge({
                "settings": {
                    "env": {
                        "FOO": "${BAR}/aaa/bbb",
                        "FOOBAR": "eee",
                        "BAR": "${BAZ}/ccc",
                        "BAZ": "${FOOBAR}/ddd",
                        "ART": "${FOO}===${TAURUS_ARTIFACTS_DIR}",
                    }
                }})
            self.obj.eval_env()
            self.assertEqual("${FOOBAR}/ddd/ccc/aaa/bbb", self.obj.config["settings"]["env"]["FOO"])
            self.assertEqual("${FOOBAR}/ddd/ccc/aaa/bbb", os.environ["FOO"])
            self.assertEqual("eee", self.obj.config["settings"]["env"]["FOOBAR"])
            self.assertEqual("eee", os.environ["FOOBAR"])
            self.assertEqual("eee/ddd/ccc", self.obj.config["settings"]["env"]["BAR"])
            self.assertEqual("eee/ddd/ccc", os.environ["BAR"])
            self.assertEqual("eee/ddd", self.obj.config["settings"]["env"]["BAZ"])
            self.assertEqual("eee/ddd", os.environ["BAZ"])
            self.assertEqual("eee/ddd/ccc/aaa/bbb==={}".format(
                self.obj.config["settings"]["env"]["TAURUS_ARTIFACTS_DIR"]),
                self.obj.config["settings"]["env"]["ART"])
            self.assertEqual("eee/ddd/ccc/aaa/bbb==={}".format(
                self.obj.config["settings"]["env"]["TAURUS_ARTIFACTS_DIR"]),
                os.environ["ART"])
            self.log.debug("env.ART: {}, os.env.ART: {}".format(
                self.obj.config["settings"]["env"]["ART"], os.environ["ART"]))
        finally:
            if "FOO" in os.environ:
                os.environ.pop("FOO")
            if "BAR" in os.environ:
                os.environ.pop("BAR")
            if "FOOBAR" in os.environ:
                os.environ.pop("FOOBAR")
            if "BAZ" in os.environ:
                os.environ.pop("BAZ")
            if "ART" in os.environ:
                os.environ.pop("ART")

    def test_singletone_service(self):
        configs = [
            RESOURCES_DIR + "yaml/singletone-service.yml",
        ]
        self.obj.configure(configs, read_config_files=False)
        self.obj.prepare()
        self.assertEqual(2, len(self.obj.services))
        self.assertEqual(None, self.obj.services[0].parameters['run-at'])
        self.assertEqual("mock", self.obj.services[1].parameters['run-at'])
        self.assertEqual(2, len(self.obj.reporters))
        self.assertEqual("mock", self.obj.reporters[0].parameters['run-at'])
        self.assertEqual(None, self.obj.reporters[1].parameters['run-at'])

    def test_autodetect_plugin_configs(self):
        self.sniff_log(self.obj.log)
        sys.path.append(RESOURCES_DIR + "plugins")
        try:
            configs = [
                RESOURCES_DIR + "plugins/bzt_plugin_dummy/demo.yml",
            ]
            self.obj.configure(configs, read_config_files=True)
            self.obj.prepare()
            self.assertEqual({'class': 'bzt_plugin_dummy.dummy.DummyExecutor'},
                             self.obj.config['modules']['dummy'])
        finally:
            sys.path.remove(RESOURCES_DIR + "plugins")
def test_pack_and_send_to_blazemeter(self):
    """Cloud provisioning must zip a java package, register an unpacker service,
    and upload the archive to the test's files endpoint."""
    obj = CloudProvisioning()
    obj.engine = EngineEmul()
    obj.engine.config.merge({
        "execution": {
            "executor": "selenium",
            "concurrency": 5500,
            "locations": {
                "us-east-1": 1,
                "us-west": 2
            },
            "scenario": {
                "script": RESOURCES_DIR + "selenium/junit/java_package"
            }
        },
        "modules": {
            "selenium": "bzt.modules.selenium.SeleniumExecutor",
            "cloud": "bzt.modules.blazemeter.CloudProvisioning",
            "junit": "bzt.modules.java.JUnitTester"
        },
        "provisioning": "cloud"
    })
    obj.engine.unify_config()
    obj.parameters = obj.engine.config['execution'][0]
    obj.settings["token"] = "FakeToken"

    mock = BZMock(obj.user)
    mock.mock_get.update({
        'https://a.blazemeter.com/api/v4/web/elfinder/1?cmd=open&target=s1_Lw': {"files": []},
        'https://a.blazemeter.com/api/v4/multi-tests?projectId=1&name=Taurus+Cloud+Test': {"result": []},
        'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Cloud+Test': {"result": [{
            "id": 1,
            'name': 'Taurus Cloud Test',
            "configuration": {"type": "taurus"},
        }]},
    })
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/projects': {"result": {"id": 1, 'workspaceId': 1}},
        'https://a.blazemeter.com/api/v4/multi-tests': {"result": {}},
        'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Cloud+Test': {"result": {
            "id": 1,
            "configuration": {"type": "taurus"},
        }},
        'https://a.blazemeter.com/api/v4/tests/1/files': {}
    })
    mock.mock_patch.update({
        'https://a.blazemeter.com/api/v4/tests/1': {"result": {}}
    })

    obj.prepare()

    unpack_cfgs = obj.engine.config.get(Service.SERV)
    self.assertEqual(len(unpack_cfgs), 1)
    self.assertEqual(unpack_cfgs[0]['module'], Unpacker.UNPACK)
    self.assertEqual(unpack_cfgs[0][Unpacker.FILES], ['java_package.zip'])
    self.assertTrue(zipfile.is_zipfile(obj.engine.artifacts_dir + '/java_package.zip'))
def __init__(self):
    """Local-provisioning emulator: same as the parent, but backed by an emulated engine."""
    super(LocalProvisioningEmul, self).__init__()
    self.engine = EngineEmul()