def get_rfiles(self):
    """
    Collect resource files from all executors, validating that each file
    exists and is referenced somewhere in the serialized config.

    :return: deduplicated list of resource file paths (variable-pattern
        entries excluded)
    :raise TaurusConfigError: when a file is missing or not handled in config
    """
    rfiles = []
    additional_files = []
    for executor in self.executors:
        executor_rfiles = executor.get_resource_files()
        # serialized config acts as a haystack: a file whose JSON-escaped
        # path appears in it is considered handled by the cloud side
        config = to_json(self.engine.config.get('execution'))
        config += to_json(self.engine.config.get('scenarios'))
        config += to_json(executor.settings)
        for rfile in executor_rfiles:
            if has_variable_pattern(rfile):
                continue  # templated names can't be resolved locally

            if not os.path.exists(self.engine.find_file(rfile)):
                raise TaurusConfigError("%s: resource file '%s' not found" % (executor, rfile))

            if to_json(rfile) not in config:  # TODO: might be check is needed to improve
                additional_files.append(rfile)
        rfiles += executor_rfiles

    if additional_files:
        raise TaurusConfigError("Following files can't be handled in cloud: %s" % additional_files)

    rfiles = list(set(rfiles))
    rfiles = [x for x in rfiles if not has_variable_pattern(x)]
    self.log.debug("All resource files are: %s", rfiles)
    return rfiles
def _request_mock(self, method, url, **kwargs):
    """
    Emulate an HTTP call using the pre-registered per-method mock dictionaries.

    :param method: HTTP verb; only GET/POST/PATCH are mocked
    :param url: request URL, used as the lookup key into the mock dict
    :param kwargs: pass-through request arguments; only 'data' is consumed
    :rtype: requests.Response
    :raise ValueError: for an unmocked HTTP method
    """
    # TODO: make it simplier, mocking and replacing requests.request of BZAObject
    if method == 'GET':
        resp = self.mock_get[url]
    elif method == 'POST':
        resp = self.mock_post[url]
    elif method == 'PATCH':
        resp = self.mock_patch[url]
    else:
        # was a bare ValueError() - include the offending method for debuggability
        raise ValueError("Unsupported HTTP method for mock: %s" % method)

    response = requests.Response()

    # a list of canned responses is consumed one element per call
    if isinstance(resp, list):
        resp = resp.pop(0)

    # 'data' may legitimately be absent (e.g. plain GET) - don't KeyError on it
    data = kwargs.get('data')
    ROOT_LOGGER.debug("Emulated %s %s %s: %s", method, url, ("%s" % data)[:4096], resp)
    self.requests.append({"method": method, "url": url, "data": data})

    # exceptions registered as responses are raised to simulate failures
    if isinstance(resp, BaseException):
        raise resp

    response._content = to_json(resp)
    response.status_code = 200
    return response
def send_monitoring_data(self, engine_id, data):
    """Upload a monitoring JSON snapshot and notify the backend on first upload."""
    target = '%s-%s-c.monitoring.json' % (self['id'], engine_id)
    self.upload_file(target, to_json(data))

    # the notification is one-shot; bail out once it has been sent
    if self.monitoring_upload_notified:
        return

    self.log.debug("Sending engine health notification")
    self.notify_monitoring_file(target)
    self.monitoring_upload_notified = True
def _get_scenario_label(self, name, scenarios):
    # Resolve the label (key into `scenarios`) for the current execution item,
    # extracting inlined scenario dicts / bare script paths into the shared
    # `scenarios` mapping along the way.
    if name is None:  # get current scenario
        exc = TaurusConfigError("Scenario is not found in execution: %s" % self.execution)
        label = self.execution.get('scenario', exc)

        # a plain string that is not a known scenario alias but points to an
        # existing file is treated as a script reference
        is_script = isinstance(label, string_types) and label not in scenarios and \
            os.path.exists(self.engine.find_file(label))
        if isinstance(label, list):
            msg = "Invalid content of scenario, list type instead of dict or string: %s"
            raise TaurusConfigError(msg % label)
        if isinstance(label, dict) or is_script:
            self.log.debug("Extract %s into scenarios" % label)
            if isinstance(label, string_types):
                scenario = BetterDict.from_dict({Scenario.SCRIPT: label})
            else:
                scenario = label

            path = self.get_script_path(scenario=Scenario(self.engine, scenario))
            if path:
                label = os.path.basename(path)
            if not path or label in scenarios:
                # no usable script name (or a clash) - derive a stable label
                # from a checksum of the scenario content
                hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                label = 'autogenerated_' + hash_str[-10:]

            scenarios[label] = scenario
            self.execution['scenario'] = label

        self.label = label
    else:  # get scenario by name
        label = name

    return label
def get_scenario(self):
    """
    Returns scenario dict, either inlined, or referenced by alias

    :return: DictOfDicts
    """
    if self.__scenario is None:
        scenario = self.execution.get('scenario', ValueError("Scenario not configured properly"))
        if isinstance(scenario, string_types):
            # alias reference - look it up in the global scenarios section
            self._label = scenario
            scenarios = self.engine.config.get("scenarios")
            if scenario not in scenarios:
                raise ValueError("Scenario not found in scenarios: %s" % scenario)
            # normalize a plain-string entry into {script: ...} form
            ensure_is_dict(scenarios, scenario, Scenario.SCRIPT)
            scenario = scenarios.get(scenario)
            self.__scenario = Scenario(self.engine, scenario)
        elif isinstance(scenario, dict):
            self.__scenario = Scenario(self.engine, scenario)
        else:
            raise ValueError("Unsupported type for scenario")

    if self._label is None:
        try:
            error = ValueError("Wrong script in scenario")
            scen = self.__scenario.get(Scenario.SCRIPT, error)
            self._label = os.path.basename(scen)
        except BaseException:
            # no script to name after - fall back to a content checksum
            self._label = hashlib.md5(to_json(self.__scenario).encode()).hexdigest()

    return self.__scenario
def test_negative_response_time_scaling_crash(self):
    """Negative response times must only trigger a warning, never a crash."""
    obj = ConsolidatingAggregator()
    obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
    obj.prepare()
    self.sniff_log(obj.log)

    mock = MockReader()
    samples = [
        (1, "first", 'FAILx3'),
        (2, "first", 'FAILx1'),
        (5, "first", None),
        (7, "second", 'FAILx3'),
        (3, "first", 'FAILx3'),
        (6, "second", 'unique FAIL'),
    ]
    for stamp, label, err in samples:
        # negated first r() yields the negative response time under test
        mock.data.append((stamp, label, 1, -r(), r(), r(), 200, err, '', 0))

    obj.add_underling(mock)
    obj.check()
    for point in obj.datapoints():
        obj.log.info(to_json(point))

    self.assertIn("Negative response time reported", self.log_recorder.warn_buff.getvalue())
def test_kpiset_trapped_getitem(self):
    # Regression test: a KPISet must serialize to the same JSON regardless of
    # which iteration API is used to read its pairs (items/viewitems/iteritems).
    def new():
        # fresh KPISet with three response-time samples
        subj = KPISet(perc_levels=(100.0,))
        subj[KPISet.RESP_TIMES].add(0.1)
        subj[KPISet.RESP_TIMES].add(0.01)
        subj[KPISet.RESP_TIMES].add(0.001)
        subj.recalculate()
        return subj

    def enc_dec_iter(vals):
        # round-trip key/value pairs through JSON serialization
        vals = list(vals)
        dct = {x[0]: x[1] for x in vals}
        jsoned = to_json(dct)
        return json.loads(jsoned)

    exp = {u'avg_ct': 0, u'avg_lt': 0, u'avg_rt': 0, u'bytes': 0, u'concurrency': 0,
           u'errors': [], u'fail': 0, u'perc': {u'100.0': 0.1}, u'rc': {},
           u'rt': {u'0.001': 1, u'0.01': 1, u'0.1': 1},
           u'stdev_rt': 0.058 if PY2 else 0.05802585630561603,  # stdev precision differs under PY2
           u'succ': 0, u'throughput': 0}
    self.assertEqual(exp, enc_dec_iter(new().items()))
    if PY2:
        # PY2-only dict iteration APIs
        self.assertEqual(exp, enc_dec_iter(new().viewitems()))
        self.assertEqual(exp, enc_dec_iter(new().iteritems()))
    self.assertEqual('{"100.0": 0.1}', to_json(new().get(KPISet.PERCENTILES), indent=None))
def get_kpi_body(self, data_buffer, tags, is_final):
    # Build the SignalFx gauge payload (JSON string) from buffered datapoints.
    #
    # - reporting format:
    #   {labels: <data>,    # see below
    #   sourceID: <id of BlazeMeterClient object>,
    #   [is_final: True]}   # for last report
    #
    # - elements of 'data' are described in __get_label()
    #
    # - elements of 'intervals' are described in __get_interval()
    #   every interval contains info about response codes have gotten on it.
    #
    # NOTE(review): is_final is accepted but not used in this implementation.
    signalfx_labels_list = []

    if data_buffer:
        # track overall test time span on the owning reporter
        self.owner.first_ts = min(self.owner.first_ts, data_buffer[0][DataPoint.TIMESTAMP])
        self.owner.last_ts = max(self.owner.last_ts, data_buffer[-1][DataPoint.TIMESTAMP])

        # fill 'Timeline Report' tab with intervals data
        # intervals are received in the additive way
        for dpoint in data_buffer:
            time_stamp = dpoint[DataPoint.TIMESTAMP]
            for label, kpi_set in iteritems(dpoint[DataPoint.CURRENT]):
                # deep-copy so each label gets an independent dimensions dict
                dimensions = copy.deepcopy(tags)
                dimensions.update({'label': label or 'OVERALL'})
                label_data = self.__convert_data(kpi_set, time_stamp * self.multi, dimensions)
                signalfx_labels_list.extend(label_data)

    data = {"gauge": signalfx_labels_list}
    return to_json(data)
def test_json(self):
    """JSON-serialized response-time keys must stay within the 1..2s window."""
    obj = self.obj
    mock = MockReader()
    mock.buffer_scale_idx = '100.0'
    for stamp in (1, 2, 2, 3, 3, 4, 4):
        mock.data.append((stamp, "", 1, r(), r(), r(), 200, None, '', 0))
    obj.add_listener(mock)

    # drain the reader so results get populated
    for point in mock.datapoints(True):
        pass

    for point in mock.results:
        serialized = json.loads(to_json(point))
        for key in serialized["current"][""]["rt"]:
            rt = float(key)
            self.assertGreaterEqual(rt, 1.0)
            self.assertLessEqual(rt, 2.0)
def _request_mock(self, method, url, **kwargs):
    """
    Emulate an HTTP call using the pre-registered per-method mock dictionaries.

    :param method: HTTP verb; only GET/POST/PATCH are mocked
    :param url: request URL, used as the lookup key into the mock dict
    :param kwargs: pass-through request arguments; only 'data' is consumed
    :rtype: requests.Response
    :raise ValueError: for an unmocked HTTP method
    """
    # TODO: make it simplier, mocking and replacing requests.request of BZAObject
    if method == 'GET':
        resp = self.mock_get[url]
    elif method == 'POST':
        resp = self.mock_post[url]
    elif method == 'PATCH':
        resp = self.mock_patch[url]
    else:
        # was a bare ValueError() - include the offending method for debuggability
        raise ValueError("Unsupported HTTP method for mock: %s" % method)

    response = requests.Response()

    # a list of canned responses is consumed one element per call
    if isinstance(resp, list):
        resp = resp.pop(0)

    # 'data' may legitimately be absent (e.g. plain GET) - don't KeyError on it
    data = kwargs.get('data')
    logging.debug("Emulated %s %s %s: %s", method, url, data, resp)
    self.requests.append({"method": method, "url": url, "data": data})

    # exceptions registered as responses are raised to simulate failures
    if isinstance(resp, BaseException):
        raise resp

    response._content = to_json(resp)
    response.status_code = 200
    return response
def test_datapoint_to_json(self):
    """Smoke test: every aggregated datapoint must survive JSON serialization."""
    aggregator = ConsolidatingAggregator()
    aggregator.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
    aggregator.prepare()
    aggregator.add_underling(self.get_success_reader())
    for point in aggregator.datapoints():
        aggregator.log.info(to_json(point))
def start_online(self, test_id, session_name):
    """
    Start online test

    :type test_id: str
    :return: URL of the results page
    """
    self.log.info("Initiating data feeding...")
    data = urlencode({})

    # with an API token we start an externally-fed test; anonymous mode
    # creates a standalone session instead
    if self.token:
        url = self.address + "/api/latest/tests/%s/start-external" % test_id
    else:
        url = self.address + "/api/latest/sessions"

    resp = self._request(url, data)

    self.active_session_id = str(resp['result']['session']['id'])
    self.data_signature = str(resp['result']['signature'])
    self.test_id = test_id
    self.user_id = str(resp['result']['session']['userId'])

    if self.token:
        self.results_url = self.address + '/app/#reports/%s' % self.active_session_id
        if session_name:
            # rename the freshly created session
            url = self.address + "/api/latest/sessions/%s" % self.active_session_id
            self._request(url, to_json({"name": str(session_name)}),
                          headers={"Content-Type": "application/json"}, method='PATCH')
    else:
        # anonymous session: backend supplies test id and a public URL
        self.test_id = resp['result']['session']['testId']
        self.results_url = resp['result']['publicTokenUrl']

    return self.results_url
def get_scenario(self):
    """
    Returns scenario dict, either inlined, or referenced by alias

    :return: DictOfDicts
    """
    if self.__scenario is None:
        scenario = self.execution.get('scenario', ValueError("Scenario not configured properly"))
        if isinstance(scenario, string_types):
            # alias reference - look it up in the global scenarios section
            self._label = scenario
            scenarios = self.engine.config.get("scenarios")
            if scenario not in scenarios:
                raise ValueError("Scenario not found in scenarios: %s" % scenario)
            # normalize a plain-string entry into {script: ...} form
            ensure_is_dict(scenarios, scenario, Scenario.SCRIPT)
            scenario = scenarios.get(scenario)
            self.__scenario = Scenario(scenario)
        elif isinstance(scenario, dict):
            self.__scenario = Scenario(scenario)
        else:
            raise ValueError("Unsupported type for scenario")

    if self._label is None:
        if Scenario.SCRIPT in self.__scenario:  # using script name if present
            error = ValueError("Wrong script in scenario")
            self._label = os.path.basename(self.__scenario.get(Scenario.SCRIPT, error))
        else:  # last resort - a checksum of whole scenario
            self._label = hashlib.md5(to_json(self.__scenario).encode()).hexdigest()

    return self.__scenario
def send_kpi_data(self, data_buffer, is_check_response=True):
    """
    Sends online data

    :param is_check_response: whether to inspect the reply for a remote stop request
    :type data_buffer: list[bzt.modules.aggregator.DataPoint]
    """
    data = []

    for sec in data_buffer:
        # track the overall test time span
        self.first_ts = min(self.first_ts, sec[DataPoint.TIMESTAMP])
        self.last_ts = max(self.last_ts, sec[DataPoint.TIMESTAMP])

        for lbl, item in iteritems(sec[DataPoint.CURRENT]):
            if lbl == '':
                label = "ALL"
            else:
                label = lbl

            # find the existing per-label skeleton, or create it
            json_item = None
            for lbl_item in data:
                if lbl_item["name"] == label:
                    json_item = lbl_item
                    break

            if not json_item:
                json_item = self.__label_skel(label)
                data.append(json_item)

            interval_item = self.__interval_json(item, sec)
            for r_code, cnt in iteritems(item[KPISet.RESP_CODES]):
                interval_item['rc'].append({"n": cnt, "rc": r_code})

            json_item['intervals'].append(interval_item)

            cumul = sec[DataPoint.CUMULATIVE][lbl]
            json_item['n'] = cumul[KPISet.SAMPLE_COUNT]
            json_item["summary"] = self.__summary_json(cumul)

    data = {"labels": data}

    url = self.data_address + "/submit.php?session_id=%s&signature=%s&test_id=%s&user_id=%s"
    url = url % (self.active_session_id, self.data_signature, self.test_id, self.user_id)
    url += "&pq=0&target=labels_bulk&update=1"
    # NOTE(review): leading space in " application/json" looks accidental - confirm server tolerates it
    hdr = {"Content-Type": " application/json"}
    response = self._request(url, to_json(data), headers=hdr)

    if response and 'response_code' in response and response['response_code'] != 200:
        raise RuntimeError("Failed to feed data, response code %s" % response['response_code'])

    if response and 'result' in response and is_check_response:
        result = response['result']['session']
        self.log.debug("Result: %s", result)
        # statusCode above 100 means the test was terminated remotely
        if 'statusCode' in result and result['statusCode'] > 100:
            self.log.info("Test was stopped through Web UI: %s", result['status'])
            raise ManualShutdown("The test was interrupted through Web UI")
def test_kpiset_trapped_getitem(self):
    """A KPISet must serialize identically however its pairs are iterated."""
    def build():
        kpiset = KPISet(perc_levels=(100.0,))
        for sample in (0.1, 0.01, 0.001):
            kpiset[KPISet.RESP_TIMES].add(sample)
        kpiset.recalculate()
        return kpiset

    def roundtrip(pairs):
        # serialize key/value pairs through JSON and back
        return json.loads(to_json(dict(pairs)))

    expected = {
        u'avg_ct': 0, u'avg_lt': 0, u'avg_rt': 0, u'bytes': 0,
        u'concurrency': 0, u'errors': [], u'fail': 0,
        u'perc': {u'100.0': 0.1}, u'rc': {},
        u'rt': {u'0.001': 1, u'0.01': 1, u'0.1': 1},
        u'stdev_rt': 0.05802585630561603, u'succ': 0, u'throughput': 0,
    }
    self.assertEqual(expected, roundtrip(build().items()))
    self.assertEqual('{"100.0": 0.1}', to_json(build().get(KPISet.PERCENTILES), indent=None))
def get_kpi_body(self, data_buffer, is_final):
    # Build the KPI upload payload (JSON string) from buffered datapoints.
    #
    # - reporting format:
    #   {labels: <data>,    # see below
    #   sourceID: <id of BlazeMeterClient object>,
    #   [is_final: True]}   # for last report
    #
    # - elements of 'data' are described in __get_label()
    #
    # - elements of 'intervals' are described in __get_interval()
    #   every interval contains info about response codes that were received on it.
    report_items = BetterDict()

    if data_buffer:
        # track the overall test time span on the owning reporter
        self.owner.first_ts = min(self.owner.first_ts, data_buffer[0][DataPoint.TIMESTAMP])
        self.owner.last_ts = max(self.owner.last_ts, data_buffer[-1][DataPoint.TIMESTAMP])

        # following data is received in the cumulative way
        for label, kpi_set in iteritems(data_buffer[-1][DataPoint.CUMULATIVE]):
            if self.owner.extend_report:
                # extended report keeps a per-state breakdown for each label
                report_item = {}
                for state in kpi_set:
                    report_item[state] = self.__get_label(label, kpi_set[state])
                    self.__add_errors(report_item[state], kpi_set[state])
            else:
                report_item = self.__get_label(label, kpi_set)
                self.__add_errors(report_item, kpi_set)  # 'Errors' tab
            report_items[label] = report_item

        # fill 'Timeline Report' tab with intervals data
        # intervals are received in the additive way
        if report_items:
            for dpoint in data_buffer:
                time_stamp = dpoint[DataPoint.TIMESTAMP]
                for label, kpi_set in iteritems(dpoint[DataPoint.CURRENT]):
                    # every CURRENT label must already exist in CUMULATIVE
                    exc = TaurusInternalException('Cumulative KPISet is non-consistent')
                    report_item = report_items.get(label, exc)
                    if self.owner.extend_report:
                        for state in report_item:
                            if state in kpi_set:
                                report_item[state]['intervals'].append(
                                    self.__get_interval(kpi_set[state], time_stamp))
                    else:
                        report_item['intervals'].append(self.__get_interval(kpi_set, time_stamp))

    report_items = [report_items[key] for key in sorted(report_items.keys())]  # convert dict to list
    data = {"labels": report_items, "sourceID": id(self.owner)}
    if is_final:
        data['final'] = True

    return to_json(data)
def test_datapoint_to_json(self):
    """Smoke test: every aggregated datapoint must survive JSON serialization."""
    self.obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
    self.obj.prepare()
    self.obj.add_underling(get_success_reader())
    for point in self.obj.datapoints():
        # a point whose only subresult is itself would recurse on dump - drop it
        if [point] == point[DataPoint.SUBRESULTS]:
            del point[DataPoint.SUBRESULTS]
        self.obj.log.info(to_json(point))
def update_session(self, active_session_id, data):
    """PUT updated attributes for a session and return the 'result' payload."""
    url = self.address + '/api/latest/sessions/%s' % active_session_id
    headers = {"Content-Type": "application/json"}
    response = self._request(url, to_json(data), headers=headers, method="PUT")
    return response['result']
def send_kpi_data(self, data_buffer, is_check_response=True, is_final=False):
    """
    Sends online data

    :param is_check_response: whether to inspect the reply for a remote stop request
    :param is_final: mark this upload as the last report of the test
    :type data_buffer: list[bzt.modules.aggregator.DataPoint]
    """
    data = []

    for sec in data_buffer:
        # track the overall test time span
        self.first_ts = min(self.first_ts, sec[DataPoint.TIMESTAMP])
        self.last_ts = max(self.last_ts, sec[DataPoint.TIMESTAMP])

        for lbl, item in iteritems(sec[DataPoint.CURRENT]):
            if lbl == '':
                label = "ALL"
            else:
                label = lbl

            # find the existing per-label skeleton, or create it
            json_item = None
            for lbl_item in data:
                if lbl_item["name"] == label:
                    json_item = lbl_item
                    break

            if not json_item:
                json_item = self.__label_skel(label)
                data.append(json_item)

            interval_item = self.__interval_json(item, sec)
            for r_code, cnt in iteritems(item[KPISet.RESP_CODES]):
                interval_item['rc'].append({"n": cnt, "rc": r_code})

            json_item['intervals'].append(interval_item)

            cumul = sec[DataPoint.CUMULATIVE][lbl]
            json_item['n'] = cumul[KPISet.SAMPLE_COUNT]
            json_item["summary"] = self.__summary_json(cumul)

    data = {"labels": data, "sourceID": id(self)}
    if is_final:
        data['final'] = True

    url = self.data_address + "/submit.php?session_id=%s&signature=%s&test_id=%s&user_id=%s"
    url = url % (self.active_session_id, self.data_signature, self.test_id, self.user_id)
    url += "&pq=0&target=%s&update=1" % self.kpi_target
    # NOTE(review): leading space in " application/json" looks accidental - confirm server tolerates it
    hdr = {"Content-Type": " application/json"}
    response = self._request(url, to_json(data), headers=hdr)

    if response and 'response_code' in response and response['response_code'] != 200:
        raise RuntimeError("Failed to feed data, response code %s" % response['response_code'])

    if response and 'result' in response and is_check_response:
        result = response['result']['session']
        self.log.debug("Result: %s", result)
        # statusCode above 100 means the test was terminated remotely
        if 'statusCode' in result and result['statusCode'] > 100:
            self.log.info("Test was stopped through Web UI: %s", result['status'])
            raise ManualShutdown("The test was interrupted through Web UI")
def post_process(self):
    # Final cloud-test teardown: stop the remote test, surface session errors,
    # evaluate failure criteria / thresholds, and fetch logs when HAR capture
    # was requested.
    self.log.warning('Part of result data might be missed here due to BM API specifics')

    if not self.detach and self.router and not self.test_ended:
        self.router.stop_test()

    if self.results_url:
        if self.browser_open in ('end', 'both'):
            open_browser(self.results_url)

    if self.router and self.router.master:
        full = self.router.master.get_full()
        if 'note' in full and full['note']:
            self.log.warning("Cloud test has probably failed with message: %s", full['note'])

        # any per-session error aborts with the raw error payload
        for session in full.get('sessions', ()):
            for error in session.get("errors", ()):
                raise TaurusException(to_json(error))

        if "hasThresholds" in full and full["hasThresholds"]:
            thresholds = self.router.master.get_thresholds()
            for item in thresholds.get('data', []):
                if item.get('success', None) is False:
                    # pick the first failed assertion for a human-readable reason
                    reason = None
                    for assertion in item.get('assertions', []):
                        if assertion.get('success', None) is False:
                            criterion = assertion.get('field', '')
                            label = assertion.get('label', '')
                            reason = "Cloud failure criterion %r (on label %r) was met" % (criterion, label)
                            break
                    if reason is None:
                        reason = "Cloud tests failed because failure criteria were met"
                    self.log.warning(reason)
                    raise AutomatedShutdown(reason)

        # if we have captured HARs, let's download them
        for service in self.engine.config.get(Service.SERV, []):
            mod = service.get('module', TaurusConfigError("No 'module' specified for service"))
            assert isinstance(mod, str), mod
            module = self.engine.instantiate_module(mod)
            if isinstance(module, ServiceStubCaptureHAR):
                self._download_logs()
                break

        if "functionalSummary" in full:
            summary = full["functionalSummary"]
            if summary is None or summary.get("isFailed", False):
                raise AutomatedShutdown("Cloud tests failed")
def get_scenario(self, name=None, cache_scenario=True):
    """
    Returns scenario dict, extract if scenario is inlined

    :return: DictOfDicts
    """
    if name is None and self.__scenario is not None:
        return self.__scenario  # cached from a previous call

    scenarios = self.engine.config.get("scenarios")

    if name is None:  # get current scenario
        exc = TaurusConfigError("Scenario is not found in execution: %s" % self.execution)
        label = self.execution.get('scenario', exc)

        # a bare string that is not a scenario alias but is an existing file
        # is treated as a script reference
        is_script = isinstance(label, string_types) and label not in scenarios and \
            os.path.exists(self.engine.find_file(label))
        if isinstance(label, list):
            msg = "Invalid content of scenario, list type instead of dict or string: %s"
            raise TaurusConfigError(msg % label)
        if isinstance(label, dict) or is_script:
            self.log.debug("Extract %s into scenarios" % label)
            if isinstance(label, string_types):
                scenario = BetterDict()
                scenario.merge({Scenario.SCRIPT: label})
            else:
                scenario = label

            path = self.get_script_path(Scenario(self.engine, scenario))
            if path is not None:
                label = os.path.basename(path)
            if path is None or label in scenarios:
                # derive a stable autogenerated label from a content checksum
                hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                label = 'autogenerated_' + hash_str[-10:]

            scenarios[label] = scenario
            self.execution['scenario'] = label

        self.label = label
    else:  # get scenario by name
        label = name

    exc = TaurusConfigError("Scenario '%s' not found in scenarios: %s" % (label, scenarios.keys()))
    scenario = scenarios.get(label, exc)
    scenario_obj = Scenario(self.engine, scenario)

    if name is None and cache_scenario:
        self.__scenario = scenario_obj

    return scenario_obj
def send_error_summary(self, data_buffer):
    """
    Sends error summary file

    :type data_buffer: list[bzt.modules.aggregator.DataPoint]
    """
    if not data_buffer:
        return

    recent = data_buffer[-1]
    if not recent[DataPoint.CUMULATIVE][''][KPISet.ERRORS]:
        return  # nothing to report

    errors = self.__errors_skel(recent[DataPoint.TIMESTAMP], self.active_session_id,
                                self.test_id, self.user_id)
    for label, label_data in iteritems(recent[DataPoint.CUMULATIVE]):
        if not label_data[KPISet.ERRORS]:
            continue

        if label == '':
            label = 'ALL'

        error_item = self.__error_item_skel(label)
        for err_item in label_data[KPISet.ERRORS]:
            if err_item["type"] == KPISet.ERRTYPE_ASSERT:
                error_item['assertionsCount'] += err_item['cnt']
                error_item['assertions'].append({
                    "name": "All Assertions",
                    "failureMessage": err_item['msg'],
                    "failure": True,
                    "error": False,
                    "count": err_item['cnt']
                })
            else:
                error_item['count'] += err_item['cnt']
                error_item['responseInfo'].append({
                    "description": err_item['msg'],
                    "code": err_item['rc'],
                    "count": err_item['cnt'],
                })

        # NOTE(review): 'summery' misspelling matches the upload filename below;
        # presumably part of the backend contract - do not "fix" without checking
        errors['summery']['labels'].append(error_item)

    self.upload_file("sample.jtl.blazemeter.summery.json", to_json(errors))
def aggregated_second(self, data):
    """
    Store and assert aggregate sequence

    :type data: dict
    :raise AssertionError: when timestamps are not strictly increasing
    """
    if self.results:
        previous_ts = self.results[-1]["ts"]
        if previous_ts >= data["ts"]:
            raise AssertionError("TS sequence wrong: %s>=%s" % (previous_ts, data["ts"]))

    ROOT_LOGGER.info("Data: %s", to_json(data))
    self.results.append(data)
def get_scenario(self, name=None, cache_scenario=True):
    """
    Returns scenario dict, extract if scenario is inlined

    :return: DictOfDicts
    """
    if name is None and self.__scenario is not None:
        return self.__scenario  # cached from a previous call

    # force_set guarantees the 'scenarios' section exists for extraction below
    scenarios = self.engine.config.get("scenarios", force_set=True)

    if name is None:  # get current scenario
        exc = TaurusConfigError("Scenario is not found in execution: %s" % self.execution)
        label = self.execution.get('scenario', exc)

        # a bare string that is not a scenario alias but is an existing file
        # is treated as a script reference
        is_script = isinstance(label, string_types) and label not in scenarios and \
            os.path.exists(self.engine.find_file(label))
        if isinstance(label, list):
            msg = "Invalid content of scenario, list type instead of dict or string: %s"
            raise TaurusConfigError(msg % label)
        if isinstance(label, dict) or is_script:
            self.log.debug("Extract %s into scenarios" % label)
            if isinstance(label, string_types):
                scenario = BetterDict.from_dict({Scenario.SCRIPT: label})
            else:
                scenario = label

            path = self.get_script_path(scenario=Scenario(self.engine, scenario))
            if path:
                label = os.path.basename(path)
            if not path or label in scenarios:
                # derive a stable autogenerated label from a content checksum
                hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                label = 'autogenerated_' + hash_str[-10:]

            scenarios[label] = scenario
            self.execution['scenario'] = label

        self.label = label
    else:  # get scenario by name
        label = name

    exc = TaurusConfigError("Scenario '%s' not found in scenarios: %s" % (label, scenarios.keys()))
    scenario = scenarios.get(label, exc)
    scenario_obj = Scenario(self.engine, scenario)

    if name is None and cache_scenario:
        self.__scenario = scenario_obj

    return scenario_obj
def get_scenario(self, name=None):
    """
    Returns scenario dict, extract if scenario is inlined

    :return: DictOfDicts
    """
    if name is None and self.__scenario is not None:
        return self.__scenario  # cached from a previous call

    scenarios = self.engine.config.get("scenarios")

    if name is None:  # get current scenario
        label = self.execution.get('scenario', ValueError("Scenario is not configured properly"))

        # a bare string that is not a scenario alias but is an existing file
        # is treated as a script reference
        is_script = isinstance(label, string_types) and label not in scenarios and \
            os.path.exists(self.engine.find_file(label))
        if isinstance(label, list):
            raise ValueError("Invalid content of scenario, list type instead of dict or string")
        if isinstance(label, dict) or is_script:
            self.log.debug("Extract %s into scenarios" % label)
            if isinstance(label, string_types):
                scenario = BetterDict()
                scenario.merge({Scenario.SCRIPT: label})
            else:
                scenario = label

            path = self.get_script_path(Scenario(self.engine, scenario))
            if path is not None:
                label = os.path.basename(path)
            if path is None or label in scenarios:
                # derive a stable autogenerated label from a content checksum
                hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                label = 'autogenerated_' + hash_str[-10:]

            scenarios[label] = scenario
            self.execution['scenario'] = label

        self.label = label
    else:  # get scenario by name
        label = name

    err = ValueError("Scenario not found in scenarios: %s" % label)
    scenario = scenarios.get(label, err)
    scenario_obj = Scenario(self.engine, scenario)

    if name is None:
        self.__scenario = scenario_obj

    return scenario_obj
def get_scenario(self, name=None):
    """
    Returns scenario dict, extract if scenario is inlined

    :return: DictOfDicts
    """
    if name is None and self.__scenario is not None:
        return self.__scenario  # cached from a previous call

    scenarios = self.engine.config.get("scenarios")

    if name is None:  # get current scenario
        label = self.execution.get('scenario', ValueError("Scenario is not configured properly"))

        # a bare string that is not a scenario alias but is an existing file
        # is treated as a script reference
        is_script = isinstance(label, string_types) and label not in scenarios and \
            os.path.exists(self.engine.find_file(label))
        # NOTE(review): unlike sibling variants in this file, a list-valued
        # scenario is not explicitly rejected here - confirm intended
        if isinstance(label, dict) or is_script:
            self.log.debug("Extract %s into scenarios" % label)
            if isinstance(label, string_types):
                scenario = BetterDict()
                scenario.merge({Scenario.SCRIPT: label})
            else:
                scenario = label

            path = self.get_script_path(Scenario(self.engine, scenario))
            if path is not None:
                label = os.path.basename(path)
            if path is None or label in scenarios:
                # derive a stable autogenerated label from a content checksum
                hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                label = 'autogenerated_' + hash_str[-10:]

            scenarios[label] = scenario
            self.execution['scenario'] = label

        self.label = label
    else:  # get scenario by name
        label = name

    err = ValueError("Scenario not found in scenarios: %s" % label)
    scenario = scenarios.get(label, err)
    scenario_obj = Scenario(self.engine, scenario)

    if name is None:
        self.__scenario = scenario_obj

    return scenario_obj
def write(self, fds, fmt):
    """
    Write config into opened file

    :type fds: file
    :type fmt: str
    :raise ValueError: for an unknown dump format
    """
    if fmt not in (self.JSON, self.YAML):
        raise ValueError("Unknown dump format: %s" % fmt)

    if fmt == self.JSON:
        fds.write(to_json(self))
    else:
        serialized = yaml.dump(self, default_flow_style=False, explicit_start=True, canonical=False)
        fds.write(serialized)

    fds.write("\n")
def write(self, fds, fmt):
    """
    Write config into opened file

    :type fds: file
    :type fmt: str
    :raise TaurusInternalException: for an unknown dump format
    """
    if fmt not in (self.JSON, self.YAML):
        raise TaurusInternalException("Unknown dump format: %s" % fmt)

    if fmt == self.JSON:
        fds.write(to_json(self))
    else:
        serialized = yaml.dump(self, default_flow_style=False, explicit_start=True,
                               canonical=False, allow_unicode=True)
        fds.write(serialized)

    fds.write("\n")
def test_by_name(self, name, configuration, taurus_config, resource_files, proj_id):
    """
    Look up a test by name/configuration-type/project, creating it if absent;
    upload taurus config and resource files for taurus-type tests.

    :type name: str
    :rtype: str
    """
    tests = self.get_tests()
    test_id = None
    for test in tests:
        self.log.debug("Test: %s", test)
        # match on name, configuration type and (when given) project id
        if "name" in test and test['name'] == name:
            if test['configuration']['type'] == configuration['type']:
                if not proj_id or proj_id == test['projectId']:
                    test_id = test['id']
                    self.log.debug("Matched: %s", test)

    if not test_id:
        self.log.debug("Creating new test")
        url = self.address + '/api/latest/tests'
        data = {"name": name, "projectId": proj_id, "configuration": configuration}
        # NOTE(review): leading space in " application/json" looks accidental - confirm
        hdr = {"Content-Type": " application/json"}
        resp = self._request(url, json.dumps(data), headers=hdr)
        test_id = resp['result']['id']

    if configuration['type'] == 'taurus':  # FIXME: this is weird way to code
        self.log.debug("Uploading files into the test")
        url = '%s/api/latest/tests/%s/files' % (self.address, test_id)
        body = MultiPartForm()
        body.add_file_as_string('script', 'taurus.json', to_json(taurus_config))
        for rfile in resource_files:
            body.add_file('files[]', rfile)

        hdr = {"Content-Type": body.get_content_type()}
        # upload response intentionally discarded; errors surface via _request
        _ = self._request(url, body.form_as_bytes(), headers=hdr)

    self.log.debug("Using test ID: %s", test_id)
    return test_id
def write(self, fds, fmt):
    """
    Write config into opened (binary-mode) file

    :type fds: file
    :type fmt: str
    :raise TaurusInternalException: for an unknown dump format
    """
    if fmt == self.JSON:
        serialized = to_json(self).encode('utf-8')
    elif fmt == self.YAML:
        serialized = yaml.safe_dump(self, default_flow_style=False, explicit_start=True,
                                    canonical=False, allow_unicode=True,
                                    encoding='utf-8', width=float("inf"))
    else:
        raise TaurusInternalException("Unknown dump format: %s" % fmt)

    fds.write(serialized)
    fds.write("\n".encode('utf-8'))
def write(self, fds, fmt):
    """
    Write config into opened (binary-mode) file

    :type fds: file
    :type fmt: str
    :raise TaurusInternalException: for an unknown dump format
    """
    if fmt == self.JSON:
        serialized = to_json(self).encode('utf-8')
    elif fmt == self.YAML:
        serialized = yaml.dump(self, default_flow_style=False, explicit_start=True,
                               canonical=False, allow_unicode=True,
                               encoding='utf-8', width=float("inf"))
    else:
        raise TaurusInternalException("Unknown dump format: %s" % fmt)

    fds.write(serialized)
    fds.write("\n".encode('utf-8'))
def test_stdev_performance(self):
    # Aggregate a slow JTL fixture and verify both stdev values and that
    # percentiles are never serialized empty; logs elapsed time per datapoint.
    start = time.time()
    self.configure(RESOURCES_DIR + "jmeter/jtl/slow-stdev.jtl")
    res = list(self.obj.datapoints(final_pass=True))
    lst_json = to_json(res)

    self.assertNotIn('"perc": {},', lst_json)

    elapsed = time.time() - start
    ROOT_LOGGER.debug("Elapsed/per datapoint: %s / %s", elapsed, elapsed / len(res))
    # self.assertLess(elapsed, len(res))  # less than 1 datapoint per sec is a no-go

    # expected per-datapoint stdev values, rounded to 14 decimal places
    exp = [0.53060066889723, 0.39251356581014, 0.388405157629, 0.38927586980868,
           0.30511697736531, 0.21160424043633, 0.07339064994943]
    self.assertEqual(exp, [round(x[DataPoint.CURRENT][''][KPISet.STDEV_RESP_TIME], 14) for x in res])
def test_negative_response_time_scaling_crash(self):
    """Negative response times must only trigger a warning, never a crash."""
    self.obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
    self.obj.prepare()
    self.sniff_log(self.obj.log)

    mock = MockReader()
    samples = [
        (1, "first", 'FAILx3'),
        (2, "first", 'FAILx1'),
        (5, "first", None),
        (7, "second", 'FAILx3'),
        (3, "first", 'FAILx3'),
        (6, "second", 'unique FAIL'),
    ]
    for stamp, label, err in samples:
        # negated first r() yields the negative response time under test
        mock.data.append((stamp, label, 1, -r(), r(), r(), 200, err, '', 0))

    self.obj.add_underling(mock)
    self.obj.check()
    for point in self.obj.datapoints():
        self.obj.log.info(to_json(point))

    self.assertIn("Negative response time reported", self.log_recorder.warn_buff.getvalue())
def test_stdev_performance(self):
    # Aggregate a slow JTL fixture and verify both stdev values and that
    # percentiles are never serialized empty; logs elapsed time per datapoint.
    start = time.time()
    self.configure(RESOURCES_DIR + "/jmeter/jtl/slow-stdev.jtl")
    res = list(self.obj.datapoints(final_pass=True))
    lst_json = to_json(res)

    self.assertNotIn('"perc": {},', lst_json)

    elapsed = time.time() - start
    ROOT_LOGGER.debug("Elapsed/per datapoint: %s / %s", elapsed, elapsed / len(res))
    # self.assertLess(elapsed, len(res))  # less than 1 datapoint per sec is a no-go

    # expected per-datapoint stdev values (full float precision)
    exp = [
        2.2144798867972773, 0.7207704268609725, 0.606834452578833,
        0.8284089170237546, 0.5858142211763572, 0.622922628329711,
        0.5529488620851849, 0.6933748292117727, 0.4876162181858197,
        0.42471180222446503, 0.2512251128133865
    ]
    self.assertEqual(exp, [x[DataPoint.CURRENT][''][KPISet.STDEV_RESP_TIME] for x in res])
def test_by_name(self, name, configuration, taurus_config, resource_files, proj_id):
    """
    Look up a test by name/configuration-type/project, creating it if absent;
    upload taurus config and resource files for taurus-type tests.

    :type name: str
    :rtype: str
    """
    tests = self.get_tests()
    test_id = None
    for test in tests:
        self.log.debug("Test: %s", test)
        # match on name, configuration type and (when given) project id
        if "name" in test and test['name'] == name:
            if test['configuration']['type'] == configuration['type']:
                if not proj_id or proj_id == test['projectId']:
                    test_id = test['id']
                    self.log.debug("Matched: %s", test)

    if not test_id:
        self.log.debug("Creating new test")
        url = self.address + '/api/latest/tests'
        data = {"name": name, "projectId": proj_id, "configuration": configuration}
        # NOTE(review): leading space in " application/json" looks accidental - confirm
        hdr = {"Content-Type": " application/json"}
        resp = self._request(url, json.dumps(data), headers=hdr)
        test_id = resp['result']['id']

    if configuration['type'] == 'taurus':  # FIXME: this is weird way to code
        self.log.debug("Uploading files into the test")
        url = '%s/api/latest/tests/%s/files' % (self.address, test_id)
        body = MultiPartForm()
        body.add_file_as_string('script', 'taurus.json', to_json(taurus_config))
        for rfile in resource_files:
            body.add_file('files[]', rfile)

        hdr = {"Content-Type": body.get_content_type()}
        # upload result is unused - the dead 'response' local was dropped;
        # failures surface via _request itself
        self._request(url, body.form_as_bytes(), headers=hdr)

    self.log.debug("Using test ID: %s", test_id)
    return test_id
def test_stdev_performance(self):
    """Stdev values (rounded to 14 places) over a big JTL must match the reference list."""
    started = time.time()
    self.configure(RESOURCES_DIR + "jmeter/jtl/slow-stdev.jtl")

    res = list(self.obj.datapoints(final_pass=True))
    serialized = to_json(res)
    self.assertNotIn('"perc": {},', serialized)

    elapsed = time.time() - started
    ROOT_LOGGER.debug("Elapsed/per datapoint: %s / %s", elapsed, elapsed / len(res))
    # self.assertLess(elapsed, len(res))  # less than 1 datapoint per sec is a no-go

    exp = [
        0.53060066889723, 0.39251356581014, 0.388405157629,
        0.52855748890714, 0.39107758224016, 0.38999119030886,
        0.32537625773864, 0.47057465198195, 0.2746790136753,
        0.23251104555698, 0.08369447671202,
    ]
    actual = [round(point[DataPoint.CURRENT][''][KPISet.STDEV_RESP_TIME], 14) for point in res]
    self.assertEqual(exp, actual)
def test_stdev_performance(self):
    """Check stdev aggregation speed and exact values over a large recorded JTL."""
    t0 = time.time()
    self.configure(RESOURCES_DIR + "/jmeter/jtl/slow-stdev.jtl")
    res = list(self.obj.datapoints(final_pass=True))

    lst_json = to_json(res)
    self.assertNotIn('"perc": {},', lst_json)

    elapsed = time.time() - t0
    ROOT_LOGGER.debug("Elapsed/per datapoint: %s / %s", elapsed, elapsed / len(res))
    # self.assertLess(elapsed, len(res))  # less than 1 datapoint per sec is a no-go

    expected = [2.2144798867972773, 0.7207704268609725, 0.606834452578833, 0.8284089170237546,
                0.5858142211763572, 0.622922628329711, 0.5529488620851849, 0.6933748292117727,
                0.4876162181858197, 0.42471180222446503, 0.2512251128133865]
    self.assertEqual(expected,
                     [dp[DataPoint.CURRENT][''][KPISet.STDEV_RESP_TIME] for dp in res])
def test_by_name(self, name, configuration, taurus_config, resource_files):
    """
    :type name: str
    :rtype: str
    """
    test_id = None
    for test in self.get_tests():
        self.log.debug("Test: %s", test)
        # Existing test is reused when both the name and the config type match
        matches = "name" in test and test["name"] == name \
            and test["configuration"]["type"] == configuration["type"]
        if matches:
            test_id = test["id"]

    if not test_id:
        self.log.debug("Creating new test")
        url = self.address + "/api/latest/tests"
        data = {"name": name, "configuration": configuration}
        hdr = {"Content-Type": " application/json"}
        resp = self._request(url, json.dumps(data), headers=hdr)
        test_id = resp["result"]["id"]

    if configuration["type"] == "taurus":  # FIXME: this is weird way to code
        self.log.debug("Uploading files into the test")
        url = "%s/api/latest/tests/%s/files" % (self.address, test_id)
        form = MultiPartForm()
        form.add_file_as_string("script", "taurus.json", to_json(taurus_config))
        for rfile in resource_files:
            form.add_file("files[]", rfile)
        hdr = {"Content-Type": form.get_content_type()}
        self._request(url, form.form_as_bytes(), headers=hdr)

    self.log.debug("Using test ID: %s", test_id)
    return test_id
def _dump_vars(self, key):
    """
    Serialize the scenario option `key` into CLI arguments.

    A plain string value is passed through as-is; a list or a mapping is
    dumped to a JSON artifact file (Postman environment format: a "values"
    list of key/value/type/enabled objects) and the file path is passed
    to the CLI instead.

    :param key: scenario option name, also used as the CLI flag name
    :rtype: list[str]
    """
    cmdline = []
    vals = self.get_scenario().get(key)
    if isinstance(vals, string_types):
        cmdline += ["--%s" % key, vals]
    else:
        data = {"values": []}
        if isinstance(vals, list):
            data['values'] = vals
        else:
            for varname, val in iteritems(vals):
                # BUGFIX: must append each variable; the previous assignment
                # replaced the list and kept only the last mapping entry
                data["values"].append({
                    "key": varname,
                    "value": val,
                    "type": "any",
                    "enabled": True
                })
        fname = self.engine.create_artifact(key, ".json")
        with open(fname, "wt") as fds:
            fds.write(to_json(data))
        cmdline += ["--%s" % key, fname]
    return cmdline
def _dump_vars(self, key):
    """
    Serialize the scenario option `key` into CLI arguments.

    A plain string value is passed through as-is; a list or a mapping is
    dumped to a JSON artifact file (Postman environment format: a "values"
    list of key/value/type/enabled objects) and the file path is passed
    to the CLI instead.

    :param key: scenario option name, also used as the CLI flag name
    :rtype: list[str]
    """
    cmdline = []
    vals = self.get_scenario().get(key)
    if isinstance(vals, str):
        cmdline += ["--%s" % key, vals]
    else:
        data = {"values": []}
        if isinstance(vals, list):
            data['values'] = vals
        else:
            for varname, val in iteritems(vals):
                # BUGFIX: must append each variable; the previous assignment
                # replaced the list and kept only the last mapping entry
                data["values"].append({
                    "key": varname,
                    "value": val,
                    "type": "any",
                    "enabled": True
                })
        fname = self.engine.create_artifact(key, ".json")
        with open(fname, "wt") as fds:
            fds.write(to_json(data))
        cmdline += ["--%s" % key, fname]
    return cmdline
def send_monitoring_data(self, engine_id, data):
    """Upload engine monitoring data as a JSON file named per session and engine."""
    file_name = '%s-%s-c.monitoring.json' % (self['id'], engine_id)
    self.upload_file(file_name, to_json(data))
def _request(self, url, data=None, headers=None, method=None, raw_result=False):
    """
    Perform an authenticated HTTP call against the API.

    :param url: str
    :type data: Union[dict,str]
    :param headers: dict
    :param method: str, HTTP verb; when omitted it is inferred (POST with payload, else GET)
    :param raw_result: return decoded response text instead of parsed JSON
    :return: dict
    :raises TaurusNetworkError: on HTTP errors, API-reported errors and non-JSON replies
    """
    if not headers:
        headers = {}
    headers["X-Client-Id"] = "Taurus"
    headers["X-Client-Version"] = VERSION

    if isinstance(self.token, string_types) and ':' in self.token:
        # 'user:password' style token -> HTTP Basic auth
        token = self.token
        if isinstance(token, text_type):
            token = token.encode('ascii')
        token = base64.b64encode(token).decode('ascii')
        headers['Authorization'] = 'Basic ' + token
    elif self.token:
        headers["X-Api-Key"] = self.token

    if method:
        log_method = method
    else:
        log_method = 'GET' if data is None else 'POST'

    url = str(url)

    if isinstance(data, text_type):
        data = data.encode("utf8")

    if isinstance(data, (dict, list)):
        # Serialize structured payloads to JSON
        data = to_json(data)
        headers["Content-Type"] = "application/json"

    self.log.debug("Request: %s %s %s", log_method, url, data[:self.logger_limit] if data else None)
    response = self.http_request(method=log_method, url=url, data=data, headers=headers,
                                 cookies=self._cookies, timeout=self.timeout)

    resp = response.content
    if not isinstance(resp, str):
        resp = resp.decode()

    self.log.debug("Response: %s", resp[:self.logger_limit] if resp else None)
    if response.status_code >= 400:
        try:
            result = json.loads(resp) if len(resp) else {}
            if 'error' in result and result['error']:
                raise TaurusNetworkError("API call error %s: %s" % (url, result['error']))
            else:
                # BUGFIX: previously an HTTP error without an 'error' field in its
                # JSON body was silently treated as success; raise like the newer
                # variant of this method does
                raise TaurusNetworkError(
                    "API call error %s on %s: %s" % (response.status_code, url, result))
        except ValueError:
            # Body was not JSON; report the bare HTTP status
            raise TaurusNetworkError(
                "API call error %s: %s %s" % (url, response.status_code, response.reason))

    if raw_result:
        return resp

    try:
        result = json.loads(resp) if len(resp) else {}
    except ValueError as exc:
        self.log.debug('Response: %s', resp)
        raise TaurusNetworkError("Non-JSON response from API: %s" % exc)

    if 'error' in result and result['error']:
        raise TaurusNetworkError("API call error %s: %s" % (url, result['error']))

    return result
def enc_dec_iter(vals):
    """Round-trip an iterable of key/value pairs through to_json and back to a dict."""
    pairs = list(vals)
    mapping = {item[0]: item[1] for item in pairs}
    return json.loads(to_json(mapping))
def create_project(self, proj_name):
    """Create a project with the given name and return its ID."""
    headers = {"Content-Type": "application/json"}
    payload = to_json({"name": str(proj_name)})
    data = self._request(self.address + '/api/latest/projects', payload, headers=headers)
    return data['result']['id']
def _request(self, url, data=None, headers=None, method=None, raw_result=False):
    """
    :param url: str
    :type data: Union[dict,str]
    :param headers: dict
    :param method: str
    :return: dict
    """
    # Tag every request with client identification headers
    if not headers:
        headers = {}

    headers["X-Client-Id"] = "Taurus"
    headers["X-Client-Version"] = VERSION

    # Explicitly provided API key wins over the stored token
    has_auth = headers and "X-Api-Key" in headers
    if has_auth:
        pass  # all is good, we have auth provided
    elif isinstance(self.token, string_types) and ':' in self.token:
        # 'user:password' style token -> HTTP Basic auth
        token = self.token
        if isinstance(token, text_type):
            token = token.encode('ascii')
        token = base64.b64encode(token).decode('ascii')
        headers['Authorization'] = 'Basic ' + token
    elif self.token:
        headers["X-Api-Key"] = self.token

    # Infer the HTTP verb when not given: POST if there is a payload, GET otherwise
    if method:
        log_method = method
    else:
        log_method = 'GET' if data is None else 'POST'

    url = str(url)

    if isinstance(data, text_type):
        data = data.encode("utf-8")

    if isinstance(data, (dict, list)):
        # Serialize structured payloads to JSON
        data = to_json(data)
        headers["Content-Type"] = "application/json"

    self.log.debug("Request: %s %s %s", log_method, url, data[:self.logger_limit] if data else None)
    response = self.http_request(
        method=log_method, url=url, data=data, headers=headers, timeout=self.timeout)

    resp = response.content
    if not isinstance(resp, str):
        resp = resp.decode()

    self.log.debug("Response [%s]: %s", response.status_code, resp[:self.logger_limit] if resp else None)

    if response.status_code >= 400:
        # Prefer the API's own error message when the body is JSON;
        # an HTTP error status is always fatal here
        try:
            result = json.loads(resp) if len(resp) else {}
            if 'error' in result and result['error']:
                raise TaurusNetworkError("API call error %s: %s" % (url, result['error']))
            else:
                raise TaurusNetworkError("API call error %s on %s: %s" % (response.status_code, url, result))
        except ValueError:
            # Body was not JSON; report the bare HTTP status instead
            raise TaurusNetworkError("API call error %s: %s %s" % (url, response.status_code, response.reason))

    if raw_result:
        return resp

    try:
        result = json.loads(resp) if len(resp) else {}
    except ValueError as exc:
        self.log.debug('Response: %s', resp)
        raise TaurusNetworkError("Non-JSON response from API: %s" % exc)

    if 'error' in result and result['error']:
        raise TaurusNetworkError("API call error %s: %s" % (url, result['error']))

    return result