def pytest_runtest_teardown(self, item):
    """Hook wrapper: after teardown completes, enrich the scheduled Allure
    test result for *item* with labels (custom, tags, suites, environment)
    and links.

    BUG FIX: the ``pytest_markers`` tag extend was duplicated, so every tag
    label appeared twice on the result; the duplicate extend is removed.
    """
    yield  # let the actual teardown run first
    uuid = self._cache.get(item.nodeid)
    test_result = self.allure_logger.get_test(uuid)
    # Custom @allure.label(...) style labels declared on the test.
    test_result.labels.extend([
        Label(name=name, value=value)
        for name, value in allure_labels(item)
    ])
    # Pytest markers become Allure tags (previously added twice).
    test_result.labels.extend([
        Label(name=LabelType.TAG, value=value)
        for value in pytest_markers(item)
    ])
    # parentSuite / suite / subSuite derived from the item's location.
    test_result.labels.extend([
        Label(name=name, value=value)
        for name, value in allure_suite_labels(item)
    ])
    # Environment labels.
    test_result.labels.append(Label(name=LabelType.HOST, value=self._host))
    test_result.labels.append(
        Label(name=LabelType.THREAD, value=self._thread))
    test_result.labels.append(
        Label(name=LabelType.FRAMEWORK, value='pytest'))
    test_result.labels.append(
        Label(name=LabelType.LANGUAGE, value=platform_label()))
    test_result.labels.append(
        Label(name='package', value=allure_package(item)))
    # Issue / test-case / plain links declared on the test.
    test_result.links.extend([
        Link(link_type, url, name)
        for link_type, url, name in allure_links(item)
    ])
def start_scenario(self, scenario):
    """Open fixture/execution contexts and schedule *scenario* as an
    Allure test case, populating metadata, links and labels."""
    self.current_scenario_uuid = uuid4()
    self.fixture_context.enter()
    self.execution_context.enter()
    self.execution_context.append(self.current_scenario_uuid)

    result = TestResult(uuid=self.current_scenario_uuid, start=now())
    result.name = scenario_name(scenario)
    result.historyId = scenario_history_id(scenario)
    result.description = '\n'.join(scenario.description)
    result.parameters = scenario_parameters(scenario)

    # Link patterns are user-configurable via behave's -D userdata.
    userdata = self.behave_config.userdata
    issue_pattern = userdata.get('AllureFormatter.issue_pattern', None)
    link_pattern = userdata.get('AllureFormatter.link_pattern', None)
    result.links.extend(scenario_links(scenario,
                                       issue_pattern=issue_pattern,
                                       link_pattern=link_pattern))

    result.labels.extend(scenario_labels(scenario))
    result.labels.append(
        Label(name=LabelType.FEATURE, value=scenario.feature.name))
    result.labels.append(Label(name=LabelType.FRAMEWORK, value='behave'))
    result.labels.append(
        Label(name=LabelType.LANGUAGE, value=platform_label()))

    self.logger.schedule_test(self.current_scenario_uuid, result)
def stop_current_test(self, name, attributes):
    """Finalize and close the Allure result for the Robot Framework test
    that just finished, copying status, labels, links and details from
    the listener *attributes*."""
    uuid = self.stack.pop()
    test = self.reporter.get_test(uuid)
    test.status = utils.get_allure_status(attributes.get('status'))

    test.labels.extend(utils.get_allure_suites(attributes.get('longname')))
    test.labels.extend(allure_tags(attributes))
    # BDD-style labels and typed links declared through RF tags.
    for label_type in (LabelType.EPIC, LabelType.FEATURE, LabelType.STORY):
        test.labels.extend(allure_labels(attributes, label_type))
    for link_type in (LinkType.ISSUE, LinkType.TEST_CASE, LinkType.LINK):
        test.links.extend(allure_links(attributes, link_type))
    # Environment labels, appended in one batch.
    test.labels.extend([
        Label(name=LabelType.THREAD, value=self.pool_id),
        Label(name=LabelType.HOST, value=host_tag()),
        Label(name=LabelType.FRAMEWORK, value='robotframework'),
        Label(name=LabelType.LANGUAGE, value=platform_label()),
    ])

    test.statusDetails = StatusDetails(message=attributes.get('message'),
                                       trace=self.get_traceback_message())
    test.description = attributes.get('doc')
    last_link = list(self.links.values())[-1] if self.links else None
    if attributes.get(Severity.CRITICAL, 'no') == 'yes':
        test.labels.append(
            Label(name=LabelType.SEVERITY, value=Severity.CRITICAL))
    if last_link:
        test.links.append(Link(LinkType.LINK, last_link, 'Link'))
    test.stop = now()
    self.reporter.close_test(uuid)
def start_test(self, parent_uuid, uuid, name, parameters, context):
    """Enter execution contexts and schedule the scenario carried in
    *context* as an Allure test case under the given *uuid*."""
    scenario = context['scenario']
    self.fixture_context.enter()
    self.execution_context.enter()
    self.execution_context.append(uuid)

    result = TestResult(uuid=uuid, start=now())
    result.name = scenario_name(scenario)
    result.historyId = scenario_history_id(scenario)
    result.description = '\n'.join(scenario.description)
    result.parameters = scenario_parameters(scenario)
    # Scenario tags become Allure tag labels.
    result.labels.extend(Label(name=LabelType.TAG, value=tag)
                         for tag in scenario_tags(scenario))
    result.labels.extend([
        Label(name=LabelType.SEVERITY,
              value=scenario_severity(scenario).value),
        Label(name=LabelType.FEATURE, value=scenario.feature.name),
        Label(name=LabelType.FRAMEWORK, value='behave'),
        Label(name=LabelType.LANGUAGE, value=platform_label()),
    ])
    self.logger.schedule_test(uuid, result)
def pytest_bdd_before_scenario(self, request, feature, scenario):
    """Register the Allure test result for a pytest-bdd scenario just
    before it runs, and install a finalizer that closes it."""
    uuid = get_uuid(request.node.nodeid)
    scenario_full_name = get_full_name(feature, scenario)
    scenario_display_name = get_name(request.node, scenario)
    with self.lifecycle.schedule_test_case(uuid=uuid) as test_result:
        test_result.fullName = scenario_full_name
        test_result.name = scenario_display_name
        test_result.start = now()
        # Pytest markers become Allure tags.
        test_result.labels.extend(
            Label(name=LabelType.TAG, value=marker)
            for marker in pytest_markers(request.node))
        # Environment and feature labels, appended in one batch.
        test_result.labels.extend([
            Label(name=LabelType.HOST, value=self.host),
            Label(name=LabelType.THREAD, value=self.thread),
            Label(name=LabelType.FRAMEWORK, value="pytest-bdd"),
            Label(name=LabelType.LANGUAGE, value=platform_label()),
            Label(name=LabelType.FEATURE, value=feature.name),
        ])
        test_result.links.extend(
            Link(link_type, url, link_name)
            for link_type, url, link_name in allure_links(request.node))
        test_result.parameters = get_params(request.node)
    request.node.addfinalizer(partial(self._scenario_finalizer, scenario))
def pytest_runtest_protocol(self, item, nextitem):
    """Hook wrapper around the whole run of one test item: schedule the
    Allure result (and fixture containers) before the run, then after the
    run attach parameters, labels and links and close the result.

    Improvements over the original: the parametrization loop no longer
    uses a hard-to-read conditional expression as its iterable and no
    longer re-fetches the test result on every iteration; the 'package'
    label and the links update now use the same keyword-arg / ``extend``
    style as the rest of the file.
    """
    uuid = self._cache.set(item.nodeid)
    # Register every fixture used by the item as an Allure container
    # and attach this test as its child.
    for fixturedef in _test_fixtures(item):
        group_uuid = self._cache.get(fixturedef)
        if not group_uuid:
            group_uuid = self._cache.set(fixturedef)
            group = TestResultContainer(uuid=group_uuid)
            self.allure_logger.start_group(group_uuid, group)
        self.allure_logger.update_group(group_uuid, children=uuid)

    test_case = TestResult(name=allure_name(item), uuid=uuid)
    self.allure_logger.schedule_test(uuid, test_case)
    if hasattr(item, 'function'):
        test_case.description = item.function.__doc__

    yield  # the actual setup/call/teardown protocol runs here

    # Record parametrize values (callspec only exists on parametrized items).
    if hasattr(item, 'callspec'):
        test_result = self.allure_logger.get_test(uuid)
        if test_result:
            for name, value in item.callspec.params.items():
                test_result.parameters.append(
                    Parameter(name, represent(value)))

    test_case.labels.extend([
        Label(name=name, value=value)
        for name, value in allure_labels(item)
    ])
    test_case.labels.extend([
        Label(name=LabelType.TAG, value=value)
        for value in pytest_markers(item)
    ])
    test_case.labels.append(Label(name=LabelType.HOST, value=self._host))
    test_case.labels.append(
        Label(name=LabelType.THREAD, value=self._thread))
    test_case.labels.append(Label(name=LabelType.FRAMEWORK, value='pytest'))
    test_case.labels.append(
        Label(name=LabelType.LANGUAGE, value=platform_label()))
    test_case.links.extend([
        Link(link_type, url, name)
        for link_type, url, name in allure_links(item)
    ])
    test_case.fullName = allure_full_name(item)
    test_case.historyId = md5(test_case.fullName)
    test_case.labels.append(Label(name='package', value=allure_package(item)))

    uuid = self._cache.pop(item.nodeid)
    self.allure_logger.close_test(uuid)
def pytest_runtest_teardown(self, item):
    """Hook wrapper: once teardown has run, enrich the scheduled Allure
    result for *item* with labels and links."""
    yield
    result = self.allure_logger.get_test(self._cache.get(item.nodeid))
    # Custom labels declared on the test.
    result.labels.extend(Label(name=label_name, value=label_value)
                         for label_name, label_value in allure_labels(item))
    # Pytest markers become Allure tags.
    result.labels.extend(Label(name=LabelType.TAG, value=marker)
                         for marker in pytest_markers(item))
    # Suite hierarchy labels derived from the item's location.
    result.labels.extend(Label(name=label_name, value=label_value)
                         for label_name, label_value in allure_suite_labels(item))
    # Environment labels, appended in one batch.
    result.labels.extend([
        Label(name=LabelType.HOST, value=self._host),
        Label(name=LabelType.THREAD, value=self._thread),
        Label(name=LabelType.FRAMEWORK, value='pytest'),
        Label(name=LabelType.LANGUAGE, value=platform_label()),
        Label(name='package', value=allure_package(item)),
    ])
    result.links.extend(Link(link_type, url, link_name)
                        for link_type, url, link_name in allure_links(item))
def startTest(self, event):
    """nose2 hook: schedule an Allure test result when a test starts.

    Does nothing unless the plugin is registered.
    """
    if not self.is_registered():
        return
    with self.lifecycle.schedule_test_case() as test_result:
        test_result.name = name(event)
        test_result.start = timestamp_millis(event.startTime)
        test_result.fullName = fullname(event)
        test_result.testCaseId = md5(test_result.fullName)
        test_result.historyId = md5(event.test.id())
        test_result.labels.extend(labels(event.test))
        # Environment labels, appended in one batch.
        test_result.labels.extend([
            Label(name=LabelType.HOST, value=self._host),
            Label(name=LabelType.THREAD, value=self._thread),
            Label(name=LabelType.FRAMEWORK, value='nose2'),
            Label(name=LabelType.LANGUAGE, value=platform_label()),
        ])
        test_result.parameters = params(event)
def start_scenario(self, scenario):
    """Enter fixture/execution contexts and schedule *scenario* as an
    Allure test case with tag, severity, feature and environment labels."""
    self.current_scenario_uuid = uuid4()
    self.fixture_context.enter()
    self.execution_context.enter()
    self.execution_context.append(self.current_scenario_uuid)

    result = TestResult(uuid=self.current_scenario_uuid, start=now())
    result.name = scenario_name(scenario)
    result.historyId = scenario_history_id(scenario)
    result.description = '\n'.join(scenario.description)
    result.parameters = scenario_parameters(scenario)
    result.labels.extend(Label(name=LabelType.TAG, value=tag)
                         for tag in scenario_tags(scenario))
    result.labels.extend([
        Label(name=LabelType.SEVERITY,
              value=scenario_severity(scenario).value),
        Label(name=LabelType.FEATURE, value=scenario.feature.name),
        Label(name=LabelType.FRAMEWORK, value='behave'),
        Label(name=LabelType.LANGUAGE, value=platform_label()),
    ])
    self.logger.schedule_test(self.current_scenario_uuid, result)
def pytest_bdd_before_scenario(self, request, feature, scenario):
    """Schedule the Allure result for a pytest-bdd scenario before it
    runs, and install the finalizer that will close it."""
    uuid = get_uuid(request.node.nodeid)
    scenario_full_name = get_full_name(feature, scenario)
    with self.lifecycle.schedule_test_case(uuid=uuid) as test_result:
        test_result.fullName = scenario_full_name
        test_result.start = now()
        # Environment and feature labels, appended in one batch.
        test_result.labels.extend([
            Label(name=LabelType.HOST, value=self.host),
            Label(name=LabelType.THREAD, value=self.thread),
            Label(name=LabelType.FRAMEWORK, value="pytest-bdd"),
            Label(name=LabelType.LANGUAGE, value=platform_label()),
            Label(name=LabelType.FEATURE, value=feature.name),
        ])
    request.node.addfinalizer(partial(self._scenario_finalizer, scenario))
def stop_current_test(self, name, attributes):
    """Close out the Allure result for the Robot Framework test that just
    finished: status, suite/tag labels, details and the last stored link."""
    uuid = self.stack.pop()
    result = self.reporter.get_test(uuid)
    result.status = utils.get_allure_status(attributes.get('status'))
    result.labels.extend(utils.get_allure_suites(attributes.get('longname')))
    result.labels.extend(utils.get_allure_tags(attributes.get('tags')))
    result.labels.append(utils.get_allure_thread(self.pool_id))
    # Environment labels, appended in one batch.
    result.labels.extend([
        Label(name=LabelType.HOST, value=host_tag()),
        Label(name=LabelType.FRAMEWORK, value='robotframework'),
        Label(name=LabelType.LANGUAGE, value=platform_label()),
    ])
    result.statusDetails = StatusDetails(message=attributes.get('message'))
    result.description = attributes.get('doc')
    # Attach the most recently stored link, if any non-empty one exists.
    last_link = list(self.links.values())[-1] if self.links else None
    if last_link:
        result.links.append(Link(LinkType.LINK, last_link, 'Link'))
    result.stop = now()
    self.reporter.close_test(uuid)
def stop_current_test(self, name, attributes):
    """Finalize the Allure result for the finished Robot Framework test:
    status, BDD labels/links, details, criticality and the last link."""
    uuid = self.stack.pop()
    result = self.reporter.get_test(uuid)
    result.status = utils.get_allure_status(attributes.get('status'))
    result.labels.extend(utils.get_allure_suites(attributes.get('longname')))
    result.labels.extend(allure_tags(attributes))
    # BDD-style labels and typed links declared through RF tags.
    for label_type in (LabelType.EPIC, LabelType.FEATURE, LabelType.STORY):
        result.labels.extend(allure_labels(attributes, label_type))
    for link_type in (LinkType.ISSUE, LinkType.TEST_CASE, LinkType.LINK):
        result.links.extend(allure_links(attributes, link_type))
    # Environment labels.
    result.labels.append(Label(name=LabelType.THREAD, value=self.pool_id))
    result.labels.append(Label(name=LabelType.HOST, value=host_tag()))
    result.labels.append(
        Label(name=LabelType.FRAMEWORK, value='robotframework'))
    result.labels.append(
        Label(name=LabelType.LANGUAGE, value=platform_label()))
    result.statusDetails = StatusDetails(
        message=attributes.get('message'),
        trace=self.get_traceback_message())
    result.description = attributes.get('doc')
    last_link = list(self.links.values())[-1] if self.links else None
    if attributes.get(Severity.CRITICAL, 'no') == 'yes':
        result.labels.append(
            Label(name=LabelType.SEVERITY, value=Severity.CRITICAL))
    if last_link:
        result.links.append(Link(LinkType.LINK, last_link, 'Link'))
    result.stop = now()
    self.reporter.close_test(uuid)
def start_scenario(self, scenario):
    """Enter the group context and schedule *scenario* as an Allure test
    case with its links and labels."""
    self.current_scenario_uuid = uuid4()
    self.group_context.enter()

    result = TestResult(uuid=self.current_scenario_uuid, start=now())
    result.name = scenario_name(scenario)
    result.fullName = get_fullname(scenario)
    result.historyId = scenario_history_id(scenario)
    result.description = '\n'.join(scenario.description)
    result.parameters = scenario_parameters(scenario)
    # Issue/link patterns were resolved when the formatter was created.
    result.links.extend(scenario_links(scenario,
                                       issue_pattern=self.issue_pattern,
                                       link_pattern=self.link_pattern))
    result.labels.extend(scenario_labels(scenario))
    result.labels.extend([
        Label(name=LabelType.FEATURE, value=scenario.feature.name),
        Label(name=LabelType.FRAMEWORK, value='behave'),
        Label(name=LabelType.LANGUAGE, value=platform_label()),
    ])
    self.logger.schedule_test(self.current_scenario_uuid, result)
def __init__(self, lifecycle):
    """Store the Allure lifecycle and cache environment metadata."""
    # Allure lifecycle used to schedule/write test results.
    self.lifecycle = lifecycle
    # Cached once at construction: language/platform label and host tag.
    self._platform = platform_label()
    self._host = host_tag()
    # Message/traceback of the most recent failure; populated elsewhere
    # (both start as None until a failure is observed).
    self._current_msg = None
    self._current_tb = None
def process_test_case(self, test_case, file_modication_datetime=None):
    """Convert one planemo/Galaxy *test_case* dict into an Allure result.

    Schedules a test result named after the test's "id", records timing,
    suite labels and tool link, attaches job outputs and any detected
    problems, resolves the final status, and writes the result.

    :param test_case: dict with an "id" and optional "data" payload.
    :param file_modication_datetime: optional timestamp used for
        start/stop recording (note: parameter name keeps the original
        spelling for caller compatibility).
    """
    with self.lifecycle.schedule_test_case() as test_result:
        test_index = test_case["id"]
        # Guard against explicit None values, not just missing keys.
        test_data = test_case.get("data") or {}
        job = test_data.get("job") or {}
        test_result.name = test_index
        self._record_start_stop(test_result, file_modication_datetime, job)
        # The test id doubles as fullName / testCaseId / historyId.
        test_result.fullName = test_index
        test_result.testCaseId = md5(test_index)
        test_result.historyId = md5(test_index)
        tool_id = self._record_suite_labels(test_result, test_data, job)
        self._attach_data("test_data",
                          json.dumps(test_data, indent=JSON_INDENT),
                          attachment_type=AttachmentType.JSON)
        # Attach raw job outputs; list values (job messages) become JSON,
        # non-empty strings become plain-text attachments.
        for key in [
                "stderr", "stdout", "command_line", "external_id",
                "job_messages"
        ]:
            val = job.get(key)
            if not val:
                continue
            if isinstance(val, list):
                attachment_type = AttachmentType.JSON  # job messages
                val = json.dumps(val, indent=JSON_INDENT)
            else:
                if not val.strip():
                    continue
                attachment_type = AttachmentType.TEXT
            self._attach_data(key, val, attachment_type=attachment_type)
        # Collect a human-readable problem message for statusDetails.
        problem_message = None
        for key in ["execution_problem", "output_problems"]:
            val = test_data.get(key)
            if not val:
                continue
            if isinstance(val, list) and val:
                # remove duplicated messages...
                # (set() also drops the original ordering; problem_message
                # takes whichever entry ends up first after dedup)
                val = list(set(val))
                attachment_type = AttachmentType.HTML
                # NOTE(review): values are interpolated into HTML without
                # escaping — presumably trusted tool output; confirm.
                as_html_list = "<ul>"
                as_html_list += "\n".join(
                    [f"<li><pre>{v}</pre></li>" for v in val])
                as_html_list += "</ul>"
                problem_message = val[0]
                val = as_html_list
            else:
                if not val.strip():
                    continue
                attachment_type = AttachmentType.TEXT
                problem_message = val
            self._attach_data(key, val, attachment_type=attachment_type)
        # Fall back to the job messages when no explicit problem was found.
        if problem_message is None and "job_messages" in job:
            job_messages = job.get("job_messages")
            if job_messages:
                problem_message = str(job_messages)
        test_result.labels.append(
            Label(name=LabelType.FRAMEWORK, value='planemo'))
        test_result.labels.append(
            Label(name=LabelType.LANGUAGE, value=platform_label()))
        self._record_tool_link(test_result, tool_id)
        self._record_status(test_result, test_data)
        if test_result.status in [Status.BROKEN, Status.FAILED]:
            test_result.statusDetails = StatusDetails(
                message=escape_non_unicode_symbols(problem_message
                                                   or "Unknown problem"),
                trace=None)
    self.lifecycle.write_test_case()