def pytest_runtest_teardown(item, nextitem):
    """Artifactor hook: dump the QA contact for *item* after it finishes.

    The contact comes from test metadata when an ``owner`` key is set;
    otherwise it is derived from blame data via ``dig_code`` (top two
    contributors with ownership percentages). Falls back to
    "Unknown,None" when neither source yields anything.
    """
    name, location = get_test_idents(item)
    qa_string = "Unknown,None"
    if hasattr(item, "_metadata") and item._metadata.get('owner') is not None:
        # The owner is specified in metadata.
        # BUG FIX: the original left the "{}" placeholder unfilled.
        qa_string = "{},from metadata".format(item._metadata.get('owner'))
    else:
        try:
            qa_arr = []
            results = dig_code(item)
            # Keep at most the two largest contributors.
            for idx in range(min(2, len(results))):
                qa_arr.append("{},{:.2f}%\n".format(results[idx][0], results[idx][1]))
            if qa_arr:
                qa_string = "".join(qa_arr)
        except Exception:
            # Blame lookup is best-effort only; keep the fallback string.
            pass
    from fixtures.artifactor_plugin import SLAVEID
    slaveid = SLAVEID or ""
    art_client.fire_hook('filedump', test_location=location, test_name=name,
                         description="QA Contact", contents=str(qa_string),
                         file_type="qa_contact", group_id="qa-contact",
                         slaveid=slaveid)
def resolve_blockers(item, blockers):
    """Evaluate *blockers* for *item* and skip the test if any still blocks.

    Args:
        item: The pytest item being considered.
        blockers: A list/tuple/set of blocker specifications; plain ints are
            treated as Bugzilla bug numbers.

    Raises:
        ValueError: When ``blockers`` is not a list, tuple or set.
    """
    if not isinstance(blockers, (list, tuple, set)):
        raise ValueError("Type of the 'blockers' parameter must be one of: list, tuple, set")
    # Prepare the global env for the kwarg insertion
    global_env = dict(
        appliance_version=version.current_version(),
        appliance_downstream=version.appliance_is_downstream(),
        item=item,
        blockers=blockers,
    )
    # We will now extend the env with fixtures, so they can be used in the guard functions
    # We will however add only those that are not in the global_env otherwise we could overwrite
    # our own stuff.
    params = extract_fixtures_values(item)
    for funcarg, value in params.iteritems():
        if funcarg not in global_env:
            global_env[funcarg] = value
    # Check blockers
    use_blockers = []
    # Bugzilla shortcut: bare ints become "BZ#<n>" specs.
    blockers = map(lambda b: "BZ#{}".format(b) if isinstance(b, int) else b, blockers)
    for blocker in map(Blocker.parse, blockers):
        if blocker.blocks:
            use_blockers.append(blocker)
    # Unblocking: a blocker's "unblock" guard may lift it for this env.
    discard_blockers = set([])
    for blocker in use_blockers:
        unblock_func = kwargify(blocker.kwargs.get("unblock", None))
        local_env = {"blocker": blocker}
        local_env.update(global_env)
        if unblock_func(**local_env):
            discard_blockers.add(blocker)
    for blocker in discard_blockers:
        use_blockers.remove(blocker)
    # We now have those that block testing, so we have to skip
    # Let's go in the order that they were added
    # Custom actions first
    for blocker in use_blockers:
        if "custom_action" in blocker.kwargs:
            action = kwargify(blocker.kwargs["custom_action"])
            local_env = {"blocker": blocker}
            local_env.update(global_env)
            action(**local_env)
    # And then skip
    if use_blockers:
        name, location = get_test_idents(item)
        bugs = [bug.bug_id for bug in use_blockers if hasattr(bug, "bug_id")]
        skip_data = {'type': 'blocker', 'reason': bugs}
        art_client.fire_hook('skip_test', test_location=location, test_name=name,
                             skip_data=skip_data)
        pytest.skip("Skipping due to these blockers:\n{}".format(
            "\n".join(
                "- {}".format(str(blocker)) for blocker in use_blockers
            )
        ))
def pytest_exception_interact(node, call, report):
    """Artifactor hook: on test failure/error, dump tracebacks and a screenshot.

    Sends the full and short tracebacks to artifactor, records failure data in
    ``failed_test_tracking`` for later report generation, and attempts a
    browser screenshot (best effort).
    """
    name, location = get_test_idents(node)
    val = unicode(call.excinfo.value)
    # ASCII-only short traceback; non-ASCII characters are dropped.
    short_tb = '%s\n%s' % (call.excinfo.type.__name__, val.encode('ascii', 'ignore'))
    art_client.fire_hook('filedump', test_location=location, test_name=name,
                         filename="traceback.txt", contents=str(report.longrepr),
                         fd_ident="tb")
    art_client.fire_hook('filedump', test_location=location, test_name=name,
                         filename="short-traceback.txt", contents=short_tb,
                         fd_ident="short_tb")
    # base64 encoded to go into a data uri, same for screenshots
    full_tb = str(report.longrepr).encode('base64').strip()
    # errors are when exceptions are thrown outside of the test call phase
    report.when = getattr(report, 'when', 'setup')
    is_error = report.when != 'call'
    template_data = {
        'name': node.name,
        'file': node.fspath,
        'is_error': is_error,
        'fail_stage': report.when,
        'short_tb': short_tb,
        'full_tb': full_tb,
    }
    # Before trying to take a screenshot, we used to check if one of the browser_fixtures was
    # in this node's fixturenames, but that was too limited and preventing the capture of
    # screenshots. If removing that conditional now makes this too broad, we should consider
    # an isinstance(val, WebDriverException) check in addition to the browser fixture check that
    # exists here in commit 825ef50fd84a060b58d7e4dc316303a8b61b35d2
    try:
        template_data['screenshot'] = utils.browser.browser().get_screenshot_as_base64()
        art_client.fire_hook('filedump', test_location=location, test_name=name,
                             filename="screenshot.png", fd_ident="screenshot", mode="wb",
                             contents_base64=True, contents=template_data['screenshot'])
    except (AttributeError, WebDriverException):
        # See comments utils.browser.ensure_browser_open for why these two exceptions
        template_data['screenshot'] = None
        template_data['screenshot_error'] = 'browser error'
        art_client.fire_hook('filedump', test_location=location, test_name=name,
                             filename="screenshot.txt", fd_ident="screenshot", mode="w",
                             contents_base64=False,
                             contents=template_data['screenshot_error'])
    except Exception as ex:
        # If this fails for any other reason,
        # leave out the screenshot but record the reason
        template_data['screenshot'] = None
        # NOTE(review): ex.message is Python 2 only and may be empty/absent on
        # some exception types — confirm before porting.
        if ex.message:
            screenshot_error = '%s: %s' % (type(ex).__name__, ex.message)
        else:
            screenshot_error = type(ex).__name__
        template_data['screenshot_error'] = screenshot_error
        art_client.fire_hook('filedump', test_location=location, test_name=name,
                             filename="screenshot.txt", fd_ident="screenshot", mode="w",
                             contents_base64=False,
                             contents=template_data['screenshot_error'])
    failed_test_tracking['tests'].append(template_data)
    if is_error:
        failed_test_tracking['total_errored'] += 1
    else:
        failed_test_tracking['total_failed'] += 1
def resolve_blockers(item, blockers):
    """Evaluate *blockers* for *item* and skip the test if any still blocks.

    Unlike the stricter variant, a single non-collection blocker is wrapped
    in a list rather than rejected.

    Args:
        item: The pytest item being considered.
        blockers: A blocker spec or collection of specs; plain ints are
            treated as Bugzilla bug numbers.
    """
    # Prepare the global env for the kwarg insertion
    global_env = dict(
        appliance_version=version.current_version(),
        appliance_downstream=version.appliance_is_downstream(),
        item=item,
        blockers=blockers,
    )
    # We will now extend the env with fixtures, so they can be used in the guard functions
    # We will however add only those that are not in the global_env otherwise we could overwrite
    # our own stuff.
    params = extract_fixtures_values(item)
    for funcarg, value in params.iteritems():
        if funcarg not in global_env:
            global_env[funcarg] = value
    # Check blockers
    use_blockers = []
    # Accept a lone blocker spec by wrapping it.
    if not isinstance(blockers, (list, tuple, set)):
        blockers = [blockers]
    # Bugzilla shortcut: bare ints become "BZ#<n>" specs.
    blockers = map(lambda b: "BZ#{}".format(b) if isinstance(b, int) else b, blockers)
    for blocker in map(Blocker.parse, blockers):
        if blocker.blocks:
            use_blockers.append(blocker)
    # Unblocking: a blocker's "unblock" guard may lift it for this env.
    discard_blockers = set([])
    for blocker in use_blockers:
        unblock_func = kwargify(blocker.kwargs.get("unblock", None))
        local_env = {"blocker": blocker}
        local_env.update(global_env)
        if unblock_func(**local_env):
            discard_blockers.add(blocker)
    for blocker in discard_blockers:
        use_blockers.remove(blocker)
    # We now have those that block testing, so we have to skip
    # Let's go in the order that they were added
    # Custom actions first
    for blocker in use_blockers:
        if "custom_action" in blocker.kwargs:
            action = kwargify(blocker.kwargs["custom_action"])
            local_env = {"blocker": blocker}
            local_env.update(global_env)
            action(**local_env)
    # And then skip
    if use_blockers:
        name, location = get_test_idents(item)
        bugs = [bug.bug_id for bug in use_blockers if hasattr(bug, "bug_id")]
        skip_data = {'type': 'blocker', 'reason': bugs}
        art_client.fire_hook('skip_test', test_location=location, test_name=name,
                             skip_data=skip_data)
        pytest.skip("Skipping due to these blockers:\n{}".format(
            "\n".join(
                "- {}".format(str(blocker)) for blocker in use_blockers
            )
        ))
def pytest_runtest_call(item):
    """If we use register_event, then collect the events and fail the test if not all came.

    After the test function finishes, it checks the listener whether it has caught the events.
    It uses `soft_assert` fixture.
    Before and after each test run using `register_event` fixture, database is cleared.
    """
    # NOTE(review): generator shape implies this is a hookwrapper — confirm
    # the decorator at the definition site.
    if "register_event" in item.funcargs:
        register_event = item.funcargs["register_event"]
        store.current_appliance.wait_for_ssh()
        # Clear any stale events collected before this test.
        register_event.delete_database()
    else:
        register_event = None
    try:
        yield
    finally:
        from fixtures.artifactor_plugin import SLAVEID
        if register_event is None:
            return
        node_id = item._nodeid
        # Event testing is enabled.
        try:
            logger.info("Checking the events to come.")
            wait_for(register_event.check_all_expectations,
                     delay=5,
                     num_sec=75,
                     handle_exception=True)
        except TimedOutError:
            logger.warning("Some of the events seem to not have come!")
        else:
            logger.info("Seems like all events have arrived!")
        name, location = get_test_idents(item)
        art_client.fire_hook(
            "filedump",
            test_name=name,
            test_location=location,
            description="Event testing report",
            contents=HTMLReport(
                node_id, register_event.expectations,
                register_event.get_all_received_events()
            ).generate(),
            file_type="html",
            display_glyph="align-justify",
            group_id="misc-artifacts",
            slaveid=SLAVEID,
        )
        logger.info("Clearing the database after testing ...")
        register_event.delete_database()
        # Fail softly for each expected event that did not arrive.
        soft_assert = item.funcargs["soft_assert"]
        for expectation in register_event.expectations:
            soft_assert(
                expectation.arrived,
                "Event {} for {} {} did not come!".format(
                    expectation.event_type,
                    expectation.target_type,
                    expectation.target_id
                ),
            )
        register_event.expectations = []
def pytest_runtest_call(item):
    """If we use register_event, then collect the events and fail the test if not all came.

    After the test function finishes, it checks the listener whether it has caught the events.
    It uses `soft_assert` fixture.
    Before and after each test run using `register_event` fixture, database is cleared.
    """
    # NOTE(review): generator shape implies this is a hookwrapper — confirm
    # the decorator at the definition site.
    if "register_event" in item.funcargs:
        register_event = item.funcargs["register_event"]
        store.current_appliance.wait_for_ssh()
        # Clear any stale events collected before this test.
        register_event.delete_database()
    else:
        register_event = None
    try:
        yield
    finally:
        if register_event is None:
            return
        node_id = item._nodeid
        # Event testing is enabled.
        try:
            logger.info('Checking the events to come.')
            wait_for(register_event.check_all_expectations,
                     delay=5,
                     num_sec=75,
                     handle_exception=True)
        except TimedOutError:
            logger.warning('Some of the events seem to not have come!')
        else:
            logger.info('Seems like all events have arrived!')
        name, location = get_test_idents(item)
        art_client.fire_hook(
            'filedump',
            test_name=name,
            test_location=location,
            description="Event testing report",
            contents=HTMLReport(
                node_id, register_event.expectations,
                register_event.get_all_received_events()).generate(),
            file_type="html",
            display_glyph="align-justify",
            group_id="misc-artifacts",
        )
        logger.info("Clearing the database after testing ...")
        register_event.delete_database()
        # Fail softly for each expected event that did not arrive.
        soft_assert = item.funcargs["soft_assert"]
        for expectation in register_event.expectations:
            soft_assert(
                expectation.arrived,
                "Event {} for {} {} did not come!".format(
                    expectation.event_type,
                    expectation.target_type,
                    expectation.target_id))
        register_event.expectations = []
def save_screenshot(node, ss, sse):
    """Ship an RBAC screenshot and/or its capture-error text to artifactor.

    Args:
        node: A pytest node, used to derive the test identity.
        ss: Base64 screenshot contents, or a falsy value when absent.
        sse: Screenshot error text, or a falsy value when absent.
    """
    name, location = get_test_idents(node)
    common = dict(test_location=location, test_name=name, group_id="RBAC")
    if ss:
        art_client.fire_hook(
            'filedump', description="RBAC Screenshot",
            file_type="rbac_screenshot", mode="wb", contents_base64=True,
            contents=ss, display_glyph="camera", **common)
    if sse:
        art_client.fire_hook(
            'filedump', description="RBAC Screenshot error",
            file_type="rbac_screenshot_error", mode="w", contents_base64=False,
            contents=sse, display_type="danger", **common)
def save_screenshot(node, ss, sse):
    """Dump an RBAC screenshot (png) and/or its error text for *node*.

    Args:
        node: A pytest node, used to derive the test identity.
        ss: Base64 screenshot contents, or a falsy value when absent.
        sse: Screenshot error text, or a falsy value when absent.
    """
    name, location = get_test_idents(node)
    base = dict(test_location=location, test_name=name,
                fd_ident="rbac_screenshot")
    if ss:
        art_client.fire_hook('filedump', filename="rbac_screenshot.png",
                             mode="wb", contents_base64=True, contents=ss,
                             **base)
    if sse:
        art_client.fire_hook('filedump', filename="rbac_screenshot.txt",
                             mode="w", contents_base64=False, contents=sse,
                             **base)
def pytest_exception_interact(node, call, report):
    """Artifactor hook: on test failure/error, dump tracebacks and a screenshot.

    Sends the full and short tracebacks to artifactor, records failure data in
    ``failed_test_tracking`` for later report generation, and attempts a
    screenshot via ``take_screenshot`` (best effort).
    """
    from fixtures.artifactor_plugin import SLAVEID
    name, location = get_test_idents(node)
    # Sanitize the exception message; xmlcharrefreplace keeps non-ASCII as refs.
    val = safe_string(call.excinfo.value.message).decode('utf-8', 'ignore')
    short_tb = '{}\n{}'.format(
        call.excinfo.type.__name__, val.encode('ascii', 'xmlcharrefreplace'))
    art_client.fire_hook('filedump', test_location=location, test_name=name,
                         description="Traceback", contents=str(report.longrepr),
                         file_type="traceback", display_type="danger",
                         display_glyph="align-justify",
                         group_id="pytest-exception", slaveid=SLAVEID)
    art_client.fire_hook('filedump', test_location=location, test_name=name,
                         description="Short traceback", contents=short_tb,
                         file_type="short_tb", display_type="danger",
                         display_glyph="align-justify",
                         group_id="pytest-exception", slaveid=SLAVEID)
    # base64 encoded to go into a data uri, same for screenshots
    full_tb = str(report.longrepr).encode('base64').strip()
    # errors are when exceptions are thrown outside of the test call phase
    report.when = getattr(report, 'when', 'setup')
    is_error = report.when != 'call'
    template_data = {
        'name': node.name,
        'file': node.fspath,
        'is_error': is_error,
        'fail_stage': report.when,
        'short_tb': short_tb,
        'full_tb': full_tb,
    }
    # Before trying to take a screenshot, we used to check if one of the browser_fixtures was
    # in this node's fixturenames, but that was too limited and preventing the capture of
    # screenshots. If removing that conditional now makes this too broad, we should consider
    # an isinstance(val, WebDriverException) check in addition to the browser fixture check that
    # exists here in commit 825ef50fd84a060b58d7e4dc316303a8b61b35d2
    screenshot = take_screenshot()
    template_data['screenshot'] = screenshot.png
    template_data['screenshot_error'] = screenshot.error
    if screenshot.png:
        art_client.fire_hook('filedump', test_location=location, test_name=name,
                             description="Exception screenshot",
                             file_type="screenshot", mode="wb",
                             contents_base64=True,
                             contents=template_data['screenshot'],
                             display_glyph="camera",
                             group_id="pytest-exception", slaveid=SLAVEID)
    if screenshot.error:
        art_client.fire_hook('filedump', test_location=location, test_name=name,
                             description="Screenshot error", mode="w",
                             contents_base64=False,
                             contents=template_data['screenshot_error'],
                             display_type="danger",
                             group_id="pytest-exception", slaveid=SLAVEID)
    failed_test_tracking['tests'].append(template_data)
    if is_error:
        failed_test_tracking['total_errored'] += 1
    else:
        failed_test_tracking['total_failed'] += 1
def pytest_collection_modifyitems(session, config, items):
    """Composite uncollect: drop tests that passed in a previous run.

    When ``--composite-uncollect`` is enabled, fetches prior results from
    trackerbot for the current appliance build and removes items whose
    overall status was 'passed'.
    """
    if not config.getvalue('composite_uncollect'):
        return
    from fixtures.artifactor_plugin import get_test_idents
    from fixtures.pytest_store import store
    from utils.log import logger
    from utils.trackerbot import composite_uncollect
    len_collected = len(items)
    new_items = []
    build = store.current_appliance.build
    pl = composite_uncollect(build)
    if pl:
        for test in pl['tests']:
            pl['tests'][test]['old'] = True
        # Here we pump into artifactor
        # art_client.fire_hook('composite_pump', old_artifacts=pl['tests'])
        for item in items:
            try:
                name, location = get_test_idents(item)
                test_ident = "{}/{}".format(location, name)
                status = pl['tests'][test_ident]['statuses']['overall']
                if status == 'passed':
                    logger.info(
                        'Uncollecting {} as it passed last time'.format(
                            item.name))
                    continue
                else:
                    new_items.append(item)
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit during collection.
            except Exception:
                # Unknown to the previous run (or malformed record) -> keep it.
                new_items.append(item)
        items[:] = new_items
    len_filtered = len(items)
    filtered_count = len_collected - len_filtered
    if filtered_count:
        # A warning should go into log/cfme.log when a test has this mark applied.
        # It might be good to write uncollected test names out via terminalreporter,
        # but I suspect it would be extremely spammy. It might be useful in the
        # --collect-only output?
        store.terminalreporter.write(
            '{} tests uncollected because they previously passed'.format(
                filtered_count), bold=True)
def pytest_collection_modifyitems(session, config, items):
    """Composite uncollect: drop tests that passed in a previous run.

    When ``--composite-uncollect`` is enabled, fetches prior results from
    trackerbot for the current appliance build and result source (default
    'jenkins'), removes items whose overall status was 'passed', and records
    the count in ``store.uncollection_stats``.
    """
    if not config.getvalue('composite_uncollect'):
        return
    from fixtures.artifactor_plugin import get_test_idents
    from fixtures.pytest_store import store
    from cfme.utils.log import logger
    from cfme.utils.trackerbot import composite_uncollect
    len_collected = len(items)
    new_items = []
    build = store.current_appliance.build
    source = config.getoption('composite_source')
    if not source:
        source = 'jenkins'
    store.terminalreporter.write(
        'Attempting Uncollect for build: {} and source: {}'.format(
            build, source), bold=True)
    pl = composite_uncollect(build, source)
    if pl:
        for test in pl['tests']:
            pl['tests'][test]['old'] = True
        # Here we pump into artifactor
        # art_client.fire_hook('composite_pump', old_artifacts=pl['tests'])
        for item in items:
            try:
                name, location = get_test_idents(item)
                test_ident = "{}/{}".format(location, name)
                status = pl['tests'][test_ident]['statuses']['overall']
                if status == 'passed':
                    logger.info(
                        'Uncollecting {} as it passed last time'.format(
                            item.name))
                    continue
                else:
                    new_items.append(item)
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit during collection.
            except Exception:
                # Unknown to the previous run (or malformed record) -> keep it.
                new_items.append(item)
        items[:] = new_items
    len_filtered = len(items)
    filtered_count = len_collected - len_filtered
    store.uncollection_stats['composite_uncollect'] = filtered_count
def __init__(self, request):
    """A simple adapter to aid in Merkyl Log Inspection during a test.

    This class is really only useful during a test and is designed to
    abstract away accessing the request object. The hooks which are fired
    can be done so during the test without this class/fixture, this is
    merely a convenience and does nothing special.
    """
    # Derive the test identity straight from the pytest node.
    self.test_name, self.test_location = get_test_idents(request.node)
    self.ip = appliance_ip_address
def pytest_collection_modifyitems(session, config, items):
    """Composite uncollect: drop tests that passed in a previous run.

    Variant that prefixes the build with the appliance version when missing,
    fetches prior results from trackerbot for that build and result source
    (default 'jenkins'), removes previously-passed items, and records the
    count in ``store.uncollection_stats``.
    """
    if not config.getvalue('composite_uncollect'):
        return
    from fixtures.artifactor_plugin import get_test_idents
    from fixtures.pytest_store import store
    from cfme.utils.log import logger
    from cfme.utils.trackerbot import composite_uncollect
    len_collected = len(items)
    new_items = []
    build = store.current_appliance.build
    # Trackerbot keys builds as "<version>-<build>"; normalize if needed.
    if str(store.current_appliance.version) not in build:
        build = "{}-{}".format(str(store.current_appliance.version), build)
    source = config.getoption('composite_source')
    if not source:
        source = 'jenkins'
    store.terminalreporter.write(
        'Attempting Uncollect for build: {} and source: {}'.format(build, source), bold=True)
    pl = composite_uncollect(build, source)
    if pl:
        for test in pl['tests']:
            pl['tests'][test]['old'] = True
        # Here we pump into artifactor
        # art_client.fire_hook('composite_pump', old_artifacts=pl['tests'])
        for item in items:
            try:
                name, location = get_test_idents(item)
                test_ident = "{}/{}".format(location, name)
                status = pl['tests'][test_ident]['statuses']['overall']
                if status == 'passed':
                    logger.info('Uncollecting {} as it passed last time'.format(item.name))
                    continue
                else:
                    new_items.append(item)
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit during collection.
            except Exception:
                # Unknown to the previous run (or malformed record) -> keep it.
                new_items.append(item)
        items[:] = new_items
    len_filtered = len(items)
    filtered_count = len_collected - len_filtered
    store.uncollection_stats['composite_uncollect'] = filtered_count
def skip(provider_key, previous_fail=False):
    """Skip the current test because *provider_key* failed to set up.

    Fires the artifactor ``skip_test`` hook (when a request is available)
    and then raises a pytest skip with a reason that depends on whether the
    failure happened previously or during this attempt.
    """
    if request:
        node = request.node
        name, location = get_test_idents(node)
        art_client.fire_hook(
            'skip_test', test_location=location, test_name=name,
            skip_data={'type': 'provider', 'reason': provider_key})
    if previous_fail:
        raise pytest.skip('Provider {} failed to set up previously in another test, '
                          'skipping test'.format(provider_key))
    raise pytest.skip('Provider {} failed to set up this time, '
                      'skipping test'.format(provider_key))
def save_traceback_file(node, contents):
    """A convenience function for artifactor file sending

    This function simply takes the nodes id and the contents of the file and
    processes them and sends them to artifactor

    Args:
        node: A pytest node
        contents: The contents of the traceback file
    """
    name, location = get_test_idents(node)
    payload = dict(
        test_location=location, test_name=name, description="RBAC Traceback",
        contents=contents, file_type="rbac", group_id="RBAC")
    art_client.fire_hook('filedump', **payload)
def pytest_runtest_call(item):
    """If we use register_event, then collect the events and fail the test if not all came.

    After the test function finishes, it checks the listener whether it has caught the events.
    It uses `soft_assert` fixture.
    Before and after each test run using `register_event` fixture, database is cleared.
    """
    # NOTE(review): generator shape implies this is a hookwrapper — confirm
    # the decorator at the definition site.
    try:
        yield
    finally:
        if "register_event" not in item.funcargs:
            return
        node_id = item._nodeid
        register_event = item.funcargs["register_event"]
        # If the event testing is disabled, skip the collection and failing
        if register_event.listener is None:
            return
        # Event testing is enabled.
        try:
            wait_for(register_event.check_all_expectations,
                     delay=5,
                     num_sec=75,
                     handle_exception=True)
        except TimedOutError:
            # Missing events are reported via soft_assert below.
            pass
        name, location = get_test_idents(item)
        art_client.fire_hook(
            'filedump',
            test_name=name,
            test_location=location,
            description="Event testing report",
            contents=HTMLReport(
                node_id, register_event.expectations,
                register_event.get_all_received_events()).generate(),
            file_type="html",
            display_glyph="align-justify",
            group_id="misc-artifacts",
        )
        logger.info("Clearing the database after testing ...")
        register_event._delete_database()
        # Fail softly for each expected event that did not arrive.
        soft_assert = item.funcargs["soft_assert"]
        for expectation in register_event.expectations:
            soft_assert(expectation.arrived,
                        "Event {} did not come!".format(expectation.event))
        register_event.expectations = []
def pytest_runtest_call(item):
    """If we use register_event, then collect the events and fail the test if not all came.

    After the test function finishes, it checks the listener whether it has caught the events.
    It uses `soft_assert` fixture.
    Before and after each test run using `register_event` fixture, database is cleared.
    """
    try:
        yield
    finally:
        if "register_event" not in item.funcargs:
            return
        item_id = item._nodeid
        event_reg = item.funcargs["register_event"]
        # No listener means event testing is disabled for this run.
        if event_reg.listener is None:
            return
        try:
            wait_for(event_reg.check_all_expectations,
                     delay=5, num_sec=75, handle_exception=True)
        except TimedOutError:
            # Missing events are reported via soft_assert below.
            pass
        name, location = get_test_idents(item)
        html_report = HTMLReport(
            item_id, event_reg.expectations,
            event_reg.get_all_received_events()).generate()
        art_client.fire_hook(
            'filedump', test_name=name, test_location=location,
            description="Event testing report", contents=html_report,
            file_type="html", display_glyph="align-justify",
            group_id="misc-artifacts")
        logger.info("Clearing the database after testing ...")
        event_reg._delete_database()
        soft_assert = item.funcargs["soft_assert"]
        for expectation in event_reg.expectations:
            soft_assert(expectation.arrived,
                        "Event {} did not come!".format(expectation.event))
        event_reg.expectations = []
def skip(provider_key, previous_fail=False):
    """Skip the current test because *provider_key* failed to set up.

    Fires the artifactor ``skip_test`` hook (when a request is available),
    then raises a pytest skip whose message depends on whether the failure
    happened previously or during this attempt.
    """
    if request:
        node = request.node
        name, location = get_test_idents(node)
        skip_data = {'type': 'provider', 'reason': provider_key}
        art_client.fire_hook('skip_test', test_location=location,
                             test_name=name, skip_data=skip_data)
    if previous_fail:
        msg = ('Provider {} failed to set up previously in another test, '
               'skipping test'.format(provider_key))
    else:
        msg = ('Provider {} failed to set up this time, '
               'skipping test'.format(provider_key))
    raise pytest.skip(msg)
def pytest_exception_interact(node, call, report):
    """Artifactor hook: on test failure/error, dump tracebacks and a screenshot.

    Sends the full and short tracebacks to artifactor, records failure data in
    ``failed_test_tracking`` for later report generation, and attempts a
    screenshot via ``take_screenshot`` (best effort).
    """
    name, location = get_test_idents(node)
    # NOTE(review): .message is Python 2 only and may be absent or non-str on
    # some exception types — confirm before porting.
    val = call.excinfo.value.message.decode('utf-8', 'ignore')
    # ASCII-only short traceback; non-ASCII chars become XML char references.
    short_tb = '%s\n%s' % (call.excinfo.type.__name__,
                           val.encode('ascii', 'xmlcharrefreplace'))
    art_client.fire_hook('filedump', test_location=location, test_name=name,
                         filename="traceback.txt", contents=str(report.longrepr),
                         fd_ident="tb")
    art_client.fire_hook('filedump', test_location=location, test_name=name,
                         filename="short-traceback.txt", contents=short_tb,
                         fd_ident="short_tb")
    # base64 encoded to go into a data uri, same for screenshots
    full_tb = str(report.longrepr).encode('base64').strip()
    # errors are when exceptions are thrown outside of the test call phase
    report.when = getattr(report, 'when', 'setup')
    is_error = report.when != 'call'
    template_data = {
        'name': node.name,
        'file': node.fspath,
        'is_error': is_error,
        'fail_stage': report.when,
        'short_tb': short_tb,
        'full_tb': full_tb,
    }
    # Before trying to take a screenshot, we used to check if one of the browser_fixtures was
    # in this node's fixturenames, but that was too limited and preventing the capture of
    # screenshots. If removing that conditional now makes this too broad, we should consider
    # an isinstance(val, WebDriverException) check in addition to the browser fixture check that
    # exists here in commit 825ef50fd84a060b58d7e4dc316303a8b61b35d2
    screenshot = take_screenshot()
    template_data['screenshot'] = screenshot.png
    template_data['screenshot_error'] = screenshot.error
    if screenshot.png:
        art_client.fire_hook('filedump', test_location=location, test_name=name,
                             filename="screenshot.png", fd_ident="screenshot",
                             mode="wb", contents_base64=True,
                             contents=template_data['screenshot'])
    if screenshot.error:
        art_client.fire_hook('filedump', test_location=location, test_name=name,
                             filename="screenshot.txt", fd_ident="screenshot",
                             mode="w", contents_base64=False,
                             contents=template_data['screenshot_error'])
    failed_test_tracking['tests'].append(template_data)
    if is_error:
        failed_test_tracking['total_errored'] += 1
    else:
        failed_test_tracking['total_failed'] += 1
def save_traceback_file(node, contents):
    """A convenience function for artifactor file sending

    This function simply takes the nodes id and the contents of the file and
    processes them and sends them to artifactor

    Args:
        node: A pytest node
        contents: The contents of the traceback file
    """
    name, location = get_test_idents(node)
    payload = dict(
        test_location=location, test_name=name,
        filename="rbac-traceback.txt", contents=contents, fd_ident="rbac")
    art_client.fire_hook('filedump', **payload)
def pytest_runtest_teardown(item, nextitem):
    """Artifactor hook: dump the QA contact for *item* after it finishes.

    The contact comes from test metadata when an ``owner`` key is set;
    otherwise it is derived from blame data via ``dig_code`` (top two
    contributors with ownership percentages). Falls back to
    "Unknown,None" when neither source yields anything.
    """
    name, location = get_test_idents(item)
    qa_string = "Unknown,None"
    if hasattr(item, "_metadata") and item._metadata.get('owner') is not None:
        # The owner is specified in metadata.
        # BUG FIX: the original left the "{}" placeholder unfilled.
        qa_string = "{},from metadata".format(item._metadata.get('owner'))
    else:
        try:
            qa_arr = []
            results = dig_code(item)
            # Keep at most the two largest contributors.
            for idx in range(min(2, len(results))):
                qa_arr.append("{},{:.2f}%\n".format(results[idx][0], results[idx][1]))
            if qa_arr:
                qa_string = "".join(qa_arr)
        except Exception:
            # Blame lookup is best-effort only; keep the fallback string.
            pass
    art_client.fire_hook('filedump', test_location=location, test_name=name,
                         filename="qa_contact.txt", contents=str(qa_string),
                         fd_ident="qa")
def pytest_collection_modifyitems(session, config, items):
    """Composite uncollect: drop tests that passed in a previous run.

    When ``--composite-uncollect`` is enabled, fetches prior results from
    trackerbot for the current appliance build and removes items whose
    overall status was 'passed'.
    """
    if not config.getvalue('composite_uncollect'):
        return
    len_collected = len(items)
    new_items = []
    build = store.current_appliance.build
    pl = composite_uncollect(build)
    if pl:
        for test in pl['tests']:
            pl['tests'][test]['old'] = True
        # Here we pump into artifactor
        # art_client.fire_hook('composite_pump', old_artifacts=pl['tests'])
        for item in items:
            try:
                name, location = get_test_idents(item)
                test_ident = "{}/{}".format(location, name)
                status = pl['tests'][test_ident]['statuses']['overall']
                if status == 'passed':
                    logger.info('Uncollecting {} as it passed last time'.format(item.name))
                    continue
                else:
                    new_items.append(item)
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit during collection.
            except Exception:
                # Unknown to the previous run (or malformed record) -> keep it.
                new_items.append(item)
        items[:] = new_items
    len_filtered = len(items)
    filtered_count = len_collected - len_filtered
    if filtered_count:
        # A warning should go into log/cfme.log when a test has this mark applied.
        # It might be good to write uncollected test names out via terminalreporter,
        # but I suspect it would be extremely spammy. It might be useful in the
        # --collect-only output?
        store.terminalreporter.write(
            '{} tests uncollected because they previously passed'.format(filtered_count),
            bold=True)
def pytest_exception_interact(node, call, report):
    """On test failure, dump the QA contact (owner) for *node* to artifactor.

    Prefers the ``owner`` declared in test metadata; otherwise derives the
    top two contributors from blame data via ``dig_code``.
    """
    name, location = get_test_idents(node)
    if hasattr(node, "_metadata") and node._metadata.get('owner') is not None:
        # The owner is specified in metadata.
        # BUG FIX: use the same dict-style access as the guard above; the
        # original used attribute access (node._metadata.owner), which fails
        # for plain-dict metadata.
        art_client.fire_hook(
            'filedump', test_location=location, test_name=name,
            filename="qa_contact.txt",
            contents="{} (from metadata)".format(node._metadata.get('owner')),
            fd_ident="qa")
        return
    try:
        qa_arr = []
        results = dig_code(node)
        # Keep at most the two largest contributors.
        for idx in range(min(2, len(results))):
            qa_arr.append("{} ({:.2f}%)".format(results[idx][0], results[idx][1]))
        qa_string = ", ".join(qa_arr)
    except Exception:
        # Blame lookup is best-effort only.
        qa_string = "Unknown"
    art_client.fire_hook('filedump', test_location=location, test_name=name,
                         filename="qa_contact.txt", contents=str(qa_string),
                         fd_ident="qa")
def pytest_runtest_call(item):
    """After the test ran, attach the collected SMTP e-mail report, if any."""
    try:
        yield
    finally:
        if "smtp_test" not in item.funcargs:
            return
        name, location = get_test_idents(item)
        try:
            report_html = item.funcargs["smtp_test"].get_html_report()
            art_client.fire_hook(
                "filedump", test_name=name, test_location=location,
                filename="emails.html", contents=report_html,
                fd_ident="emails")
        except Exception as e:
            logger.exception(e)
            logger.error("Something happened to the SMTP collector.")
def pytest_runtest_teardown(item, nextitem):
    """Artifactor hook: dump the QA contact for *item* after it finishes.

    The contact comes from test metadata when an ``owner`` key is set;
    otherwise it is derived from blame data via ``dig_code`` (top two
    contributors with ownership percentages). Falls back to
    "Unknown,None" when neither source yields anything.
    """
    name, location = get_test_idents(item)
    qa_string = "Unknown,None"
    if hasattr(item, "_metadata") and item._metadata.get('owner') is not None:
        # The owner is specified in metadata.
        # BUG FIX: the original left the "{}" placeholder unfilled.
        qa_string = "{},from metadata".format(item._metadata.get('owner'))
    else:
        try:
            qa_arr = []
            results = dig_code(item)
            # Keep at most the two largest contributors.
            for idx in range(min(2, len(results))):
                qa_arr.append("{},{:.2f}%\n".format(results[idx][0], results[idx][1]))
            if qa_arr:
                qa_string = "".join(qa_arr)
        except Exception:
            # Blame lookup is best-effort only; keep the fallback string.
            pass
    from fixtures.artifactor_plugin import SLAVEID
    art_client.fire_hook(
        'filedump', test_location=location, test_name=name,
        description="QA Contact", contents=str(qa_string),
        file_type="qa_contact", group_id="qa-contact", slaveid=SLAVEID)
def pytest_runtest_call(item):
    """Once the test has run, ship the SMTP collector's HTML report (if the
    test used the ``smtp_test`` fixture)."""
    try:
        yield
    finally:
        if "smtp_test" not in item.funcargs:
            return
        name, location = get_test_idents(item)
        smtp_fixture = item.funcargs["smtp_test"]
        try:
            art_client.fire_hook(
                "filedump",
                test_name=name,
                test_location=location,
                filename="emails.html",
                contents=smtp_fixture.get_html_report(),
                fd_ident="emails")
        except Exception as e:
            logger.exception(e)
            logger.error("Something happened to the SMTP collector.")
def pytest_runtest_call(item):
    """After the test ran, attach the received-e-mails HTML report, if the
    test used the ``smtp_test`` fixture."""
    try:
        yield
    finally:
        if "smtp_test" not in item.funcargs:
            return
        name, location = get_test_idents(item)
        try:
            report_html = item.funcargs["smtp_test"].get_html_report()
            art_client.fire_hook(
                "filedump", test_name=name, test_location=location,
                description="received e-mails", contents=report_html,
                file_type="html", display_glyph="align-justify",
                group_id="misc-artifacts")
        except Exception as e:
            logger.exception(e)
            logger.error("Something happened to the SMTP collector.")
def pytest_exception_interact(node, call, report):
    """Artifactor hook: on test failure/error, dump tracebacks and a screenshot.

    Sends the full and short tracebacks to artifactor, records failure data in
    ``failed_test_tracking`` for later report generation, and attempts a
    screenshot via ``take_screenshot`` (best effort).
    """
    name, location = get_test_idents(node)
    val = unicode(call.excinfo.value)
    # ASCII-only short traceback; non-ASCII characters are dropped.
    short_tb = '%s\n%s' % (call.excinfo.type.__name__, val.encode('ascii', 'ignore'))
    art_client.fire_hook('filedump', test_location=location, test_name=name,
                         filename="traceback.txt", contents=str(report.longrepr),
                         fd_ident="tb")
    art_client.fire_hook('filedump', test_location=location, test_name=name,
                         filename="short-traceback.txt", contents=short_tb,
                         fd_ident="short_tb")
    # base64 encoded to go into a data uri, same for screenshots
    full_tb = str(report.longrepr).encode('base64').strip()
    # errors are when exceptions are thrown outside of the test call phase
    report.when = getattr(report, 'when', 'setup')
    is_error = report.when != 'call'
    template_data = {
        'name': node.name,
        'file': node.fspath,
        'is_error': is_error,
        'fail_stage': report.when,
        'short_tb': short_tb,
        'full_tb': full_tb,
    }
    # Before trying to take a screenshot, we used to check if one of the browser_fixtures was
    # in this node's fixturenames, but that was too limited and preventing the capture of
    # screenshots. If removing that conditional now makes this too broad, we should consider
    # an isinstance(val, WebDriverException) check in addition to the browser fixture check that
    # exists here in commit 825ef50fd84a060b58d7e4dc316303a8b61b35d2
    screenshot = take_screenshot()
    template_data['screenshot'] = screenshot.png
    template_data['screenshot_error'] = screenshot.error
    if screenshot.png:
        art_client.fire_hook('filedump', test_location=location, test_name=name,
                             filename="screenshot.png", fd_ident="screenshot",
                             mode="wb", contents_base64=True,
                             contents=template_data['screenshot'])
    if screenshot.error:
        art_client.fire_hook('filedump', test_location=location, test_name=name,
                             filename="screenshot.txt", fd_ident="screenshot",
                             mode="w", contents_base64=False,
                             contents=template_data['screenshot_error'])
    failed_test_tracking['tests'].append(template_data)
    if is_error:
        failed_test_tracking['total_errored'] += 1
    else:
        failed_test_tracking['total_failed'] += 1
def pytest_collection_modifyitems(session, config, items):
    """Composite uncollect: drop tests that passed in a previous run.

    Master/slave aware variant: the parallelizer master fetches prior results
    from trackerbot and stores them in the pytest cache; slaves read the same
    payload from the cache. Previously-passed items are removed and the count
    is recorded in ``store.uncollection_stats``.
    """
    if not config.getvalue('composite_uncollect'):
        return
    from fixtures.artifactor_plugin import get_test_idents
    from fixtures.pytest_store import store
    from cfme.utils.log import logger
    from cfme.utils.trackerbot import composite_uncollect
    len_collected = len(items)
    new_items = []
    build = store.current_appliance.build
    # Trackerbot keys builds as "<version>-<build>"; normalize if needed.
    if str(store.current_appliance.version) not in build:
        build = "{}-{}".format(str(store.current_appliance.version), build)
    source = config.getoption('composite_source')
    if not source:
        source = 'jenkins'
    store.terminalreporter.write(
        'Attempting Uncollect for build: {} and source: {}\n'.format(build, source), bold=True)
    # The following code assumes slaves collect AFTER master is done, this prevents a parallel
    # speed up, but in the future we may move uncollection to a later stage and only do it on
    # master anyway.
    if store.parallelizer_role == 'master':
        # Master always stores the composite uncollection
        store.terminalreporter.write('Storing composite uncollect in cache...\n')
        pl = composite_uncollect(build, source)
        config.cache.set('miq-composite-uncollect', pl)
    else:
        # Slaves always retrieve from cache
        logger.info('Slave retrieving composite uncollect from cache')
        pl = config.cache.get('miq-composite-uncollect', None)
    if pl:
        for test in pl['tests']:
            pl['tests'][test]['old'] = True
        # Here we pump into artifactor
        # art_client.fire_hook('composite_pump', old_artifacts=pl['tests'])
        for item in items:
            try:
                name, location = get_test_idents(item)
                test_ident = "{}/{}".format(location, name)
                status = pl['tests'][test_ident]['statuses']['overall']
                if status == 'passed':
                    logger.info('Uncollecting {} as it passed last time'.format(item.name))
                    continue
                else:
                    new_items.append(item)
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit during collection.
            except Exception:
                # Unknown to the previous run (or malformed record) -> keep it.
                new_items.append(item)
        items[:] = new_items
    len_filtered = len(items)
    filtered_count = len_collected - len_filtered
    store.uncollection_stats['composite_uncollect'] = filtered_count