Example #1
def pytest_runtest_protocol(item, nextitem):
    if item.config.getoption('polarion_run') is not None:
        reports = runtestprotocol(item, nextitem=nextitem)

        # get polarion objects
        tr = item.config.getoption('test_run_obj')
        tc = item.config.getoption('test_run_records')[item.get_marker("polarion_id").args[0]]

        for report in reports:
            if report.when == 'call':
                # print '\n%s --- %s --- %s' % (item.name, item.get_marker("polarion_id"), report.outcome)

                # Build up the traceback message
                trace = ''
                if not report.passed:
                    trace = '{0}:{1}\n{2}'.format(report.location, report.when, report.longrepr)

                tc.result = report.outcome
                tc.executed = datetime.datetime.now()
                tc.executed_by = tc.logged_in_user_id
                tc.duration = report.duration
                tc.comment = trace
                polarion_set_record(tr, tc)
            elif report.when == 'setup' and report.skipped:
                tc.result = 'blocked'
                tc.executed_by = tc.logged_in_user_id
                tc.comment = item.get_marker('skipif').kwargs['reason']
                polarion_set_record(tr, tc)
        # Final polarion record update
        return True
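
Every snippet on this page follows the same basic shape: implement the `pytest_runtest_protocol` hook in a conftest.py or plugin, call `runtestprotocol` from `_pytest.runner` to drive the setup/call/teardown phases yourself, inspect the resulting reports, and return a truthy value so pytest treats the protocol as handled. A minimal, self-contained sketch of that skeleton (the printed line is purely illustrative):

# conftest.py -- a minimal sketch of the pattern shared by the examples on this page
from _pytest.runner import runtestprotocol


def pytest_runtest_protocol(item, nextitem):
    # run the standard setup/call/teardown phases ourselves
    reports = runtestprotocol(item, nextitem=nextitem)
    for report in reports:  # one report per phase: setup, call, teardown
        if report.when == 'call':
            # post-process the call-phase result here (print, upload, record, ...)
            print('\n{} --- {}'.format(item.name, report.outcome))
    # a truthy return value tells pytest this hook handled the protocol
    return True
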
Example #2
 def runforked():
     try:
         reports = runtestprotocol(item, log=False)
     except KeyboardInterrupt:
         py.std.os._exit(EXITSTATUS_TESTEXIT)
     finally:
         queue.publish(TEST_CAHNNEL_ID, {'type': 'TESTDONE'})
     return marshal.dumps([serialize_report(x) for x in reports])
Example #3
def pytest_runtest_protocol(item, nextitem):
    """
    Note: when teardown fails, two reports are generated for the case, one for
    the test case and the other for the teardown error.
    """
    rerun_marker = item.get_marker("flaky")
    # use the marker as a priority over the global setting.
    if rerun_marker is not None:
        if "reruns" in rerun_marker.kwargs:
            # check for keyword arguments
            reruns = rerun_marker.kwargs["reruns"]
        elif len(rerun_marker.args) > 0:
            # check for arguments
            reruns = rerun_marker.args[0]
        else:
            reruns = 1
    elif item.session.config.option.reruns is not None:
        # default to the global setting
        reruns = item.session.config.option.reruns
    else:
        # global setting is not specified, and this test is not marked with
        # flaky
        return

    # while this doesn't need to be run with every item, it will fail on the
    # first item if necessary
    check_options(item.session.config)

    for i in range(reruns + 1):  # ensure at least one run of each item
        item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
        reports = runtestprotocol(item, nextitem=nextitem, log=False)

        for report in reports:  # 3 reports: setup, test, teardown
            report.rerun = i
            xfail = hasattr(report, "wasxfail")
            if i == reruns or not report.failed or xfail:
                # last run or no failure detected, log normally
                item.ihook.pytest_runtest_logreport(report=report)
            else:
                # failure detected and reruns not exhausted, since i < reruns
                report.outcome = "rerun"

                # When running tests in parallel using pytest-xdist the first
                # report that is logged will finish and terminate the current
                # node rather than rerunning the test. Thus we must skip logging of
                # intermediate results when running in parallel, otherwise no
                # test is rerun.
                # See: https://github.com/pytest-dev/pytest/issues/1193
                parallel_testing = hasattr(item.config, "slaveinput")
                if not parallel_testing:
                    # will rerun test, log intermediate result
                    item.ihook.pytest_runtest_logreport(report=report)

                break  # trigger rerun
        else:
            return True  # no need to rerun

    return True
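
The rerun loop above mirrors pytest-rerunfailures: a per-test `flaky` marker overrides the global `--reruns` option, failed runs are re-executed, and intermediate results are reported with the `rerun` outcome. Assuming that plugin (or an equivalent hook like the one above) is installed, typical usage looks like this sketch:

# Rerun every failing test up to two extra times:
#     pytest --reruns 2
#
# Or mark a single test as flaky, overriding the global setting:
import pytest


@pytest.mark.flaky(reruns=3)
def test_sometimes_fails():
    ...
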
Example #4
def pytest_runtest_protocol(item, nextitem):
    """
    Note: when teardown fails, two reports are generated for the case, one for the test
    case and the other for the teardown error.
    """

    check_options(item.session.config)

    item.ihook.pytest_runtest_logstart(
        nodeid=item.nodeid, location=item.location,
    )
    # If rerun-after is enabled, skip reruns that were scheduled before the threshold was reached
    if item.attempt > 1 and item.config.option.rerun_after and item.session.rerun_tests_durations > item.config.option.rerun_time_threshold:
        reason = "total rerun threshold reached"
        print("rerun skipped, reason: " + reason + " testcase: " + item.nodeid)
        # Do not touch the item report status here
        # Just decrease the attempt count (it was increased while scheduling the rerun)
        item.attempt -= 1
    else:
        # Do test execution and assign report status
        item.reports = runtestprotocol(item, nextitem=nextitem, log=False)
    # Update cumulative test durations
    update_test_durations(item.reports, item.session, item.attempt)
    # Get test status (aware of rerun)
    test_succeed, test_aborted, status_message = report_test_status(item, item.reports)

    if item.config.option.verbose:
        print(item.nodeid, "attempt " + str(item.attempt))

    qualify_rerun = False
    if test_succeed or test_aborted:
        pass
    else:
        # Check rerun conditions
        qualify, reason = qualify_for_rerun(item, item.reports)
        if not qualify:
            print("rerun skipped, reason: " + reason + " testcase: " + item.nodeid)
        else:
            # Schedule item to be executed somewhere in future
            schedule_item_rerun(item, item.config)
            qualify_rerun = True

    # Update report attempt field (to report these values)
    for report in item.reports:
        # Only update the attempt for "call" reports (not setup or teardown)
        if report.when == "call":
            report.attempt = item.attempt
        # If test is scheduled for rerun, results are not final, so we don't generate report
        if not qualify_rerun:
            item.ihook.pytest_runtest_logreport(report=report)
        # For debug purposes
        verbose_output(item)

    # pytest_runtest_protocol returns True
    return True
Example #5
 def test_xfail_evalfalse_but_fails(self, testdir):
     item = testdir.getitem("""
         import pytest
         @pytest.mark.xfail('False')
         def test_func():
             assert 0
     """)
     reports = runtestprotocol(item, log=False)
     callreport = reports[1]
     assert callreport.failed
     assert 'xfail' not in callreport.keywords
Example #6
 def test_xfail_xpassed(self, testdir):
     item = testdir.getitem("""
         import pytest
         @pytest.mark.xfail(reason="this is an xfail")
         def test_func():
             assert 1
     """)
     reports = runtestprotocol(item, log=False)
     assert len(reports) == 3
     callreport = reports[1]
     assert callreport.passed
     assert callreport.wasxfail == "this is an xfail"
Example #7
 def test_xfail_simple(self, testdir, strict):
     item = testdir.getitem("""
         import pytest
         @pytest.mark.xfail(strict=%s)
         def test_func():
             assert 0
     """ % strict)
     reports = runtestprotocol(item, log=False)
     assert len(reports) == 3
     callreport = reports[1]
     assert callreport.skipped
     assert callreport.wasxfail == ""
Example #8
 def test_xfail_xpassed(self, testdir):
     item = testdir.getitem("""
         import pytest
         @pytest.mark.xfail
         def test_func():
             assert 1
     """)
     reports = runtestprotocol(item, log=False)
     assert len(reports) == 3
     callreport = reports[1]
     assert callreport.failed
     expl = callreport.keywords['xfail']
     assert expl == ""
Example #9
 def test_xfail_xpassed_strict(self, testdir):
     item = testdir.getitem("""
         import pytest
         @pytest.mark.xfail(strict=True, reason="nope")
         def test_func():
             assert 1
     """)
     reports = runtestprotocol(item, log=False)
     assert len(reports) == 3
     callreport = reports[1]
     assert callreport.failed
     assert callreport.longrepr == "[XPASS(strict)] nope"
     assert not hasattr(callreport, "wasxfail")
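
The strict variant above is the key difference from a plain xfail: with `strict=True`, a test that unexpectedly passes is reported as a failure with an `[XPASS(strict)]` explanation instead of being counted as an xpass. Seen from the test author's side, the same behaviour looks like this sketch (the project-wide default can also be set with `xfail_strict = true` in pytest.ini):

# test_strict_xfail.py
import pytest


@pytest.mark.xfail(strict=True, reason="nope")
def test_func():
    assert 1  # passes, so pytest reports it as FAILED: [XPASS(strict)] nope
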
Example #10
 def test_xfail_simple(self, testdir, strict):
     item = testdir.getitem(
         """
         import pytest
         @pytest.mark.xfail(strict=%s)
         def test_func():
             assert 0
     """
         % strict
     )
     reports = runtestprotocol(item, log=False)
     assert len(reports) == 3
     callreport = reports[1]
     assert callreport.skipped
     assert callreport.wasxfail == ""
Example #11
 def test_xfail_using_platform(self, testdir):
     """
     Verify that platform can be used with xfail statements.
     """
     item = testdir.getitem(
         """
         import pytest
         @pytest.mark.xfail("platform.platform() == platform.platform()")
         def test_func():
             assert 0
     """
     )
     reports = runtestprotocol(item, log=False)
     assert len(reports) == 3
     callreport = reports[1]
     assert callreport.wasxfail
Example #12
 def test_xfail_using_platform(self, testdir):
     """
     Verify that platform can be used with xfail statements.
     """
     item = testdir.getitem(
         """
         import pytest
         @pytest.mark.xfail("platform.platform() == platform.platform()")
         def test_func():
             assert 0
     """
     )
     reports = runtestprotocol(item, log=False)
     assert len(reports) == 3
     callreport = reports[1]
     assert callreport.wasxfail
Example #13
def pytest_runtest_protocol(item, nextitem):
    """
    Note: when teardown fails, two reports are generated for the case, one for
    the test case and the other for the teardown error.
    """

    reruns = get_reruns_count(item)
    if reruns is None:
        # global setting is not specified, and this test is not marked with
        # flaky
        return

    # while this doesn't need to be run with every item, it will fail on the
    # first item if necessary
    check_options(item.session.config)
    delay = get_reruns_delay(item)
    parallel = hasattr(item.config, 'slaveinput')
    item.execution_count = 0

    while True:
        item.execution_count += 1
        item.ihook.pytest_runtest_logstart(nodeid=item.nodeid,
                                           location=item.location)
        reports = runtestprotocol(item, nextitem=nextitem, log=False)

        for report in reports:  # 3 reports: setup, call, teardown
            report.rerun = item.execution_count - 1
            xfail = hasattr(report, 'wasxfail')
            if item.execution_count > reruns or not report.failed or xfail:
                # last run or no failure detected, log normally
                item.ihook.pytest_runtest_logreport(report=report)
            else:
                # failure detected and reruns not exhausted (execution_count <= reruns)
                report.outcome = 'rerun'
                time.sleep(delay)

                if not parallel or works_with_current_xdist():
                    # will rerun test, log intermediate result
                    item.ihook.pytest_runtest_logreport(report=report)

                # clean the item's cached results from any level of setups
                _remove_cached_results_from_failed_fixtures(item)
                _remove_failed_setup_state_from_session(item)

                break  # trigger rerun
        else:
            return True  # no need to rerun
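
Most of the rerun loops in this collection rely on Python's `for ... else`: the `else` branch runs only when the loop finishes without hitting `break`, i.e. when no report triggered a rerun. A tiny standalone illustration of that control flow:

def needs_rerun(reports):
    # for/else: the else branch runs only if the loop was never broken out of
    for report in reports:
        if report == 'failed':
            break  # a failure "triggers the rerun" and skips the else block
    else:
        return False  # every report was checked without a break: no rerun
    return True


print(needs_rerun(['passed', 'passed', 'passed']))  # False
print(needs_rerun(['passed', 'failed', 'passed']))  # True
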
Example #14
 def call_target(self, target_item):
     """
     Call target test after some tests which were run before
     and if any of its results (setup, call, teardown) failed, mark it as coupled
     :param _pytest.python.Function target_item: current flaky test
     """
     failed_report = None
     with self.log(target_item) as logger:
         reports = runtestprotocol(target_item, log=False)
         for report in reports:  # 3 reports: setup, call, teardown
             if report.failed is True:
                 refresh_state(item=target_item)
                 logger(report=report)
                 failed_report = report
                 continue
             logger(report=report)
     return failed_report  # setup, call, and teardown must all succeed
Example #15
def pytest_runtest_protocol(item, nextitem):
    """ runtest_setup/call/teardown protocol implementation. """
    condition = get_condition(item)
    if not condition:
        # The test doesn't have the random marker or doesn't
        # fulfill the condition, so we run the test normally
        return

    repeat = get_repeat(item)
    mode = get_mode(item)

    for i in range(repeat):
        item.ihook.pytest_runtest_logstart(nodeid=item.nodeid,
                                           location=item.location)
        reports = runtestprotocol(item, nextitem=nextitem, log=False)

        for report in reports:  # 3 reports: setup, call, teardown
            report.total_repeat = repeat
            report.repeat = i

            if mode == "RELAX":
                condition = not report.failed or hasattr(report, "wasxfail")
            elif mode == "STRICT":
                condition = report.failed or report.skipped

            # we only mess with the report if it's a call report
            if i == repeat - 1 or condition or report.when != "call":
                # last run or no failure detected
                if mode == "STRICT" and i == repeat - 1 and report.when == "call":
                    # in STRICT mode, if the test never fails, fail it completely
                    report.outcome = "failed"
                    report.sections.append(
                        ("", f"The test {item.nodeid!r} is no more instable."))

                # log normally
                item.ihook.pytest_runtest_logreport(report=report)
            else:
                # failure detected and repeat not exhausted, since i < repeat
                report.outcome = "repeat"
                item.ihook.pytest_runtest_logreport(report=report)

                break  # trigger repeat
        else:
            return True  # no need to repeat
    return True
Example #16
def pytest_runtest_protocol(item, nextitem):
    """ runtest_setup/call/teardown protocol implementation. """
    condition = get_condition(item)
    if not condition:
        # The test doesn't have the random marker or doesn't
        # fulfill the condition, so we run the test normally
        return

    repeat = get_repeat(item)
    mode = get_mode(item)

    for i in range(repeat):
        item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
        reports = runtestprotocol(item, nextitem=nextitem, log=False)

        for report in reports:  # 3 reports: setup, call, teardown
            report.total_repeat = repeat
            report.repeat = i

            if mode == "RELAX":
                condition = not report.failed or hasattr(report, "wasxfail")
            elif mode == "STRICT":
                condition = report.failed or report.skipped

            # we only mess with the report if it's a call report
            if i == repeat - 1 or condition or not report.when == "call":
                # last run or no failure detected
                if mode == "STRICT" and i == repeat - 1 and report.when == "call":
                    # in STRICT mode, if the test never fails, fail it completely
                    report.outcome = "failed"
                    report.sections.append(
                        ("", f"The test {item.nodeid!r} is no more instable.")
                    )

                # log normally
                item.ihook.pytest_runtest_logreport(report=report)
            else:
                # failure detected and repeat not exhausted, since i < repeat
                report.outcome = "repeat"
                item.ihook.pytest_runtest_logreport(report=report)

                break  # trigger repeat
        else:
            return True  # no need to repeat
    return True
Example #17
    def run_mutations(self):
        '''
        Run the test module once for each mutant.
        Handle the --quick-mut option and the cache
        '''
        if self.session.config.getoption(pl.QUICK_MUTATIONS):
            self.mutants = [x for x in self.mutants if x.nb_catches == 0]
            if len(self.mutants) == 0:
                return

        self.reporter._tw.line()
        self.reporter.write_line("Module " + self.basename + ":")

        if len(self.mutants) == 0:
            self.reporter.write_line("No mutant registered",
                                     **{"purple": True})

        for mutant in self.mutants:
            self.check_cache_and_rearrange(mutant.name)

            mg.g_current_mutant = mutant
            all_test_passed = True
            skip = False
            for item in self.collection:
                if not skip:
                    saved_globals = self.modify_environment(item, mutant)
                    reports = runtestprotocol(item)
                    if any(("failed" in report.outcome) for report in reports):
                        self.write_in_cache(item, mutant.name)
                        mutant.nb_catches += 1
                        all_test_passed = False
                        if self.session.config.getoption(pl.QUICK_MUTATIONS):
                            skip = True
                    self.restore_environment(item, mutant, saved_globals)
                else:
                    self.reporter.write(" ")

            mg.g_current_mutant = None

            if all_test_passed:
                self.reporter.write_line("\t" + mutant.name +
                                         "\t/!\ ALL TESTS PASSED")
                pl.mutants_passed_all_tests[self.basename].append(mutant.name)
            else:
                self.reporter.write_line("\t" + mutant.name)
Example #18
 def call_items(self, target_item, items):
     """
     Call all items before target test
     and if any of its results (setup, call, teardown) failed, mark it as flaky
     :param _pytest.python.Function target_item: test which should fail
     :param Bucket[_pytest.python.Function] items: bucket of tests
     """
     for next_idx, test_func in enumerate(items, 1):
         with self.log(test_func) as logger:
             next_item = items[next_idx] if next_idx < len(
                 items) else target_item
             reports = runtestprotocol(item=test_func,
                                       nextitem=next_item,
                                       log=False)
             for report in reports:  # 3 reports: setup, call, teardown
                 if report.failed is True:
                     report.outcome = 'flaky'
                 logger(report=report)
Example #19
def pytest_runtest_protocol(item, nextitem):
    """
    This hook captures the status of each test as it runs.
    """

    reports = runtestprotocol(item, nextitem=nextitem)
    for report in reports:
        if report.outcome == 'skipped':
            evalxfail = getattr(item, '_evalxfail', None)
            if evalxfail:
                report.wasxfail = evalxfail.getexplanation()
            res_list.append({'test': item, 'result': report})
        if report.when == 'call':
            test_details = {'test': item, 'result': report}
            res_list.append(test_details)

    write_using_jinja(res_list)
    return True
Example #20
def pytest_runtest_protocol(item, nextitem):
    m = _get_marker(item)
    if m is None:
        return None

    reports = None
    with _RECORDER.current_run():
        n, max_n = _get_sample_range(m)
        s = 0
        while s < n:
            item.ihook.pytest_runtest_logstart(nodeid=item.nodeid,
                                               location=item.location)
            reports = runtestprotocol(item, nextitem=nextitem, log=False)
            s += 1
            if s == n and _has_failed(reports):
                n = min(n + 1, max_n)

    _report_last_run(item, reports)
    return True
Example #21
def pytest_runtest_protocol(item, nextitem):
    """
    Note: when teardown fails, two reports are generated for the case, one for the test
    case and the other for the teardown error.

    Note: in some versions of py.test, when setup fails on a test that has been marked with xfail, 
    it gets an XPASS rather than an XFAIL 
    (https://bitbucket.org/hpk42/pytest/issue/160/an-exception-thrown-in)
    fix should be released in version 2.2.5
    """
    reruns = item.session.config.option.reruns
    if reruns == 0:
        return
    # while this doesn't need to be run with every item, it will fail on the first 
    # item if necessary
    check_options(item.session.config)

    item.ihook.pytest_runtest_logstart(
        nodeid=item.nodeid, location=item.location,
    )

    for i in range(reruns+1):  # ensure at least one run of each item
        reports = runtestprotocol(item, nextitem=nextitem, log=False)
        # break if setup and call pass
        if reports[0].passed and reports[1].passed:
            break

        # break if test marked xfail
        evalxfail = getattr(item, '_evalxfail', None)
        if evalxfail:
            break

    for report in reports:
        if report.when in ("call"):
            if i > 0:
                report.rerun = i
        item.ihook.pytest_runtest_logreport(report=report)

    # pytest_runtest_protocol returns True
    return True
Example #22
def pytest_runtest_protocol(item, nextitem):
    """
    Note: when teardown fails, two reports are generated for the case, one for
    the test case and the other for the teardown error.
    """

    rerun_setup = get_rerun_setup_count(item)

    if rerun_setup is None:
        # global setting is not specified, no setup reruns
        return

    # while this doesn't need to be run with every item, it will fail on the
    # first item if necessary
    check_options(item.session.config)
    parallel = hasattr(item.config, 'slaveinput')
    item.execution_count = 0

    need_to_run = True
    while need_to_run:
        item.execution_count += 1
        item.ihook.pytest_runtest_logstart(nodeid=item.nodeid,
                                           location=item.location)
        reports = runtestprotocol(item, nextitem=nextitem, log=False)

        for report in reports:  # 3 reports: setup, call, teardown
            report.failed_to_verify = False
            if report.when == 'setup':
                report.rerun = item.execution_count - 1
                xfail = hasattr(report, 'wasxfail')

                if item.execution_count > rerun_setup and _failed(report):
                    # last run and failure detected on setup
                    report.failed_to_verify = True
                    item.ihook.pytest_runtest_logreport(report=report)

                elif item.execution_count > rerun_setup and _passed(
                        report) or report.skipped and not xfail:
                    # last run and no failure detected, log normally
                    item.ihook.pytest_runtest_logreport(report=report)

                elif item.execution_count > rerun_setup and xfail and not report.passed:
                    # last run and setup failed on xfail (remove any xfail traces, otherwise pytest exits with code 0)
                    report.outcome = 'failed'
                    report.failed_to_verify = True
                    del report.wasxfail
                    item.ihook.pytest_runtest_logreport(report=report)

                elif item.execution_count > rerun_setup:
                    item.ihook.pytest_runtest_logreport(report=report)

                elif report.passed:
                    item.ihook.pytest_runtest_logreport(report=report)

                else:
                    report.outcome = 'setup rerun'
                    _clear_cache(parallel, report, item)
                    break  # trigger rerun
            else:
                item.ihook.pytest_runtest_logreport(report=report)
        else:
            need_to_run = False

        item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid,
                                            location=item.location)

    return True
Example #23
def pytest_runtest_protocol(item, nextitem):
    """
    Note: when teardown fails, two reports are generated for the case, one for
    the test case and the other for the teardown error.
    """

    reruns = get_reruns_count(item)

    rerun_setup = get_rerun_setup_count(item)

    if reruns is None and rerun_setup is None:
        # global setting is not specified, and this test is not marked with
        # flaky
        return

    # while this doesn't need to be run with every item, it will fail on the
    # first item if necessary
    check_options(item.session.config)
    delay = get_reruns_delay(item)
    parallel = hasattr(item.config, 'slaveinput')
    item.execution_count = 0

    need_to_run = True
    while need_to_run:
        item.execution_count += 1
        item.ihook.pytest_runtest_logstart(nodeid=item.nodeid,
                                           location=item.location)
        reports = runtestprotocol(item, nextitem=nextitem, log=False)

        for report in reports:  # 3 reports: setup, call, teardown
            report.failed_to_verify = False
            if report.when == 'setup':
                report.rerun = item.execution_count - 1
                xfail = hasattr(report, 'wasxfail')
                if item.execution_count > rerun_setup and report.failed and not report.passed:
                    # last run and failure detected on setup
                    report.failed_to_verify = True
                    item.ihook.pytest_runtest_logreport(report=report)
                elif item.execution_count > rerun_setup or not report.failed or xfail:
                    # last run or no failure detected, log normally
                    item.ihook.pytest_runtest_logreport(report=report)
                else:
                    # failure detected and setup reruns not exhausted (execution_count <= rerun_setup)
                    time.sleep(delay)
                    report.outcome = 'setup rerun'
                    _clear_cache(parallel, report, item)
                    break  # trigger rerun
            else:
                report.rerun = item.execution_count - 1
                xfail = hasattr(report, 'wasxfail')
                if item.execution_count > reruns or not report.failed or xfail:
                    # last run or no failure detected, log normally
                    item.ihook.pytest_runtest_logreport(report=report)
                else:
                    # failure detected and reruns not exhausted (execution_count <= reruns)
                    time.sleep(delay)
                    report.outcome = 'rerun'
                    _clear_cache(parallel, report, item)
                    break  # trigger rerun
        else:
            need_to_run = False

        item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid,
                                            location=item.location)

    return True
Example #24
 def run_test_again():
     # This invokes our setup/teardown hooks again
     # Inspired by https://pypi.python.org/pypi/pytest-rerunfailures
     from _pytest.runner import runtestprotocol
     item._initrequest()  # Re-init fixtures
     reports = runtestprotocol(item, nextitem=nextitem, log=False)
Example #25
 def getrunner(self):
     return lambda item: runner.runtestprotocol(item, log=False)
Example #26
 def runforked():
     try:
         reports = runtestprotocol(item, log=False)
     except KeyboardInterrupt:
         os._exit(EXITSTATUS_TESTEXIT)
     return marshal.dumps([serialize_report(x) for x in reports])
Example #27
 def f(item):
     return runner.runtestprotocol(item, log=False)
Example #28
def pytest_runtest_protocol(item, nextitem, driver):
    reports = runtestprotocol(item, nextitem=nextitem)
    for report in reports:
        if report.when == 'call':
            driver.execute_script('sauce:job-result={}'.format(report.outcome))
    return True
Example #29
def pytest_runtest_protocol(item, nextitem):
    reports = runtestprotocol(item, nextitem=nextitem)
    for report in reports:
        if report.when == "call":
            print("\n{} --- {}".format(item.name, report.outcome))
    return True
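
Dropped into a conftest.py, a hook like the one above adds one line per test right after its call phase while the normal pytest output continues unchanged. A quick way to try it (file and test names are illustrative):

# test_demo.py
def test_ok():
    assert True


def test_broken():
    assert False

# With the hook above in conftest.py, `pytest -q` prints lines such as
#   test_ok --- passed
#   test_broken --- failed
# in addition to the usual summary.
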
Example #30
 def run_test_again():
     # This invokes our setup/teardown hooks again
     # Inspired by https://pypi.python.org/pypi/pytest-rerunfailures
     from _pytest.runner import runtestprotocol
     item._initrequest()  # Re-init fixtures
     reports = runtestprotocol(item, nextitem=nextitem, log=False)
Example #31
def pytest_runtest_protocol(item, nextitem):
    """
    Note: when teardown fails, two reports are generated for the case, one for the test
    case and the other for the teardown error.

    Note: in some versions of py.test, when setup fails on a test that has been marked with xfail, 
    it gets an XPASS rather than an XFAIL 
    (https://bitbucket.org/hpk42/pytest/issue/160/an-exception-thrown-in)
    fix should be released in version 2.2.5
    """

    if not hasattr(item, 'get_marker'):
        # pytest < 2.4.2 doesn't support get_marker
        rerun_marker = None
        val = item.keywords.get("flaky", None)
        if val is not None:
            from _pytest.mark import MarkInfo, MarkDecorator
            if isinstance(val, (MarkDecorator, MarkInfo)):
                rerun_marker = val
    else:
        #In pytest 2.4.2, we can do this pretty easily.
        rerun_marker = item.get_marker("flaky")

    #Use the marker as a priority over the global setting.
    if rerun_marker is not None:
        if "reruns" in rerun_marker.kwargs:
            #Check for keyword arguments
            reruns = rerun_marker.kwargs["reruns"]
        elif len(rerun_marker.args) > 0:
            # Check for positional arguments
            reruns = rerun_marker.args[0]
        else:
            # marker given without arguments: default to a single rerun
            reruns = 1
    elif item.session.config.option.reruns is not None:
        #Default to the global setting
        reruns = item.session.config.option.reruns
    else:
        #Global setting is not specified, and this test is not marked with flaky
        return
    
    # while this doesn't need to be run with every item, it will fail on the first 
    # item if necessary
    check_options(item.session.config)

    item.ihook.pytest_runtest_logstart(
        nodeid=item.nodeid, location=item.location,
    )

    for i in range(reruns+1):  # ensure at least one run of each item
        reports = runtestprotocol(item, nextitem=nextitem, log=False)

        if any(j.skipped for j in reports) or all(j.passed for j in reports):
            break

        if item.get_marker('xfail'):
            break

    for report in reports:
        if report.when == "call":
            if i > 0:
                report.rerun = i
        item.ihook.pytest_runtest_logreport(report=report)

    # pytest_runtest_protocol returns True
    return True
Example #32
def pytest_runtest_protocol(item, nextitem):
    '''pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    '''

    reports = runtestprotocol(item, nextitem=nextitem)
    failed = None
    skipped = None
    for report in reports:
        if report.outcome == 'failed':
            failed = report
            break
        if report.outcome == 'skipped':
            if not skipped:
                skipped = report

    if failed:
        tests_failed.add(item.name)
    elif skipped:
        tests_skipped.add(item.name)
    else:
        tests_passed.add(item.name)
    tests_not_run.remove(item.name)

    try:
        if failed:
            msg = 'FAILED:\n' + str(failed.longrepr)
            log.status_fail(msg)
        elif skipped:
            msg = 'SKIPPED:\n' + str(skipped.longrepr)
            log.status_skipped(msg)
        else:
            log.status_pass('OK')
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failed:
        console.cleanup_spawn()

    return reports
Example #33
def pytest_runtest_protocol(item, nextitem):
    '''pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    '''

    reports = runtestprotocol(item, nextitem=nextitem)
    failed = None
    skipped = None
    for report in reports:
        if report.outcome == 'failed':
            failed = report
            break
        if report.outcome == 'skipped':
            if not skipped:
                skipped = report

    if failed:
        tests_failed.add(item.name)
    elif skipped:
        tests_skipped.add(item.name)
    else:
        tests_passed.add(item.name)
    tests_not_run.remove(item.name)

    try:
        if failed:
            msg = 'FAILED:\n' + str(failed.longrepr)
            log.status_fail(msg)
        elif skipped:
            msg = 'SKIPPED:\n' + str(skipped.longrepr)
            log.status_skipped(msg)
        else:
            log.status_pass('OK')
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failed:
        console.cleanup_spawn()

    return reports
Example #34
def pytest_runtest_protocol(item, nextitem):
    reports = runtestprotocol(item, nextitem=nextitem)
    for report in reports:
        if report.when == 'call':
            print(f"\n{item.name} --- {report.outcome}")
    return True
Example #35
def pytest_runtest_protocol(item, nextitem):
    reports = runtestprotocol(item, nextitem=nextitem)
    should_execute_reporting = True

    try:
        test_run_key = getattr(pytest, 'adaptavist_test_run_key')
    except AttributeError:
        logger.warning(
            "Adaptavist Jira's test management plugin reporting is not available for this test case! The test run key wasn't set in pytest namespace!"
        )
        should_execute_reporting = False
    else:
        if test_run_key is None:
            logger.warning(
                "Adaptavist Jira's test management plugin reporting is not available for this test case! The test run key is set to null!"
            )
            should_execute_reporting = False

    try:
        test_project_key = getattr(pytest, 'adaptavist_test_cases_project_key')
    except AttributeError:
        logger.warning(
            "Adaptavist Jira's test management plugin reporting is not available for this test case! The project key wasn't set in pytest namespace!"
        )
        should_execute_reporting = False
    else:
        if test_project_key is None:
            logger.warning(
                "Adaptavist Jira's test management plugin reporting is not available for this test case! The test project key is set to null!"
            )
            should_execute_reporting = False

    try:
        adaptavist_instance = getattr(pytest, 'adaptavist_instance')
    except AttributeError:
        logger.warning(
            "Adaptavist Jira's test management plugin reporting is not available for this test case! The adaptavist instance wasn't set in pytest namespace!"
        )
        should_execute_reporting = False
    else:
        if adaptavist_instance is None:
            logger.warning(
                "Adaptavist Jira's test management plugin reporting is not available for this test case! The adaptavist instance is set to null!"
            )
            should_execute_reporting = False

    if should_execute_reporting:
        for report in reports:
            if report.when == 'call':
                test_name = item.name
                test_case_key = pytest.adaptavist_test_cases_project_key + "-" + test_name.split(
                    '_')[1]
                logger.info(test_case_key)
                logger.info(report.outcome)
                if report.passed:
                    adaptavist_instance.edit_test_result_status(
                        test_run_key, test_case_key, "Pass")
                elif report.failed:
                    adaptavist_instance.edit_test_result_status(
                        test_run_key, test_case_key, "Fail")

    return True
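
The three try/except blocks above only check that an attribute exists on the pytest namespace and is not None; the same guard can be written more compactly with `getattr` defaults. A sketch of an equivalent check, reusing the attribute names from the example (the helper name and module-level logger here are illustrative):

import logging

import pytest

logger = logging.getLogger(__name__)  # the example above defines its own logger


def _adaptavist_config_available():
    # True only when all three pytest-namespace attributes were set and are not None
    required = (
        'adaptavist_test_run_key',
        'adaptavist_test_cases_project_key',
        'adaptavist_instance',
    )
    for name in required:
        if getattr(pytest, name, None) is None:
            logger.warning(
                "Adaptavist Jira's test management plugin reporting is not "
                "available for this test case! %r is missing or null.", name)
            return False
    return True
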
Example #36
def pytest_runtest_protocol(item, nextitem):
    """
    Note: when teardown fails, two reports are generated for the case, one for
    the test case and the other for the teardown error.
    """
    rerun_marker = item.get_marker("flaky")
    # use the marker as a priority over the global setting.
    if rerun_marker is not None:
        if "reruns" in rerun_marker.kwargs:
            # check for keyword arguments
            reruns = rerun_marker.kwargs["reruns"]
        elif len(rerun_marker.args) > 0:
            # check for arguments
            reruns = rerun_marker.args[0]
        else:
            reruns = 1
    elif item.session.config.option.reruns is not None:
        # default to the global setting
        reruns = item.session.config.option.reruns
    else:
        # global setting is not specified, and this test is not marked with
        # flaky
        return

    # while this doesn't need to be run with every item, it will fail on the
    # first item if necessary
    check_options(item.session.config)

    for i in range(reruns + 1):  # ensure at least one run of each item
        is_rerun = True if i > 0 else False
        if "is_rerun" in item.funcargnames:
            if item.funcargs is None:
                item._initrequest()
            item.funcargs["is_rerun"] = is_rerun

        item.ihook.pytest_runtest_logstart(nodeid=item.nodeid,
                                           location=item.location)
        reports = runtestprotocol(item, nextitem=nextitem, log=False)

        for report in reports:  # 3 reports: setup, test, teardown
            report.rerun = i
            xfail = hasattr(report, 'wasxfail')
            if i == reruns or not report.failed or xfail:
                # last run or no failure detected, log normally
                item.ihook.pytest_runtest_logreport(report=report)
            else:
                # failure detected and reruns not exhausted, since i < reruns
                report.outcome = 'rerun'

                # When running tests in parallel using pytest-xdist the first
                # report that is logged will finish and terminate the current
                # node rather than rerunning the test. Thus we must skip logging of
                # intermediate results when running in parallel, otherwise no
                # test is rerun.
                # See: https://github.com/pytest-dev/pytest/issues/1193
                parallel_testing = hasattr(item.config, 'slaveinput')
                if not parallel_testing:
                    # will rerun test, log intermediate result
                    item.ihook.pytest_runtest_logreport(report=report)

                break  # trigger rerun
        else:
            return True  # no need to rerun

    return True
Example #37
def pytest_runtest_protocol(item, nextitem):
    """
    Note: when teardown fails, two reports are generated for the case, one for the test
    case and the other for the teardown error.

    Note: in some versions of py.test, when setup fails on a test that has been marked with xfail, 
    it gets an XPASS rather than an XFAIL 
    (https://bitbucket.org/hpk42/pytest/issue/160/an-exception-thrown-in)
    fix should be released in version 2.2.5
    """

    if not hasattr(item, 'get_marker'):
        # pytest < 2.4.2 doesn't support get_marker
        rerun_marker = None
        val = item.keywords.get("flaky", None)
        if val is not None:
            from _pytest.mark import MarkInfo, MarkDecorator
            if isinstance(val, (MarkDecorator, MarkInfo)):
                rerun_marker = val
    else:
        #In pytest 2.4.2, we can do this pretty easily.
        rerun_marker = item.get_marker("flaky")

    #Use the marker as a priority over the global setting.
    if rerun_marker is not None:
        if "reruns" in rerun_marker.kwargs:
            #Check for keyword arguments
            reruns = rerun_marker.kwargs["reruns"]
        elif len(rerun_marker.args) > 0:
            # Check for positional arguments
            reruns = rerun_marker.args[0]
        else:
            # marker given without arguments: default to a single rerun
            reruns = 1
    elif item.session.config.option.reruns is not None:
        #Default to the global setting
        reruns = item.session.config.option.reruns
    else:
        #Global setting is not specified, and this test is not marked with flaky
        return

    # while this doesn't need to be run with every item, it will fail on the first
    # item if necessary
    check_options(item.session.config)

    item.ihook.pytest_runtest_logstart(
        nodeid=item.nodeid,
        location=item.location,
    )

    for i in range(reruns + 1):  # ensure at least one run of each item
        reports = runtestprotocol(item, nextitem=nextitem, log=False)
        # break if setup and call pass
        if reports[0].passed and reports[1].passed:
            break

        # break if test marked xfail
        evalxfail = getattr(item, '_evalxfail', None)
        if evalxfail:
            break

    for report in reports:
        if report.when == "call":
            if i > 0:
                report.rerun = i
        item.ihook.pytest_runtest_logreport(report=report)

    # pytest_runtest_protocol returns True
    return True
Example #38
def pytest_runtest_protocol(item, nextitem):
    reports = runtestprotocol(item, nextitem=nextitem)
    for report in reports:
        if report.when == 'call':
            print('\n\t%s --- %s' % (item.name, report.outcome))
    return True
Example #39
 def getrunner(self):
     return lambda item: runner.runtestprotocol(item, log=False)
Example #40
def pytest_runtest_protocol(item, nextitem):
    global disabled
    if disabled:
        return
    global data
    reports = runtestprotocol(item, nextitem=nextitem)
    for report in reports:
        if report.when == 'call':
            name = item.name
            suite = None
            try:
                suite = item.get_marker('suite')
                if (suite):
                    suite = suite.args[0]  #extract val from marker
            except AttributeError:
                # no get_marker if pytest 4
                pass
            if (suite is None):
                try:
                    suite = item.get_closest_marker('suite')
                    if (suite):
                        suite = suite.args[0]  #extract val from marker
                except AttributeError:
                    # no get_closest_marker in pytest 3
                    pass
            if (suite is None):
                global nosuites
                if not nosuites:
                    suite = str(item.parent.name)
                    suite = suite.rpartition("/")[2]
                    suite = suite.rpartition(".py")[0]
            testcase = {
                'name': name,
                'result': tesultsFriendlyResult(report.outcome),
                'start': startTimes[item.nodeid],
                'end': int(round(time.time() * 1000)),
                'reason': reasonForFailure(report)
            }
            if (suite):
                testcase['suite'] = suite
            files = filesForTest(suite, name)
            if (files):
                if len(files) > 0:
                    testcase['files'] = files
            params = paramsForTest(item)
            if (params):
                testcase['params'] = params
                testname = item.name.split('[')
                if len(testname) > 1:
                    testcase['name'] = testname[0]
            paramDesc = None
            try:
                paramDesc = item.get_marker('description')
            except AttributeError:
                # no get_marker if pytest 4
                pass
            if (paramDesc is None):
                try:
                    paramDesc = item.get_closest_marker('description')
                except AttributeError:
                    # no get_closest_marker in pytest 3
                    pass

            if (paramDesc):
                testcase['desc'] = paramDesc.args[0]
            data['results']['cases'].append(testcase)

            try:
                markers = item.iter_markers()
                for marker in markers:
                    if (marker.name == 'description' or marker.name == 'desc'):
                        testcase['desc'] = marker.args[0]
                    elif (marker.name == 'parametrize'
                          or marker.name == 'filterwarnings'
                          or marker.name == 'skip' or marker.name == 'skipif'
                          or marker.name == 'usefixtures'
                          or marker.name == 'xfail' or marker.name == 'suite'):
                        pass
                    else:
                        testcase['_' + marker.name] = marker.args[0]
            except AttributeError:
                pass

    return True
Example #41
def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    log.get_and_reset_warning()
    reports = runtestprotocol(item, nextitem=nextitem)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if not item.name in anchors:
        start_test_section(item)

    failure_cleanup = False
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports
Example #42
def pytest_runtest_protocol(item, nextitem):
    """
    Note: when teardown fails, two reports are generated for the case, one for
    the test case and the other for the teardown error.
    """

    reruns = get_reruns_count(item)
    if reruns is None:
        # global setting is not specified, and this test is not marked with
        # flaky
        return

    # while this doesn't need to be run with every item, it will fail on the
    # first item if necessary
    check_options(item.session.config)
    delay = get_reruns_delay(item)
    parallel = hasattr(item.config, "slaveinput")
    item.execution_count = 0

    need_to_run = True
    while need_to_run:
        item.execution_count += 1
        item.ihook.pytest_runtest_logstart(nodeid=item.nodeid,
                                           location=item.location)
        reports = runtestprotocol(item, nextitem=nextitem, log=False)

        for report in reports:  # 3 reports: setup, call, teardown
            is_terminal_error = _should_hard_fail_on_error(
                item.session.config, report)
            report.rerun = item.execution_count - 1
            xfail = hasattr(report, "wasxfail")
            if (item.execution_count > reruns or not report.failed or xfail
                    or is_terminal_error):
                # last run or no failure detected, log normally
                item.ihook.pytest_runtest_logreport(report=report)
                if (item.execution_count != 1 and not report.failed
                        and report.when == "call"
                        and get_flaky_flag(item) == 1):
                    print("\nFLAKY TEST DETECTED: " + str(report.nodeid))
                    listOfFlakyTestCases.append(str(report.nodeid))
            else:
                # failure detected and reruns not exhausted (execution_count <= reruns)
                report.outcome = "rerun"
                time.sleep(delay)

                if not parallel or works_with_current_xdist():
                    # will rerun test, log intermediate result
                    item.ihook.pytest_runtest_logreport(report=report)

                # clean the item's cached results from any level of setups
                _remove_cached_results_from_failed_fixtures(item)
                _remove_failed_setup_state_from_session(item)

                break  # trigger rerun
        else:
            need_to_run = False

        item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid,
                                            location=item.location)

    if nextitem is None and get_flaky_flag(item) == 1:
        print("\nList of Flaky testcases in this run: ", listOfFlakyTestCases)

    return True
Example #43
 def runforked():
     try:
         reports = runtestprotocol(item, log=False)
     except KeyboardInterrupt:
         py.std.os._exit(EXITSTATUS_TESTEXIT)
     return marshal.dumps([serialize_report(x) for x in reports])
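
The forked runners above (see also Examples #2 and #26) hand results back to the parent process by converting each report into plain data and sending it through marshal, since report objects themselves cannot be marshalled. The `serialize_report` helper is not shown on this page; the following is a rough sketch of what such a helper typically does, offered as an assumption rather than the exact implementation:

def serialize_report(rep):
    # Keep only attributes whose values are simple builtin types so the
    # resulting dict survives marshal.dumps() in the child process and
    # marshal.loads() in the parent; anything else (e.g. longrepr) is
    # reduced to its text form.
    simple = (str, int, float, bool, type(None))
    out = {}
    for name, value in vars(rep).items():
        out[name] = value if isinstance(value, simple) else str(value)
    return out
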
Example #44
def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    reports = runtestprotocol(item, nextitem=nextitem)

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if not item.name in anchors:
        start_test_section(item)

    failure_cleanup = False
    test_list = tests_passed
    msg = 'OK'
    msg_log = log.status_pass
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports
Example #45
 def f(item):
     return runner.runtestprotocol(item, log=False)
Example #46
 def pytest_runtestloop(self, session):
     if self._using_xdist:
         #Yes, we don't have the hooks we'd need to show the results in the pyunit view...
         #Maybe the plugin maintainer may be able to provide these additional hooks?
         return None
     
     #This mock will make all file representations to be printed as Pydev expects, 
     #so that hyperlinks are properly created in errors. Note that we don't unmock it!
     self._MockFileRepresentation()
     
     #Based on the default run test loop: _pytest.session.pytest_runtestloop
     #but getting the times we need, reporting the number of tests found and notifying as each
     #test is run.
     
     start_total = time.time()
     try:
         pydev_runfiles_xml_rpc.notifyTestsCollected(len(session.session.items))
         
         if session.config.option.collectonly:
             return True
         
         for item in session.session.items:
             
             filename = item.fspath.strpath
             test = item.location[2]
             start = time.time()
             
             pydev_runfiles_xml_rpc.notifyStartTest(filename, test)
             
             #Don't use this hook because we need the actual reports.
             #item.config.hook.pytest_runtest_protocol(item=item)
             reports = runner.runtestprotocol(item)
             delta = time.time() - start
             
             captured_output = ''
             error_contents = ''
             
             
             status = 'ok'
             for r in reports:
                 if r.outcome not in ('passed', 'skipped'):
                     #It has only passed, skipped and failed (no error), so, let's consider error if not on call.
                     if r.when == 'setup':
                         if status == 'ok':
                             status = 'error'
                         
                     elif r.when == 'teardown':
                         if status == 'ok':
                             status = 'error'
                         
                     else:
                         #any error in the call (not in setup or teardown) is considered a regular failure.
                         status = 'fail'
                     
                 if hasattr(r, 'longrepr') and r.longrepr:
                     rep = r.longrepr
                     if hasattr(rep, 'reprcrash'):
                         reprcrash = rep.reprcrash
                         error_contents += str(reprcrash)
                         error_contents += '\n'
                         
                     if hasattr(rep, 'reprtraceback'):
                         error_contents += str(rep.reprtraceback)
                         
                     if hasattr(rep, 'sections'):
                         for name, content, sep in rep.sections:
                             error_contents += sep * 40 
                             error_contents += name 
                             error_contents += sep * 40 
                             error_contents += '\n'
                             error_contents += content 
                             error_contents += '\n'
             
             self.reportCond(status, filename, test, captured_output, error_contents, delta)
             
             if session.shouldstop:
                 raise session.Interrupted(session.shouldstop)
     finally:
         pydev_runfiles_xml_rpc.notifyTestRunFinished('Finished in: %.2f secs.' % (time.time() - start_total,))
     return True
Example #47
 def pytest_runtestloop(self, session):
     if self._using_xdist:
         #Yes, we don't have the hooks we'd need to show the results in the pyunit view...
         #Maybe the plugin maintainer may be able to provide these additional hooks?
         return None
     
     #This mock will make all file representations to be printed as Pydev expects, 
     #so that hyperlinks are properly created in errors. Note that we don't unmock it!
     self._MockFileRepresentation()
     
     #Based on the default run test loop: _pytest.session.pytest_runtestloop
     #but getting the times we need, reporting the number of tests found and notifying as each
     #test is run.
     
     start_total = time.time()
     try:
         pydev_runfiles_xml_rpc.notifyTestsCollected(len(session.session.items))
         
         if session.config.option.collectonly:
             return True
         
         for item in session.session.items:
             
             filename = item.fspath.strpath
             test = item.location[2]
             start = time.time()
             
             pydev_runfiles_xml_rpc.notifyStartTest(filename, test)
             
             #Don't use this hook because we need the actual reports.
             #item.config.hook.pytest_runtest_protocol(item=item)
             reports = runner.runtestprotocol(item)
             delta = time.time() - start
             
             captured_output = ''
             error_contents = ''
             
             
             status = 'ok'
             for r in reports:
                 if r.outcome not in ('passed', 'skipped'):
                     #It has only passed, skipped and failed (no error), so, let's consider error if not on call.
                     if r.when == 'setup':
                         if status == 'ok':
                             status = 'error'
                         
                     elif r.when == 'teardown':
                         if status == 'ok':
                             status = 'error'
                         
                     else:
                         #any error in the call (not in setup or teardown) is considered a regular failure.
                         status = 'fail'
                     
                 if r.longrepr:
                     rep = r.longrepr
                     reprcrash = rep.reprcrash
                     error_contents += str(reprcrash)
                     error_contents += '\n'
                     error_contents += str(rep.reprtraceback)
                     for name, content, sep in rep.sections:
                         error_contents += sep * 40 
                         error_contents += name 
                         error_contents += sep * 40 
                         error_contents += '\n'
                         error_contents += content 
                         error_contents += '\n'
             
             self.reportCond(status, filename, test, captured_output, error_contents, delta)
             
             if session.shouldstop:
                 raise session.Interrupted(session.shouldstop)
     finally:
         pydev_runfiles_xml_rpc.notifyTestRunFinished('Finished in: %.2f secs.' % (time.time() - start_total,))
     return True
         
Example #48
    config.pluginmanager.unregister(standard_reporter)
    config.pluginmanager.register(contextional_reporter, "terminalreporter")


def pytest_runtest_protocol(item, nextitem):
    if item.obj == GroupTestCase.runTest:
        # the current test is a GroupTestCase test
        case = get_next_test_from_helper()
        item._nodeid = item.nodeid.split("::")[0] + "::"
        item._nodeid += case._inline_description
        item._location = case
    item.ihook.pytest_runtest_logstart(
        nodeid=item.nodeid,
        location=item.location,
    )
    runner.runtestprotocol(item, nextitem=nextitem)
    if item.obj == GroupTestCase.runTest:
        handle_teardowns(item)
    return True


class FakeItem(object):

    _case = None

    def __init__(self, item):
        self._item = item

    def listchain(self):
        # grab the session and module from the actual listchain.
        chain_start = self._item.listchain()[:2]