# pytest_rerunfailures.py
# forked from pytest-dev/pytest-rerunfailures

import time
import warnings

import pkg_resources
import pytest
from _pytest.runner import runtestprotocol
from _pytest.resultlog import ResultLog


def works_with_current_xdist():
    """Return whether the installed pytest-xdist version supports reruns.

    When running tests in parallel using pytest-xdist < 1.20.0, the first
    report that is logged will finish and terminate the current node rather
    than rerunning the test. Thus we must skip logging of intermediate
    results under these circumstances, otherwise no test is rerun.
    """
    try:
        d = pkg_resources.get_distribution('pytest-xdist')
        return d.parsed_version >= pkg_resources.parse_version('1.20')
    except pkg_resources.DistributionNotFound:
        return None


# command line options
def pytest_addoption(parser):
    group = parser.getgroup(
        "rerunfailures",
        "re-run failing tests to eliminate flaky failures")
    group._addoption(
        '--reruns',
        action="store",
        dest="reruns",
        type=int,
        default=0,
        help="number of times to re-run failed tests. defaults to 0.")
    group._addoption(
        '--reruns-delay',
        action='store',
        dest='reruns_delay',
        type=float,
        default=0,
        help='add time (seconds) delay between reruns.')
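

# Illustration (not part of the plugin source): the options above are used
# from the command line, e.g. (the test path is hypothetical)
#
#     pytest --reruns 5 --reruns-delay 1 tests/test_example.py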


def pytest_configure(config):
    # add flaky marker
    config.addinivalue_line(
        "markers", "flaky(reruns=1, reruns_delay=0): mark test to re-run up "
        "to 'reruns' times. Add a delay of 'reruns_delay' seconds "
        "between re-runs.")


# make sure the options make sense;
# should run before / at the beginning of pytest_cmdline_main
def check_options(config):
    val = config.getvalue
    if not val("collectonly"):
        if config.option.reruns != 0:
            if config.option.usepdb:  # a core option
                raise pytest.UsageError("--reruns incompatible with --pdb")

    resultlog = getattr(config, '_resultlog', None)
    if resultlog:
        logfile = resultlog.logfile
        config.pluginmanager.unregister(resultlog)
        config._resultlog = RerunResultLog(config, logfile)
        config.pluginmanager.register(config._resultlog)


def _get_marker(item):
    try:
        return item.get_closest_marker("flaky")
    except AttributeError:
        # pytest < 3.6
        return item.get_marker("flaky")


def get_reruns_count(item):
    rerun_marker = _get_marker(item)
    reruns = None

    # the marker takes priority over the global --reruns setting
    if rerun_marker is not None:
        if "reruns" in rerun_marker.kwargs:
            # check for keyword arguments
            reruns = rerun_marker.kwargs["reruns"]
        elif len(rerun_marker.args) > 0:
            # check for positional arguments
            reruns = rerun_marker.args[0]
        else:
            reruns = 1
    elif item.session.config.option.reruns:
        # default to the global setting
        reruns = item.session.config.option.reruns

    return reruns
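

# Illustration (not part of the plugin source): if pytest is invoked with
# `--reruns 2`, a test marked `@pytest.mark.flaky(reruns=5)` is still retried
# up to 5 times, because the marker takes priority over the global option.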


def get_reruns_delay(item):
    rerun_marker = _get_marker(item)

    if rerun_marker is not None:
        if "reruns_delay" in rerun_marker.kwargs:
            delay = rerun_marker.kwargs["reruns_delay"]
        elif len(rerun_marker.args) > 1:
            # check for positional arguments
            delay = rerun_marker.args[1]
        else:
            delay = 0
    else:
        delay = item.session.config.option.reruns_delay

    if delay < 0:
        delay = 0
        warnings.warn('Delay time between re-runs cannot be < 0. '
                      'Using default value: 0')

    return delay
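

# Illustration (not part of the plugin source): positional marker arguments
# map to (reruns, reruns_delay), so `@pytest.mark.flaky(2, 5)` requests two
# reruns with a five-second delay between attempts.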


def _remove_cached_results_from_failed_fixtures(item):
    """Remove the cached_result attribute from every failed fixture so that
    those fixtures are set up again on the rerun."""
    cached_result = 'cached_result'
    fixture_info = getattr(item, '_fixtureinfo', None)
    for fixture_def_str in getattr(fixture_info, 'name2fixturedefs', ()):
        fixture_defs = fixture_info.name2fixturedefs[fixture_def_str]
        for fixture_def in fixture_defs:
            if hasattr(fixture_def, cached_result):
                result, cache_key, err = getattr(fixture_def, cached_result)
                if err:  # only clear cached results of failed fixtures
                    delattr(fixture_def, cached_result)


def _remove_failed_setup_state_from_session(item):
    """Remove the _prepare_exc attribute from every collector on the
    _setupstate stack, then clear the stack itself."""
    prepare_exc = "_prepare_exc"
    setup_state = getattr(item.session, '_setupstate')
    for col in setup_state.stack:
        if hasattr(col, prepare_exc):
            delattr(col, prepare_exc)
    setup_state.stack = list()


def pytest_runtest_protocol(item, nextitem):
    """Run the test protocol, rerunning failed tests as configured.

    Note: when teardown fails, two reports are generated for the case, one
    for the test case and the other for the teardown error.
    """
    reruns = get_reruns_count(item)
    if reruns is None:
        # global setting is not specified, and this test is not marked with
        # flaky
        return

    # while this doesn't need to be run with every item, it will fail on the
    # first item if necessary
    check_options(item.session.config)

    delay = get_reruns_delay(item)
    parallel = hasattr(item.config, 'slaveinput')
    item.execution_count = 0

    while True:
        item.execution_count += 1
        item.ihook.pytest_runtest_logstart(nodeid=item.nodeid,
                                           location=item.location)
        reports = runtestprotocol(item, nextitem=nextitem, log=False)

        for report in reports:  # 3 reports: setup, call, teardown
            report.rerun = item.execution_count - 1
            xfail = hasattr(report, 'wasxfail')
            if item.execution_count > reruns or not report.failed or xfail:
                # last run or no failure detected, log normally
                item.ihook.pytest_runtest_logreport(report=report)
            else:
                # failure detected and reruns not exhausted, since
                # execution_count <= reruns
                report.outcome = 'rerun'
                time.sleep(delay)

                if not parallel or works_with_current_xdist():
                    # will rerun test, log intermediate result
                    item.ihook.pytest_runtest_logreport(report=report)

                # clean the item's cached results from any level of setups
                _remove_cached_results_from_failed_fixtures(item)
                _remove_failed_setup_state_from_session(item)

                break  # trigger rerun
        else:
            return True  # no need to rerun
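

# Illustration (not part of the plugin source): with `--reruns 2`, a test
# that fails twice and then passes produces the report outcomes
# 'rerun', 'rerun', 'passed'; only the final attempt counts toward the
# session result.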


def pytest_report_teststatus(report):
    """Adapted from https://pytest.org/latest/_modules/_pytest/skipping.html
    """
    if report.outcome == 'rerun':
        return 'rerun', 'R', ('RERUN', {'yellow': True})


def pytest_terminal_summary(terminalreporter):
    """Adapted from https://pytest.org/latest/_modules/_pytest/skipping.html
    """
    tr = terminalreporter
    if not tr.reportchars:
        return

    lines = []
    for char in tr.reportchars:
        if char in 'rR':
            show_rerun(terminalreporter, lines)

    if lines:
        tr._tw.sep("=", "rerun test summary info")
        for line in lines:
            tr._tw.line(line)
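

# Illustration (not part of the plugin source): the summary above is printed
# only when rerun report characters are requested, e.g.
#
#     pytest --reruns 2 -r R tests/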


def show_rerun(terminalreporter, lines):
    rerun = terminalreporter.stats.get("rerun")
    if rerun:
        for rep in rerun:
            pos = rep.nodeid
            lines.append("RERUN %s" % (pos,))


class RerunResultLog(ResultLog):
    def __init__(self, config, logfile):
        ResultLog.__init__(self, config, logfile)

    def pytest_runtest_logreport(self, report):
        """Add support for rerun reports; fix for issue:
        https://github.com/pytest-dev/pytest-rerunfailures/issues/28
        """
        if report.when != "call" and report.passed:
            return
        res = self.config.hook.pytest_report_teststatus(report=report)
        code = res[1]
        if code == 'x':
            longrepr = str(report.longrepr)
        elif code == 'X':
            longrepr = ''
        elif report.passed:
            longrepr = ""
        elif report.failed:
            longrepr = str(report.longrepr)
        elif report.skipped:
            longrepr = str(report.longrepr[2])
        elif report.outcome == 'rerun':
            longrepr = str(report.longrepr)
        else:
            # defensive default so log_outcome always receives a string
            longrepr = ''

        self.log_outcome(report, code, longrepr)
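

# Illustration (not part of the plugin source): RerunResultLog only takes
# effect when pytest is run with the result log enabled (an option deprecated
# in later pytest versions), e.g.
#
#     pytest --reruns 2 --resultlog=rerun.log tests/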