def driver(request):
    """Yield a (module-cached) Selenium WebDriver for the parametrized driver.

    The fixture caches the instance in the module-global ``driver_instance``
    so it is reused across tests; it is only torn down by the xfail
    finalizer or cleared when the ``no_driver_after_test`` mark is set.

    :param request: pytest fixture request; ``request.param`` carries the
        driver class name (e.g. ``'Firefox'``, ``'Remote'``).
    :raises Exception: when no ``--driver`` parametrization is present.
    """
    kwargs = {}
    try:
        driver_class = request.param
    except AttributeError:
        # No parametrization means the --driver option was not given.
        raise Exception('This test requires a --driver to be specified.')
    # conditionally mark tests as expected to fail based on driver
    request.node._evalxfail = request.node._evalxfail or MarkEvaluator(
        request.node, 'xfail_{0}'.format(driver_class.lower()))
    if request.node._evalxfail.istrue():
        # For xfailing tests, quit and drop the cached driver afterwards so
        # a possibly broken session does not leak into later tests.
        def fin():
            global driver_instance
            if driver_instance is not None:
                driver_instance.quit()
                driver_instance = None
        request.addfinalizer(fin)
    # skip driver instantiation if xfail(run=False)
    if not request.config.getoption('runxfail'):
        if request.node._evalxfail.istrue():
            if request.node._evalxfail.get('run') is False:
                # Still yield so the generator-fixture protocol is honored.
                yield
                return
    driver_path = request.config.option.executable
    options = None
    global driver_instance
    # Only build a new driver when no cached instance exists.
    if driver_instance is None:
        if driver_class == 'BlackBerry':
            kwargs.update({'device_password': '******'})
        if driver_class == 'Firefox':
            # Legacy (non-marionette) Firefox driver.
            kwargs.update({'capabilities': {'marionette': False}})
            options = get_options(driver_class, request.config)
        if driver_class == 'Marionette':
            # Marionette is driven through the Firefox driver class.
            driver_class = 'Firefox'
            options = get_options(driver_class, request.config)
        if driver_class == 'Remote':
            capabilities = DesiredCapabilities.FIREFOX.copy()
            kwargs.update({'desired_capabilities': capabilities})
            options = get_options('Firefox', request.config)
        if driver_class == 'WebKitGTK':
            options = get_options(driver_class, request.config)
        if driver_path is not None:
            kwargs['executable_path'] = driver_path
        if options is not None:
            kwargs['options'] = options
        # Resolve the driver class by name from the selenium webdriver module.
        driver_instance = getattr(webdriver, driver_class)(**kwargs)
    yield driver_instance
    # Drop (without quitting) the cached driver when the test asks for it;
    # the xfail finalizer above is responsible for quitting.
    if MarkEvaluator(request.node, 'no_driver_after_test').istrue():
        driver_instance = None
def test_marked_one_arg_unicode(self, testdir):
    """A unicode condition string on a custom mark evaluates like a str one."""
    item = testdir.getitem("""
        import pytest
        @pytest.mark.xyz(u"hasattr(os, 'sep')")
        def test_func():
            pass
    """)
    evaluator = MarkEvaluator(item, 'xyz')
    assert evaluator
    assert evaluator.istrue()
    explanation = evaluator.getexplanation()
    assert explanation == "condition: hasattr(os, 'sep')"
def pytest_collection_modifyitems(self, session, config, items):
    """Handle skipif condition and remove skipped tests from test run. """
    self.items_count = len(items)  # pylint: disable=attribute-defined-outside-init
    self.reasons = set()  # pylint: disable=attribute-defined-outside-init
    remaining = []
    for test_item in items:
        skip_eval = MarkEvaluator(test_item, 'skipif')
        if skip_eval.istrue():
            # Collect the reason; the item is dropped from the run.
            self.reasons.add(skip_eval.getexplanation())
        else:
            remaining.append(test_item)
    # Mutate the list in place, as required by the pytest hook contract.
    items[:] = remaining
    self.filtered_count = len(items)  # pylint: disable=attribute-defined-outside-init
def test_marked_no_args(self, testdir):
    """A bare custom mark is true, has no explanation and no kwargs."""
    item = testdir.getitem("""
        import pytest
        @pytest.mark.xyz
        def test_func():
            pass
    """)
    evaluator = MarkEvaluator(item, 'xyz')
    assert evaluator
    assert evaluator.istrue()
    explanation = evaluator.getexplanation()
    assert explanation == ""
    assert not evaluator.get("run", False)
def test_skipif_class(self, testdir):
    """A class-level skipif condition sees the config namespace."""
    item, = testdir.getitems("""
        import pytest
        class TestClass:
            pytestmark = pytest.mark.skipif("config._hackxyz")
            def test_func(self):
                pass
    """)
    item.config._hackxyz = 3
    evaluator = MarkEvaluator(item, 'skipif')
    assert evaluator.istrue()
    explanation = evaluator.getexplanation()
    assert explanation == "condition: config._hackxyz"
def test_marked_one_arg_twice2(self, testdir):
    """With stacked skipif marks, the first true condition wins."""
    item = testdir.getitem("""
        import pytest
        @pytest.mark.skipif("hasattr(os, 'murks')")
        @pytest.mark.skipif("not hasattr(os, 'murks')")
        def test_func():
            pass
    """)
    evaluator = MarkEvaluator(item, 'skipif')
    assert evaluator
    assert evaluator.istrue()
    explanation = evaluator.getexplanation()
    assert explanation == "condition: not hasattr(os, 'murks')"
def test_marked_one_arg_with_reason(self, testdir):
    """An explicit reason= overrides the condition in the explanation."""
    item = testdir.getitem("""
        import pytest
        @pytest.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
        def test_func():
            pass
    """)
    evaluator = MarkEvaluator(item, 'xyz')
    assert evaluator
    assert evaluator.istrue()
    explanation = evaluator.getexplanation()
    assert explanation == "hello world"
    assert evaluator.get("attr") == 2
def test_skipif_class(self, testdir):
    """A class-level skipif condition sees the config namespace."""
    item, = testdir.getitems("""
        import pytest
        class TestClass(object):
            pytestmark = pytest.mark.skipif("config._hackxyz")
            def test_func(self):
                pass
    """)
    item.config._hackxyz = 3
    evaluator = MarkEvaluator(item, 'skipif')
    assert evaluator.istrue()
    explanation = evaluator.getexplanation()
    assert explanation == "condition: config._hackxyz"
def pytest_runtest_setup(item):
    """Skip a test marked ``skip_until`` while its condition is still false.

    If the mark carries a ``date`` kwarg (a ``datetime``), it takes
    precedence: the condition is rewritten to "now (UTC) has reached that
    date", so the test is skipped until the date passes.

    :param item: the pytest test item being set up.
    """
    skip_until_info = item.get_closest_marker('skip_until')
    if isinstance(skip_until_info, Mark):
        # If the date parameter is used, it will take precedence
        if "date" in skip_until_info.kwargs and isinstance(
                skip_until_info.kwargs["date"], datetime):
            # NOTE(review): this mutates the mark's kwargs in place so the
            # MarkEvaluator below sees the computed condition.
            skip_until_info.kwargs["condition"] = datetime.utcnow(
            ) >= skip_until_info.kwargs['date']
        eval_skip_until = MarkEvaluator(item, 'skip_until')
        # Fix: removed dead statement `skip_until_info.kwargs.get("condition",
        # None)` whose return value was discarded — it had no effect.
        item._evalskipuntil = eval_skip_until
        if not eval_skip_until.istrue():
            # Condition not (yet) met: skip and surface the explanation.
            pytest.skip(eval_skip_until.getexplanation())
def driver(request):
    """Yield a fresh Selenium WebDriver for the parametrized driver class.

    Unlike a cached fixture, this builds a new driver per use and performs a
    best-effort ``quit()`` during teardown.

    :param request: pytest fixture request; ``request.param`` carries the
        driver class name (e.g. ``'Firefox'``, ``'Remote'``).
    :raises Exception: when no ``--driver`` parametrization is present.
    """
    kwargs = {}
    try:
        driver_class = request.param
    except AttributeError:
        # No parametrization means the --driver option was not given.
        raise Exception('This test requires a --driver to be specified.')
    # conditionally mark tests as expected to fail based on driver
    request.node._evalxfail = request.node._evalxfail or MarkEvaluator(
        request.node, 'xfail_{0}'.format(driver_class.lower()))
    # skip driver instantiation if xfail(run=False)
    if not request.config.getoption('runxfail'):
        if request.node._evalxfail.istrue():
            if request.node._evalxfail.get('run') is False:
                # Still yield so the generator-fixture protocol is honored.
                yield
                return
    if driver_class == 'BlackBerry':
        kwargs.update({'device_password': '******'})
    if driver_class == 'Firefox':
        # Legacy (non-marionette) Firefox driver.
        kwargs.update({'capabilities': {'marionette': False}})
    if driver_class == 'Marionette':
        # Marionette is driven through the Firefox driver class.
        driver_class = 'Firefox'
        kwargs.update({'capabilities': {'marionette': True}})
    if driver_class == 'Remote':
        capabilities = DesiredCapabilities.FIREFOX.copy()
        kwargs.update({'desired_capabilities': capabilities})
    # Resolve the driver class by name from the selenium webdriver module.
    driver = getattr(webdriver, driver_class)(**kwargs)
    yield driver
    try:
        driver.quit()
    # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt and
    # SystemExit. Teardown stays best-effort but only for ordinary errors.
    except Exception:
        pass
def addSkip(self, testcase, reason):
    """Record a unittest-style skip for *testcase* with the given *reason*.

    Raises pytest's skip exception on purpose and immediately catches it:
    this is the way to obtain a real exception-info triple (traceback
    included) that the report machinery expects.
    """
    try:
        pytest.skip(reason)
    except pytest.skip.Exception:
        # Mark the item as skipped via a synthetic 'SkipTest' evaluator.
        self._evalskip = MarkEvaluator(self, 'SkipTest')
        self._evalskip.result = True
        # Must run inside this except block so sys.exc_info() is populated.
        self._addexcinfo(sys.exc_info())
def test_marked_one_arg_twice(self, testdir):
    """Stacking order of two skipif marks does not change the outcome."""
    decorators = [
        '''@pytest.mark.skipif("not hasattr(os, 'murks')")''',
        '''@pytest.mark.skipif("hasattr(os, 'murks')")''',
    ]
    # Try both orderings of the same pair of decorators.
    for first in (0, 1):
        item = testdir.getitem("""
            import pytest
            %s
            %s
            def test_func():
                pass
        """ % (decorators[first], decorators[(first + 1) % 2]))
        evaluator = MarkEvaluator(item, 'skipif')
        assert evaluator
        assert evaluator.istrue()
        explanation = evaluator.getexplanation()
        assert explanation == "condition: not hasattr(os, 'murks')"
def pytest_runtest_setup(item):
    """Evaluate ``blockif``/``block`` marks and block the test accordingly."""
    # Conditional blocking: evaluate the blockif expression first.
    blockif_marker = item.get_marker('blockif')
    if blockif_marker is not None:
        blockif_eval = MarkEvaluator(item, 'blockif')
        if blockif_eval.istrue():
            item._evalblock = blockif_eval
            pytest.block(blockif_eval.getexplanation())
    # Unconditional blocking via the plain `block` mark.
    block_marker = item.get_marker('block')
    if isinstance(block_marker, (MarkInfo, MarkDecorator)):
        item._evalblock = True
        if 'reason' in block_marker.kwargs:
            block(block_marker.kwargs['reason'])
        elif block_marker.args:
            block(block_marker.args[0])
        else:
            block("unconditional skip")
def test_marked_skip_with_not_string(self, testdir):
    """A boolean skipif condition without reason= fails with a clear message."""
    item = testdir.getitem("""
        import pytest
        @pytest.mark.skipif(False)
        def test_func():
            pass
    """)
    evaluator = MarkEvaluator(item, 'skipif')
    excinfo = pytest.raises(pytest.fail.Exception, evaluator.istrue)
    assert """Failed: you need to specify reason=STRING when using booleans as conditions.""" in excinfo.value.msg
def browser(request, browser_manager):
    """Yield the managed browser, honoring per-driver xfail marks."""
    node = request.node
    # conditionally mark tests as expected to fail based on driver
    node._evalxfail = node._evalxfail or MarkEvaluator(
        node, 'xfail_{}'.format(browser_manager.name))
    if node._evalxfail.istrue():
        # Expected failures get their browser torn down afterwards.
        request.addfinalizer(browser_manager.quit)
    # skip driver instantiation if xfail(run=False)
    if not request.config.getoption('runxfail'):
        if node._evalxfail.istrue() and node._evalxfail.get('run') is False:
            yield
            return
    yield browser_manager.browser
    # Tests marked `quits_browser` clean up their own browser eagerly.
    if MarkEvaluator(node, 'quits_browser').istrue():
        browser_manager.quit()
def test_no_marker(self, testdir):
    """With no marker present the evaluator is falsy and never true."""
    item = testdir.getitem("def test_func(): pass")
    evaluator = MarkEvaluator(item, 'skipif')
    assert not evaluator
    assert not evaluator.istrue()