def filter_by_ids(suite_or_case, test_ids):
    """Remove tests from suite_or_case where their id is not in test_ids.

    :param suite_or_case: A test suite or test case.
    :param test_ids: Something that supports the __contains__ protocol.
    :return: suite_or_case, unless suite_or_case was a case that itself
        fails the predicate, in which case a new, empty unittest.TestSuite
        is returned.

    This helper exists to provide backwards compatibility with older versions
    of Python (currently all versions :)) that don't have a native
    filter_by_ids() method on Test(Case|Suite).

    For subclasses of TestSuite, filtering is done by:
        - attempting to call suite.filter_by_ids(test_ids)
        - if there is no such method, iterating the suite to identify tests
          to remove, then removing them from _tests, manually recursing into
          each entry.

    For objects with an id() method - TestCases - filtering is done by:
        - attempting to return case.filter_by_ids(test_ids)
        - if there is no such method, checking for case.id() in test_ids and
          returning case if it is present, or TestSuite() if it is not.

    Anything else is not filtered - it is returned as-is.

    To provide compatibility with this routine for a custom TestSuite, just
    define a filter_by_ids() method that will return a TestSuite equivalent
    to the original minus any tests not in test_ids.
    Similarly, to provide compatibility for a custom TestCase that does
    something unusual, define filter_by_ids to return a new TestCase object
    that will only run test_ids that are in the provided container. If none
    would run, return an empty TestSuite().

    The contract for this function does not require mutation - each filtered
    object can choose to return a new object with the filtered tests. However,
    because existing custom TestSuite classes in the wild do not have this
    method, we need a way to copy their state correctly, which is tricky:
    thus the backwards-compatible code paths attempt to mutate in place rather
    than guessing how to reconstruct a new suite.
    """
    # Compatible objects
    if safe_hasattr(suite_or_case, 'filter_by_ids'):
        return suite_or_case.filter_by_ids(test_ids)
    # TestCase objects.
    if safe_hasattr(suite_or_case, 'id'):
        if suite_or_case.id() in test_ids:
            return suite_or_case
        else:
            return unittest.TestSuite()
    # Standard TestSuites or derived classes [assumed to be mutable].
    if isinstance(suite_or_case, unittest.TestSuite):
        filtered = []
        for item in suite_or_case:
            filtered.append(filter_by_ids(item, test_ids))
        suite_or_case._tests[:] = filtered
    # Everything else: return unchanged.
    return suite_or_case
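# A minimal sketch (not from the source) of a custom suite opting into the
# compatibility protocol described in the docstring above. The class name is
# made up, and it assumes the suite holds plain TestCases rather than nested
# suites; filter_by_ids(suite, test_ids) then delegates to this method.
import unittest


class TaggedSuite(unittest.TestSuite):

    def filter_by_ids(self, test_ids):
        # Return an equivalent suite containing only the wanted tests.
        return TaggedSuite(case for case in self if case.id() in test_ids)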
def _flatten_tests(suite_or_case, unpack_outer=False):
    try:
        tests = iter(suite_or_case)
    except TypeError:
        # Not iterable, assume it's a test case.
        return [(suite_or_case.id(), suite_or_case)]
    if (type(suite_or_case) in (unittest.TestSuite,) or unpack_outer):
        # Plain old test suite (or any others we may add).
        result = []
        for test in tests:
            # Recurse to flatten.
            result.extend(_flatten_tests(test))
        return result
    else:
        # Find any old actual test and grab its id.
        suite_id = None
        tests = iterate_tests(suite_or_case)
        for test in tests:
            suite_id = test.id()
            break
        # If it has a sort_tests method, call that.
        if safe_hasattr(suite_or_case, 'sort_tests'):
            suite_or_case.sort_tests()
        return [(suite_id, suite_or_case)]
def test_attribute_there(self):
    class Foo(object):
        pass
    foo = Foo()
    foo.attribute = None
    self.assertEqual(True, safe_hasattr(foo, 'attribute'))
def run_filter_script(result_factory, description, post_run_hook=None,
                      protocol_version=1, passthrough_subunit=True):
    """Main function for simple subunit filter scripts.

    Many subunit filter scripts take a stream of subunit input and use a
    TestResult to handle the events generated by that stream. This function
    wraps a lot of the boilerplate around that by making a script with
    options for handling passthrough information and stream forwarding, and
    that will exit with a successful return code (i.e. 0) if the input
    stream represents a successful test run.

    :param result_factory: A callable that takes an output stream and
        returns a test result that outputs to that stream.
    :param description: A description of the filter script.
    :param post_run_hook: If supplied, a callable invoked with the result
        after the run completes.
    :param protocol_version: What protocol version to consume/emit.
    :param passthrough_subunit: If True, passthrough should be as subunit.
    """
    parser = make_options(description)
    (options, args) = parser.parse_args()
    result = filter_by_result(
        result_factory, options.output_to, not options.no_passthrough,
        options.forward, protocol_version=protocol_version,
        passthrough_subunit=passthrough_subunit,
        input_stream=find_stream(sys.stdin, args))
    if post_run_hook:
        post_run_hook(result)
    if not safe_hasattr(result, 'wasSuccessful'):
        result = result.decorated
    if result.wasSuccessful():
        sys.exit(0)
    else:
        sys.exit(1)
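# A hedged usage sketch: the smallest possible filter script built on the
# helper above. TestProtocolClient is subunit's v1 TestResult writer; the
# import path assumes the python-subunit layout, and the description string
# is made up.
import subunit
from subunit.filters import run_filter_script

run_filter_script(
    # result_factory: given the output stream, return a result that
    # re-emits every event to it, i.e. a straight copy of the input.
    lambda output: subunit.TestProtocolClient(output),
    'Copy a subunit stream unchanged.')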
def test_property_there(self):
    class Foo(object):
        @property
        def attribute(self):
            return None
    foo = Foo()
    self.assertEqual(True, safe_hasattr(foo, 'attribute'))
def __init__(self, module=__name__, defaultTest=None, argv=None,
             testRunner=None, testLoader=defaultTestLoader, exit=True,
             verbosity=1, failfast=None, catchbreak=None, buffer=None,
             stdout=None):
    if module == __name__:
        self.module = None
    elif istext(module):
        self.module = __import__(module)
        for part in module.split('.')[1:]:
            self.module = getattr(self.module, part)
    else:
        self.module = module
    if argv is None:
        argv = sys.argv
    if stdout is None:
        stdout = sys.stdout
    self.stdout = stdout

    self.exit = exit
    self.failfast = failfast
    self.catchbreak = catchbreak
    self.verbosity = verbosity
    self.buffer = buffer
    self.defaultTest = defaultTest
    self.listtests = False
    self.load_list = None
    self.testRunner = testRunner
    self.testLoader = testLoader
    progName = argv[0]
    if progName.endswith('%srun.py' % os.path.sep):
        elements = progName.split(os.path.sep)
        progName = '%s.run' % elements[-2]
    else:
        progName = os.path.basename(argv[0])
    self.progName = progName
    self.parseArgs(argv)
    if self.load_list:
        # TODO: preserve existing suites (like testresources does in
        # OptimisingTestSuite.add, but with a standard protocol).
        # This is needed because the load_tests hook allows arbitrary
        # suites, even if that is rarely used.
        source = open(self.load_list, 'rb')
        try:
            lines = source.readlines()
        finally:
            source.close()
        test_ids = set(line.strip().decode('utf-8') for line in lines)
        self.test = filter_by_ids(self.test, test_ids)
    if not self.listtests:
        self.runTests()
    else:
        runner = self._get_runner()
        if safe_hasattr(runner, 'list'):
            runner.list(self.test)
        else:
            for test in iterate_tests(self.test):
                self.stdout.write('%s\n' % test.id())
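# For orientation (not from the snippet above): the load list it consumes is
# a plain UTF-8 text file with one test id per line, typically passed to the
# testtools runner via --load-list. The ids below are made up.
with open('ids.txt', 'w') as load_list:
    load_list.write('package.tests.test_api.TestClient.test_get\n')
    load_list.write('package.tests.test_api.TestClient.test_put\n')
# e.g.  python -m testtools.run --load-list ids.txt <tests>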
def _assert_cloud_details(self, cc):
    self.assertIsInstance(cc, cloud_config.CloudConfig)
    self.assertTrue(extras.safe_hasattr(cc, 'auth'))
    self.assertIsInstance(cc.auth, dict)
    self.assertIsNone(cc.cloud)
    self.assertIn('username', cc.auth)
    self.assertEqual('testuser', cc.auth['username'])
    self.assertTrue('project_name' in cc.auth or 'project_id' in cc.auth)
    if 'project_name' in cc.auth:
        self.assertEqual('testproject', cc.auth['project_name'])
    elif 'project_id' in cc.auth:
        self.assertEqual('testproject', cc.auth['project_id'])
def get_default_formatter():
    """Obtain the default formatter to write to.

    :return: A file-like object.
    """
    formatter = os.getenv("SUBUNIT_FORMATTER")
    if formatter:
        return os.popen(formatter, "w")
    else:
        stream = sys.stdout
        if sys.version_info > (3, 0):
            if safe_hasattr(stream, 'buffer'):
                stream = stream.buffer
        return stream
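# Usage sketch (not from the source): SUBUNIT_FORMATTER names a shell command
# to pipe output through; without it the raw stdout (the binary buffer on
# Python 3) is returned. subunit2pyunit is one formatter shipped with
# python-subunit, but any command would do.
import os

os.environ['SUBUNIT_FORMATTER'] = 'subunit2pyunit'
stream = get_default_formatter()   # a pipe to the formatter command
# ... write the subunit stream to `stream`, then close it ...
stream.close()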
def __init__(self, tests, name="", level=DEBUG, stdout=sys.stdout): super(LoggingSuite, self).__init__() self.handler = MemoryHandler(1) self._fixture = LogHandler(self.handler, name=name, level=level) self._stdout = stdout # Euristically figure out if we're being passed a single test/suite # or a list of tests. In particular, in case of a single suite we # don't want addTests() to unwrap it by iterating through its tests, # since that would prevent its run() method from being run and by-pass # possible custom logic (e.g. testresources.OptimisingTestSuite). if safe_hasattr(tests, "run"): add = self.addTest else: add = self.addTests add(tests)
def _assert_cloud_details(self, cc):
    self.assertIsInstance(cc, cloud_config.CloudConfig)
    self.assertTrue(extras.safe_hasattr(cc, 'auth'))
    self.assertIsInstance(cc.auth, dict)
    self.assertIsNone(cc.cloud)
    self.assertIn('username', cc.auth)
    self.assertEqual('testuser', cc.auth['username'])
    self.assertFalse(cc.config['image_api_use_tasks'])
    self.assertTrue('project_name' in cc.auth or 'project_id' in cc.auth)
    if 'project_name' in cc.auth:
        self.assertEqual('testproject', cc.auth['project_name'])
    elif 'project_id' in cc.auth:
        self.assertEqual('testproject', cc.auth['project_id'])
    self.assertEqual(cc.get_cache_expiration_time(), 1)
    self.assertEqual(cc.get_cache_resource_expiration('server'), 5.0)
    self.assertEqual(cc.get_cache_resource_expiration('image'), 7.0)
def _assert_cloud_details(self, cc):
    self.assertIsInstance(cc, cloud_config.CloudConfig)
    self.assertTrue(extras.safe_hasattr(cc, 'auth'))
    self.assertIsInstance(cc.auth, dict)
    self.assertIsNone(cc.cloud)
    self.assertIn('username', cc.auth)
    self.assertEqual('testuser', cc.auth['username'])
    self.assertEqual('testpass', cc.auth['password'])
    self.assertFalse(cc.config['image_api_use_tasks'])
    self.assertTrue('project_name' in cc.auth or 'project_id' in cc.auth)
    if 'project_name' in cc.auth:
        self.assertEqual('testproject', cc.auth['project_name'])
    elif 'project_id' in cc.auth:
        self.assertEqual('testproject', cc.auth['project_id'])
    self.assertEqual(cc.get_cache_expiration_time(), 1)
    self.assertEqual(cc.get_cache_resource_expiration('server'), 5.0)
    self.assertEqual(cc.get_cache_resource_expiration('image'), 7.0)
def _assert_cloud_details(self, cc):
    self.assertIsInstance(cc, cloud_config.CloudConfig)
    self.assertTrue(extras.safe_hasattr(cc, "auth"))
    self.assertIsInstance(cc.auth, dict)
    self.assertIsNone(cc.cloud)
    self.assertIn("username", cc.auth)
    self.assertEqual("testuser", cc.auth["username"])
    self.assertEqual("testpass", cc.auth["password"])
    self.assertFalse(cc.config["image_api_use_tasks"])
    self.assertTrue("project_name" in cc.auth or "project_id" in cc.auth)
    if "project_name" in cc.auth:
        self.assertEqual("testproject", cc.auth["project_name"])
    elif "project_id" in cc.auth:
        self.assertEqual("testproject", cc.auth["project_id"])
    self.assertEqual(cc.get_cache_expiration_time(), 1)
    self.assertEqual(cc.get_cache_resource_expiration("server"), 5.0)
    self.assertEqual(cc.get_cache_resource_expiration("image"), 7.0)
def get_stream_result(result):
    """Extract a StreamResult-like result object from the given result.

    This function figures out whether the given result object implements
    the StreamResult API (i.e. the 'status' method) and, if not, whether
    the result object is actually a decorator, directly or indirectly
    wrapping a StreamResult.

    It is needed because testtools decorates and nests result objects, so
    in order to find and use the status() API we have to traverse the
    decoration chain until we either find a StreamResult-like decorated
    result or we reach the bottom.
    """
    while not safe_hasattr(result, "status"):
        decorated = getattr(result, "decorated", None)
        if decorated:
            result = decorated
            continue
        raise RuntimeError("Not a stream result")
    return result
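# Illustrative only: testtools.StreamSummary exposes status(), while the
# wrapper class below is hypothetical and stands in for whatever decorators
# happen to be stacked on top of the real stream result.
import testtools


class _Wrapper(object):
    """No status() of its own, just a `decorated` link to unwrap."""

    def __init__(self, decorated):
        self.decorated = decorated


stream_result = testtools.StreamSummary()
assert get_stream_result(_Wrapper(_Wrapper(stream_result))) is stream_result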
def useFixture(self, fixture):
    """Use fixture in a test case.

    The fixture will be setUp, and self.addCleanup(fixture.cleanUp) called.

    :param fixture: The fixture to use.
    :return: The fixture, after setting it up and scheduling a cleanup for
        it.
    """
    try:
        fixture.setUp()
    except MultipleExceptions as e:
        if (fixtures is not None and
                e.args[-1][0] is fixtures.fixture.SetupError):
            gather_details(e.args[-1][1].args[0], self.getDetails())
        raise
    except:
        exc_info = sys.exc_info()
        try:
            # fixture._details is not available if using the newer
            # _setUp() API in Fixtures because it already cleaned up
            # the fixture. Ideally this whole try/except is not
            # really needed any more, however, we keep this code to
            # remain compatible with the older setUp().
            if (safe_hasattr(fixture, '_details') and
                    fixture._details is not None):
                gather_details(fixture.getDetails(), self.getDetails())
        except:
            # Report the setUp exception, then raise the error during
            # gather_details.
            self._report_traceback(exc_info)
            raise
        else:
            # gather_details worked, so raise the exception setUp
            # encountered.
            reraise(*exc_info)
    else:
        self.addCleanup(fixture.cleanUp)
        self.addCleanup(
            gather_details, fixture.getDetails(), self.getDetails())
        return fixture
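# For context (not from the source): typical use inside a testtools-style
# test case. fixtures.TempDir is a standard fixture from the fixtures
# package; the test class and file name are made up.
import fixtures
import testtools


class TestWithTempDir(testtools.TestCase):

    def test_creates_file(self):
        # setUp() runs here and cleanUp() is scheduled automatically.
        tempdir = self.useFixture(fixtures.TempDir())
        path = tempdir.join('example.txt')
        open(path, 'w').close()
        self.assertTrue(path.endswith('example.txt'))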
def test_get_one_no_yaml(self):
    c = config.OpenStackConfig(load_yaml_config=False)
    cc = c.get_one(
        region_name='region2', argparse=None,
        **base.USER_CONF['clouds']['_test_cloud_regions'])
    # Not using assert_cloud_details because of cache settings which
    # are not present without the file
    self.assertIsInstance(cc, cloud_region.CloudRegion)
    self.assertTrue(extras.safe_hasattr(cc, 'auth'))
    self.assertIsInstance(cc.auth, dict)
    self.assertIsNone(cc.cloud)
    self.assertIn('username', cc.auth)
    self.assertEqual('testuser', cc.auth['username'])
    self.assertEqual('testpass', cc.auth['password'])
    self.assertFalse(cc.config['image_api_use_tasks'])
    self.assertTrue('project_name' in cc.auth or 'project_id' in cc.auth)
    if 'project_name' in cc.auth:
        self.assertEqual('testproject', cc.auth['project_name'])
    elif 'project_id' in cc.auth:
        self.assertEqual('testproject', cc.auth['project_id'])
    self.assertEqual(cc.region_name, 'region2')
def test_get_one_cloud_no_yaml(self):
    c = config.OpenStackConfig(load_yaml_config=False)
    cc = c.get_one_cloud(
        region_name='region2', argparse=None,
        **base.USER_CONF['clouds']['_test_cloud_regions'])
    # Not using assert_cloud_details because of cache settings which
    # are not present without the file
    self.assertIsInstance(cc, cloud_config.CloudConfig)
    self.assertTrue(extras.safe_hasattr(cc, 'auth'))
    self.assertIsInstance(cc.auth, dict)
    self.assertIsNone(cc.cloud)
    self.assertIn('username', cc.auth)
    self.assertEqual('testuser', cc.auth['username'])
    self.assertEqual('testpass', cc.auth['password'])
    self.assertFalse(cc.config['image_api_use_tasks'])
    self.assertTrue('project_name' in cc.auth or 'project_id' in cc.auth)
    if 'project_name' in cc.auth:
        self.assertEqual('testproject', cc.auth['project_name'])
    elif 'project_id' in cc.auth:
        self.assertEqual('testproject', cc.auth['project_id'])
    self.assertEqual(cc.region_name, 'region2')
def __init__(self, module=__name__, defaultTest=None, argv=None,
             testRunner=None, testLoader=defaultTestLoader, exit=True,
             verbosity=1, failfast=None, catchbreak=None, buffer=None,
             stdout=None):
    if module == __name__:
        self.module = None
    elif istext(module):
        self.module = __import__(module)
        for part in module.split('.')[1:]:
            self.module = getattr(self.module, part)
    else:
        self.module = module
    if argv is None:
        argv = sys.argv
    if stdout is None:
        stdout = sys.stdout

    self.exit = exit
    self.failfast = failfast
    self.catchbreak = catchbreak
    self.verbosity = verbosity
    self.buffer = buffer
    self.defaultTest = defaultTest
    self.listtests = False
    self.load_list = None
    self.testRunner = testRunner
    self.testLoader = testLoader
    progName = argv[0]
    if progName.endswith('%srun.py' % os.path.sep):
        elements = progName.split(os.path.sep)
        progName = '%s.run' % elements[-2]
    else:
        progName = os.path.basename(argv[0])
    self.progName = progName
    self.parseArgs(argv)
    if self.load_list:
        # TODO: preserve existing suites (like testresources does in
        # OptimisingTestSuite.add, but with a standard protocol).
        # This is needed because the load_tests hook allows arbitrary
        # suites, even if that is rarely used.
        source = open(self.load_list, 'rb')
        try:
            lines = source.readlines()
        finally:
            source.close()
        test_ids = set(line.strip().decode('utf-8') for line in lines)
        self.test = filter_by_ids(self.test, test_ids)
    if not self.listtests:
        self.runTests()
    else:
        runner = self._get_runner()
        if safe_hasattr(runner, 'list'):
            runner.list(self.test)
        else:
            for test in iterate_tests(self.test):
                stdout.write('%s\n' % test.id())
def test_setup_conf(self):
    collect.setup_conf()
    self.assertEqual('/var/lib/os-collect-config', cfg.CONF.cachedir)
    self.assertTrue(extras.safe_hasattr(cfg.CONF, 'ec2'))
    self.assertTrue(extras.safe_hasattr(cfg.CONF, 'cfn'))
def _fix_discovery():
    # Monkey patch in the bugfix from http://bugs.python.org/issue16662
    # - the code here is a straight copy from the Python core tree
    # with the patch applied.
    global discover_fixed
    if discover_fixed:
        return
    # Do we have a fixed Python?
    # (not committed upstream yet - so we can't uncomment this code,
    # but when it gets committed, the next version to be released won't
    # need monkey patching.
    # if sys.version_info[:2] > (3, 4):
    #     discover_fixed = True
    #     return
    if not have_discover:
        return
    if safe_hasattr(discover_impl, '_jython_aware_splitext'):
        _jython_aware_splitext = discover_impl._jython_aware_splitext
    else:
        def _jython_aware_splitext(path):
            if path.lower().endswith('$py.class'):
                return path[:-9]
            return os.path.splitext(path)[0]

    def loadTestsFromModule(self, module, use_load_tests=True, pattern=None):
        """Return a suite of all test cases contained in the given module."""
        # use_load_tests is preserved for compatibility though it was never
        # an official API.
        # pattern is not an official API either; it is used in discovery to
        # chain the requested pattern down.
        tests = []
        for name in dir(module):
            obj = getattr(module, name)
            if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
                tests.append(self.loadTestsFromTestCase(obj))
        load_tests = getattr(module, 'load_tests', None)
        tests = self.suiteClass(tests)
        if use_load_tests and load_tests is not None:
            try:
                return load_tests(self, tests, pattern)
            except Exception as e:
                return discover_impl._make_failed_load_tests(
                    module.__name__, e, self.suiteClass)
        return tests

    def _find_tests(self, start_dir, pattern, namespace=False):
        """Used by discovery. Yields test suites it loads."""
        paths = sorted(os.listdir(start_dir))
        for path in paths:
            full_path = os.path.join(start_dir, path)
            if os.path.isfile(full_path):
                if not discover_impl.VALID_MODULE_NAME.match(path):
                    # valid Python identifiers only
                    continue
                if not self._match_path(path, full_path, pattern):
                    continue
                # if the test file matches, load it
                name = self._get_name_from_path(full_path)
                try:
                    module = self._get_module_from_name(name)
                except testcase.TestSkipped as e:
                    yield discover_impl._make_skipped_test(
                        name, e, self.suiteClass)
                except:
                    yield discover_impl._make_failed_import_test(
                        name, self.suiteClass)
                else:
                    mod_file = os.path.abspath(
                        getattr(module, '__file__', full_path))
                    realpath = _jython_aware_splitext(
                        os.path.realpath(mod_file))
                    fullpath_noext = _jython_aware_splitext(
                        os.path.realpath(full_path))
                    if realpath.lower() != fullpath_noext.lower():
                        module_dir = os.path.dirname(realpath)
                        mod_name = _jython_aware_splitext(
                            os.path.basename(full_path))
                        expected_dir = os.path.dirname(full_path)
                        msg = ("%r module incorrectly imported from %r. "
                               "Expected %r. Is this module globally "
                               "installed?")
                        raise ImportError(
                            msg % (mod_name, module_dir, expected_dir))
                    yield self.loadTestsFromModule(module, pattern=pattern)
            elif os.path.isdir(full_path):
                if (not namespace and
                        not os.path.isfile(
                            os.path.join(full_path, '__init__.py'))):
                    continue

                load_tests = None
                tests = None
                name = self._get_name_from_path(full_path)
                try:
                    package = self._get_module_from_name(name)
                except testcase.TestSkipped as e:
                    yield discover_impl._make_skipped_test(
                        name, e, self.suiteClass)
                except:
                    yield discover_impl._make_failed_import_test(
                        name, self.suiteClass)
                else:
                    load_tests = getattr(package, 'load_tests', None)
                    tests = self.loadTestsFromModule(package, pattern=pattern)
                    if tests is not None:
                        # tests loaded from package file
                        yield tests
                    if load_tests is not None:
                        # loadTestsFromModule(package) has load_tests for us.
                        continue
                    # recurse into the package
                    pkg_tests = self._find_tests(
                        full_path, pattern, namespace=namespace)
                    for test in pkg_tests:
                        yield test

    defaultTestLoaderCls.loadTestsFromModule = loadTestsFromModule
    defaultTestLoaderCls._find_tests = _find_tests
        '--suite', help='Test suite name to use in the output.')
    (options, _args) = parser.parse_args()
    return options


options = parse_options()
if options.input_from:
    input_stream = open(options.input_from, 'r')
else:
    input_stream = sys.stdin
passthrough, forward = False, False
result = subunit.filters.filter_by_result(
    lambda output: testtools.StreamToExtendedDecorator(
        JenkinsXmlResult(options.suite, output)),
    options.output_to, passthrough, forward, protocol_version=2,
    input_stream=input_stream)
if options.input_from:
    input_stream.close()
if not extras.safe_hasattr(result, 'wasSuccessful'):
    result = result.decorated
if result.wasSuccessful():
    sys.exit(0)
else:
    sys.exit(1)
def decorate(fn):
    if not safe_hasattr(fn, '__testtools_attrs'):
        fn.__testtools_attrs = set()
    fn.__testtools_attrs.update(args)
    return fn
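# Hedged usage sketch: in testtools this closure is returned by the attr()
# decorator factory and pairs with the WithAttributes mixin; the import path,
# test class and labels below are assumptions for illustration.
import testtools
from testtools.testcase import WithAttributes, attr


class TaggedTests(WithAttributes, testtools.TestCase):

    @attr('slow', 'network')
    def test_download(self):
        pass


# The decorator simply records the labels on the function object.
assert TaggedTests.test_download.__testtools_attrs == {'slow', 'network'}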
def __init__(self, module='__main__', defaultTest=None, argv=None,
             testRunner=None, testLoader=unittest.defaultTestLoader,
             exit=False, verbosity=1, failfast=None, catchbreak=None,
             buffer=None, warnings=None, tb_locals=False):
    if isinstance(module, str):
        self.module = __import__(module)
        for part in module.split('.')[1:]:
            self.module = getattr(self.module, part)
    else:
        self.module = module
    if argv is None:
        argv = sys.argv

    self.exit = exit
    self.failfast = failfast
    self.catchbreak = catchbreak
    self.verbosity = verbosity
    self.buffer = buffer
    self.tb_locals = tb_locals
    if warnings is None and not sys.warnoptions:
        # even if DeprecationWarnings are ignored by default
        # print them anyway unless other warnings settings are
        # specified by the warnings arg or the -W python flag
        self.warnings = 'default'
    else:
        # here self.warnings is set either to the value passed
        # to the warnings args or to None.
        # If the user didn't pass a value self.warnings will
        # be None. This means that the behavior is unchanged
        # and depends on the values passed to -W.
        self.warnings = warnings
    self.defaultTest = defaultTest
    # XXX: Local edit (see http://bugs.python.org/issue22860)
    self.listtests = False
    self.load_list = None
    self.testRunner = testRunner
    self.testLoader = testLoader
    self.progName = os.path.basename(argv[0])
    self.parseArgs(argv)
    # XXX: Local edit (see http://bugs.python.org/issue22860)
    if self.load_list:
        # TODO(mtreinish): preserve existing suites (like testresources
        # does in OptimisingTestSuite.add, but with a standard protocol).
        # This is needed because the load_tests hook allows arbitrary
        # suites, even if that is rarely used.
        source = open(self.load_list, 'rb')
        try:
            lines = source.readlines()
        finally:
            source.close()
        test_ids = {line.strip().decode('utf-8') for line in lines}
        self.test = filter_by_ids(self.test, test_ids)
    # XXX: Local edit (see http://bugs.python.org/issue22860)
    if not self.listtests:
        self.runTests()
    else:
        runner = self._get_runner()
        if extras.safe_hasattr(runner, 'list'):
            try:
                runner.list(self.test, loader=self.testLoader)
            except TypeError:
                runner.list(self.test)
        else:
            for test in iterate_tests(self.test):
                sys.stdout.write('%s\n' % test.id())
def test_attribute_not_there(self):
    class Foo(object):
        pass
    self.assertEqual(False, safe_hasattr(Foo(), 'anything'))
def _parse(self, packet, result):
    # 2 bytes flags, at most 3 bytes length.
    header = read_exactly(self.source, 5)
    packet.append(header)
    flags = struct.unpack(FMT_16, header[:2])[0]
    length, consumed = self._parse_varint(header, 2, max_3_bytes=True)
    remainder = read_exactly(self.source, length - 6)
    if consumed != 3:
        # Avoid having to parse torn values
        packet[-1] += remainder
        pos = 2 + consumed
    else:
        # Avoid copying potentially lots of data.
        packet.append(remainder)
        pos = 0
    crc = zlib.crc32(packet[0])
    for fragment in packet[1:-1]:
        crc = zlib.crc32(fragment, crc)
    crc = zlib.crc32(packet[-1][:-4], crc) & 0xffffffff
    packet_crc = struct.unpack(FMT_32, packet[-1][-4:])[0]
    if crc != packet_crc:
        # Bad CRC, report it and stop parsing the packet.
        raise ParseError(
            'Bad checksum - calculated (0x%x), stored (0x%x)' % (
                crc, packet_crc))
    if safe_hasattr(builtins, 'memoryview'):
        body = memoryview(packet[-1])
    else:
        body = packet[-1]
    # Discard CRC-32
    body = body[:-4]
    # One packet could have both file and status data; the Python API
    # presents these separately (perhaps it shouldn't?)
    if flags & FLAG_TIMESTAMP:
        seconds = struct.unpack(FMT_32, self._to_bytes(body, pos, 4))[0]
        nanoseconds, consumed = self._parse_varint(body, pos + 4)
        pos = pos + 4 + consumed
        timestamp = EPOCH + datetime.timedelta(
            seconds=seconds, microseconds=nanoseconds / 1000)
    else:
        timestamp = None
    if flags & FLAG_TEST_ID:
        test_id, pos = self._read_utf8(body, pos)
    else:
        test_id = None
    if flags & FLAG_TAGS:
        tag_count, consumed = self._parse_varint(body, pos)
        pos += consumed
        test_tags = set()
        for _ in range(tag_count):
            tag, pos = self._read_utf8(body, pos)
            test_tags.add(tag)
    else:
        test_tags = None
    if flags & FLAG_MIME_TYPE:
        mime_type, pos = self._read_utf8(body, pos)
    else:
        mime_type = None
    if flags & FLAG_FILE_CONTENT:
        file_name, pos = self._read_utf8(body, pos)
        content_length, consumed = self._parse_varint(body, pos)
        pos += consumed
        file_bytes = self._to_bytes(body, pos, content_length)
        if len(file_bytes) != content_length:
            raise ParseError('File content extends past end of packet: '
                             'claimed %d bytes, %d available' % (
                                 content_length, len(file_bytes)))
        pos += content_length
    else:
        file_name = None
        file_bytes = None
    if flags & FLAG_ROUTE_CODE:
        route_code, pos = self._read_utf8(body, pos)
    else:
        route_code = None
    runnable = bool(flags & FLAG_RUNNABLE)
    eof = bool(flags & FLAG_EOF)
    test_status = self.status_lookup[flags & 0x0007]
    result.status(
        test_id=test_id, test_status=test_status, test_tags=test_tags,
        runnable=runnable, mime_type=mime_type, eof=eof,
        file_name=file_name, file_bytes=file_bytes, route_code=route_code,
        timestamp=timestamp)
def _parse(self, packet, result):
    # 2 bytes flags, at most 3 bytes length.
    packet.append(self.source.read(5))
    flags = struct.unpack(FMT_16, packet[-1][:2])[0]
    length, consumed = self._parse_varint(
        packet[-1], 2, max_3_bytes=True)
    remainder = self.source.read(length - 6)
    if len(remainder) != length - 6:
        raise ParseError(
            'Short read - got %d bytes, wanted %d bytes' % (
                len(remainder), length - 6))
    if consumed != 3:
        # Avoid having to parse torn values
        packet[-1] += remainder
        pos = 2 + consumed
    else:
        # Avoid copying potentially lots of data.
        packet.append(remainder)
        pos = 0
    crc = zlib.crc32(packet[0])
    for fragment in packet[1:-1]:
        crc = zlib.crc32(fragment, crc)
    crc = zlib.crc32(packet[-1][:-4], crc) & 0xffffffff
    packet_crc = struct.unpack(FMT_32, packet[-1][-4:])[0]
    if crc != packet_crc:
        # Bad CRC, report it and stop parsing the packet.
        raise ParseError(
            'Bad checksum - calculated (0x%x), stored (0x%x)' % (
                crc, packet_crc))
    if safe_hasattr(builtins, 'memoryview'):
        body = memoryview(packet[-1])
    else:
        body = packet[-1]
    # Discard CRC-32
    body = body[:-4]
    # One packet could have both file and status data; the Python API
    # presents these separately (perhaps it shouldn't?)
    if flags & FLAG_TIMESTAMP:
        seconds = struct.unpack(FMT_32, self._to_bytes(body, pos, 4))[0]
        nanoseconds, consumed = self._parse_varint(body, pos + 4)
        pos = pos + 4 + consumed
        timestamp = EPOCH + datetime.timedelta(
            seconds=seconds, microseconds=nanoseconds / 1000)
    else:
        timestamp = None
    if flags & FLAG_TEST_ID:
        test_id, pos = self._read_utf8(body, pos)
    else:
        test_id = None
    if flags & FLAG_TAGS:
        tag_count, consumed = self._parse_varint(body, pos)
        pos += consumed
        test_tags = set()
        for _ in range(tag_count):
            tag, pos = self._read_utf8(body, pos)
            test_tags.add(tag)
    else:
        test_tags = None
    if flags & FLAG_MIME_TYPE:
        mime_type, pos = self._read_utf8(body, pos)
    else:
        mime_type = None
    if flags & FLAG_FILE_CONTENT:
        file_name, pos = self._read_utf8(body, pos)
        content_length, consumed = self._parse_varint(body, pos)
        pos += consumed
        file_bytes = self._to_bytes(body, pos, content_length)
        if len(file_bytes) != content_length:
            raise ParseError('File content extends past end of packet: '
                             'claimed %d bytes, %d available' % (
                                 content_length, len(file_bytes)))
        pos += content_length
    else:
        file_name = None
        file_bytes = None
    if flags & FLAG_ROUTE_CODE:
        route_code, pos = self._read_utf8(body, pos)
    else:
        route_code = None
    runnable = bool(flags & FLAG_RUNNABLE)
    eof = bool(flags & FLAG_EOF)
    test_status = self.status_lookup[flags & 0x0007]
    result.status(
        test_id=test_id, test_status=test_status, test_tags=test_tags,
        runnable=runnable, mime_type=mime_type, eof=eof,
        file_name=file_name, file_bytes=file_bytes, route_code=route_code,
        timestamp=timestamp)
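# Round-trip sketch of the v2 packet format the parsers above consume. The
# class names are the public subunit/testtools ones; the test id is made up.
import io

import subunit
import testtools

buf = io.BytesIO()
subunit.StreamResultToBytes(buf).status(
    test_id='hypothetical.test', test_status='success')
buf.seek(0)

summary = testtools.StreamSummary()
summary.startTestRun()
subunit.ByteStreamToStreamResult(buf).run(summary)
summary.stopTestRun()
assert summary.wasSuccessful()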
def _set_failfast(self, value):
    if safe_hasattr(self.decorated, 'failfast'):
        self.decorated.failfast = value
    else:
        self._failfast = value