def shell_cmd(verbose, with_req_context):
    """Start an interactive IPython shell with the SNMS app context loaded.

    :param verbose: if true, prepend the shell-context info lines to the
        banner shown when the shell starts
    :param with_req_context: if true, also push a test request context bound
        to ``config.BASE_URL`` for the lifetime of the shell
    """
    try:
        from IPython.terminal.ipapp import TerminalIPythonApp
    except ImportError:
        click.echo(
            cformat(
                '%{red!}You need to `pip install ipython` to use the SNMS shell'
            ))
        sys.exit(1)
    current_app.config['REPL'] = True  # disables e.g. memoize_request
    request_stats_request_started()
    context, info = _make_shell_context()
    banner = cformat('%{yellow!}SNMS v{} is ready for your commands').format(
        snms.__version__)
    if verbose:
        banner = '\n'.join(info + ['', banner])
    ctx = current_app.make_shell_context()
    ctx.update(context)
    # clearCache()
    # Use an ExitStack so the request context (if any) is only entered when
    # requested but is always cleaned up when the shell exits.
    stack = ExitStack()
    if with_req_context:
        stack.enter_context(
            current_app.test_request_context(base_url=config.BASE_URL))
    with stack:
        ipython_app = TerminalIPythonApp.instance(user_ns=ctx,
                                                  display_banner=False)
        ipython_app.initialize(argv=[])
        ipython_app.shell.show_banner(banner)
        ipython_app.start()
def __call__(s, *args, **kwargs):
    """Run the Celery task inside the Flask app (and optional request) context.

    NOTE(review): ``s`` is the task instance; ``self`` comes from the
    enclosing scope (this method appears to be defined inside a closure) —
    confirm against the surrounding code.
    """
    contexts = ExitStack()
    contexts.enter_context(self.flask_app.app_context())
    wants_request = getattr(s, 'request_context', False)
    if wants_request:
        contexts.enter_context(
            self.flask_app.test_request_context(base_url=config.BASE_URL))
    args = _CelerySAWrapper.unwrap_args(args)
    kwargs = _CelerySAWrapper.unwrap_kwargs(kwargs)
    with contexts:
        request_stats_request_started()
        process_start.send()
        return super(SnmsTask, s).__call__(*args, **kwargs)
def open_sharded_tfrecords_with_splits(exit_stack: contextlib2.ExitStack,
                                       base_path: str, num_shards: int,
                                       shard_splits: List[float],
                                       split_names: List[str]) \
        -> List:
    """
    Open TFRecord writers for ``num_shards`` shards, partitioned into named
    splits, registering every writer on the given exit stack.

    :param exit_stack: The stack that owns (and will eventually close) the
        opened writers.
    :param base_path: The base path for each shard file.
    :param num_shards: The total number of shards being created.
    :param shard_splits: The ratios at which the shards are split.
    :param split_names: The names of the shard splits; if None, names of the
        form ``split_<i>`` are generated.
    :return: List of opened TFRecord writers, one per shard, ordered by shard
        index within each split.
    :raise AssertionError: If the sum of the ratios is greater than 1.
    :raise AssertionError: If there are fewer names than splits.
    """
    assert sum(shard_splits) <= 1.0, \
        f"The sum of shard_split need to be less than or equal 1. (! {sum(shard_splits)} <= 1.0)"
    if split_names is not None:
        assert len(split_names) >= len(shard_splits), \
            "There need to be more shard names then there shards to be named if they are to be named."
    else:
        split_names = [f"split_{i}" for i in range(len(shard_splits))]
    # Cumulative shard counts marking the right boundary of each split.
    splits = [
        int((sum(shard_splits[:i]) + a) * num_shards)
        for i, a in enumerate(shard_splits)
    ]
    # Shard indices are 1-based and must cover all num_shards shards; the
    # previous `np.arange(1, num_shards)` stopped one short and silently
    # opened num_shards - 1 writers (cf. open_sharded_tfrecords, which uses
    # range(1, num_shards + 1)).
    return [
        exit_stack.enter_context(
            tf.python_io.TFRecordWriter(
                f"{base_path}/{split_names[sidx]}-{idx:05d}-of-{num_shards:05d}"
            ))
        for sidx, s in enumerate(np.split(np.arange(1, num_shards + 1), splits))
        for idx in s
    ]
def _open_sharded_output_tfrecords(
    exit_stack: contextlib2.ExitStack,
    base_path: str,
    num_shards: int,
) -> List:
    """
    Open every TFRecord shard for writing and register each writer on an
    exit stack so all of them are closed automatically.

    Modified from original code in the TensorFlow Object Detection API:
    https://github.com/tensorflow/models/object-detection/research/object_detection/dataset_tools/tf_record_creation_util.py

    :param exit_stack: a contextlib2.ExitStack used to automatically close the
        TFRecords opened in this function
    :param base_path: the base file path for all shards
    :param num_shards: number of shards
    :return: a list of opened TFRecord shard files (position k in the list
        corresponds to shard k)
    """
    writers = []
    for idx in range(num_shards):
        shard_name = f'{base_path}-{str(idx).zfill(5)}-of-{str(num_shards).zfill(5)}'
        writers.append(exit_stack.enter_context(TFRecordWriter(shard_name)))
    return writers
class BlockMetric:
    """Enable tracking on a block of code.

    Used as a context manager: entering activates every tracker listed in
    ``trackers`` for the given client/metric pair, and leaving deactivates
    them in reverse order via the internal ExitStack.
    """

    #: Trackers activated during the execution of the block of code
    trackers = [executions, errors, processing_time]

    def __init__(self, client, metric):
        self.client = client
        self.metric = metric

    def __enter__(self):
        self.stack = ExitStack()
        for tracker in self.trackers:
            self.stack.enter_context(tracker(self.client, self.metric))
        # Return self so `with BlockMetric(...) as bm:` binds the metric
        # object (the original returned None).
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Propagate the stack's return value: if an inner tracker suppresses
        # the exception, that suppression must be honored by the caller.
        # (The original discarded it, re-raising suppressed exceptions.)
        return self.stack.__exit__(exc_type, exc_val, exc_tb)
class ResourcesStack(object):
    # Wraps an ExitStack to support execution in environments like Dagstermill, where we can't
    # wrap solid execution/the pipeline execution context lifecycle in an ordinary Python context
    # manager (because notebook execution is cell-by-cell in the Jupyter kernel, a subprocess we
    # don't directly control). In these environments we need to manually create and teardown
    # resources.

    def __init__(self, pipeline_def, environment_config, pipeline_run,
                 log_manager, resource_keys_to_init):
        """Validate and store everything needed to later build the resources.

        :param pipeline_def: PipelineDefinition the resources belong to
        :param environment_config: EnvironmentConfig supplying per-resource config
        :param pipeline_run: PipelineRun whose run_id/mode are used when building
        :param log_manager: DagsterLogManager handed to each resource fn
        :param resource_keys_to_init: set of resource names to actually build
        """
        check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
        check.inst_param(environment_config, 'environment_config',
                         EnvironmentConfig)
        check.inst_param(pipeline_run, 'pipeline_run', PipelineRun)
        check.inst_param(log_manager, 'log_manager', DagsterLogManager)
        check.set_param(resource_keys_to_init, 'resource_keys_to_init',
                        of_type=str)
        self.resource_instances = {}
        self.mode_definition = pipeline_def.get_mode_definition(
            pipeline_run.mode)
        self.pipeline_def = pipeline_def
        self.environment_config = environment_config
        self.pipeline_run = pipeline_run
        self.log_manager = log_manager
        self.stack = ExitStack()
        self.resource_keys_to_init = resource_keys_to_init

    def create(self):
        """Instantiate the requested resources, registering each context on
        the stack so `teardown` can release them later.

        :return: ScopedResourcesBuilder over the created resource instances
        """
        # Sorted iteration gives a deterministic resource creation order.
        for resource_name, resource_def in sorted(
                self.mode_definition.resource_defs.items()):
            if not resource_name in self.resource_keys_to_init:
                continue
            user_fn = create_resource_fn_lambda(
                self.pipeline_def,
                resource_def,
                self.environment_config.resources.get(resource_name,
                                                      {}).get('config'),
                self.pipeline_run.run_id,
                self.log_manager,
            )

            # Bind resource_name by value; a bare lambda defined in the loop
            # would late-bind and always report the last resource's name.
            def _create_msg_fn(rn):
                return lambda: 'Error executing resource_fn on ResourceDefinition {name}'.format(
                    name=rn)

            resource_obj = self.stack.enter_context(
                user_code_context_manager(user_fn,
                                          DagsterResourceFunctionError,
                                          _create_msg_fn(resource_name)))
            self.resource_instances[resource_name] = resource_obj
        return ScopedResourcesBuilder(self.resource_instances)

    def teardown(self):
        # Closing the stack exits every resource context in reverse order
        # of creation.
        self.stack.close()
class LoggedExitStack(object):
    """ExitStack wrapper that logs entry/exit of each wrapped context manager
    by pairing it with a LogScope named after the manager's class."""

    def __init__(self, logger, context_managers=None):
        self._logger = logger
        self._exit_stack = ExitStack()
        # TODO make this cleaner, types would be nice
        context_managers = context_managers if context_managers is not None else []
        # Accept either a single context manager or a list of them.
        self.context_managers = (context_managers if isinstance(
            context_managers, list) else [context_managers])

    def __enter__(self):
        self._exit_stack.__enter__()
        try:
            for context_manager in self.context_managers:
                self._exit_stack.enter_context(
                    LogScope(self._logger, context_manager.__class__.__name__))
                self._exit_stack.enter_context(context_manager)
        except BaseException:
            # If a later __enter__ fails, unwind everything entered so far;
            # otherwise those contexts (and their log scopes) would leak.
            self._exit_stack.close()
            raise
        return self

    def __exit__(self, *args):
        return self._exit_stack.__exit__(*args)
def open_sharded_tfrecords(exit_stack: contextlib2.ExitStack, base_path: str,
                           num_shards: int) \
        -> List:
    """
    Open TFRecord writers for several shards, registering each writer on the
    given exit stack so they are all closed together.

    :param exit_stack: The stack that owns (and will close) the opened writers.
    :param base_path: The base path for each shard file.
    :param num_shards: The number of shards being created.
    :return: List of opened TFRecord writers, one per shard (shard indices in
        the file names are 1-based).
    """
    return [
        exit_stack.enter_context(
            tf.io.TFRecordWriter(f"{base_path}-{idx:05d}-of-{num_shards:05d}"))
        for idx in range(1, num_shards + 1)
    ]
def shell_cmd(verbose, with_req_context):
    """Start an interactive IPython shell with the Indico app context loaded.

    :param verbose: if true, prepend the shell-context info lines to the
        banner shown when the shell starts
    :param with_req_context: if true, also push a test request context bound
        to ``config.BASE_URL`` for the lifetime of the shell
    """
    try:
        from IPython.terminal.ipapp import TerminalIPythonApp
    except ImportError:
        click.echo(cformat('%{red!}You need to `pip install ipython` to use the Indico shell'))
        sys.exit(1)
    current_app.config['REPL'] = True  # disables e.g. memoize_request
    request_stats_request_started()
    context, info = _make_shell_context()
    banner = cformat('%{yellow!}Indico v{} is ready for your commands').format(indico.__version__)
    if verbose:
        banner = '\n'.join(info + ['', banner])
    ctx = current_app.make_shell_context()
    ctx.update(context)
    clearCache()
    # ExitStack lets us conditionally push the request context while still
    # guaranteeing cleanup when the shell exits.
    stack = ExitStack()
    if with_req_context:
        stack.enter_context(current_app.test_request_context(base_url=config.BASE_URL))
    with stack:
        ipython_app = TerminalIPythonApp.instance(user_ns=ctx, display_banner=False)
        ipython_app.initialize(argv=[])
        ipython_app.shell.show_banner(banner)
        ipython_app.start()
def __call__(s, *args, **kwargs):
    """Run the Celery task inside the Indico app/DB (and plugin) contexts.

    NOTE(review): ``s`` is the task instance; ``self`` comes from the
    enclosing scope (this method appears to be defined inside a closure) —
    confirm against the surrounding code.
    """
    stack = ExitStack()
    stack.enter_context(self.flask_app.app_context())
    stack.enter_context(DBMgr.getInstance().global_connection())
    if getattr(s, 'request_context', False):
        stack.enter_context(self.flask_app.test_request_context())
    args = _CelerySAWrapper.unwrap_args(args)
    kwargs = _CelerySAWrapper.unwrap_kwargs(kwargs)
    # Plugin may come from the task attribute or be smuggled in via kwargs.
    plugin = getattr(s, 'plugin', kwargs.pop('__current_plugin__', None))
    if isinstance(plugin, basestring):
        plugin_name = plugin
        plugin = plugin_engine.get_plugin(plugin)
        if plugin is None:
            # Unwind the contexts entered so far before aborting.
            stack.close()
            raise ValueError('Plugin not active: ' + plugin_name)
    stack.enter_context(plugin_context(plugin))
    with stack:
        return super(IndicoTask, s).__call__(*args, **kwargs)
def __call__(s, *args, **kwargs):
    """Run the Celery task inside the Indico app/DB/request/plugin contexts.

    NOTE(review): ``s`` is the task instance; ``self`` comes from the
    enclosing scope (this method appears to be defined inside a closure) —
    confirm against the surrounding code.
    """
    stack = ExitStack()
    stack.enter_context(self.flask_app.app_context())
    stack.enter_context(DBMgr.getInstance().global_connection())
    if getattr(s, 'request_context', False):
        stack.enter_context(
            self.flask_app.test_request_context(
                base_url=Config.getInstance().getBaseURL()))
    args = _CelerySAWrapper.unwrap_args(args)
    kwargs = _CelerySAWrapper.unwrap_kwargs(kwargs)
    # Plugin may come from the task attribute or be smuggled in via kwargs.
    plugin = getattr(s, 'plugin', kwargs.pop('__current_plugin__', None))
    if isinstance(plugin, basestring):
        plugin_name = plugin
        plugin = plugin_engine.get_plugin(plugin)
        if plugin is None:
            # Unwind the contexts entered so far before aborting.
            stack.close()
            raise ValueError('Plugin not active: ' + plugin_name)
    stack.enter_context(plugin_context(plugin))
    clearCache()
    with stack:
        return super(IndicoTask, s).__call__(*args, **kwargs)
def __call__(s, *args, **kwargs):
    """Run the Celery task inside the Indico app/request/plugin contexts.

    NOTE(review): ``s`` is the task instance; ``self`` comes from the
    enclosing scope (this method appears to be defined inside a closure) —
    confirm against the surrounding code.
    """
    stack = ExitStack()
    stack.enter_context(self.flask_app.app_context())
    if getattr(s, 'request_context', False):
        stack.enter_context(self.flask_app.test_request_context(base_url=config.BASE_URL))
    args = _CelerySAWrapper.unwrap_args(args)
    kwargs = _CelerySAWrapper.unwrap_kwargs(kwargs)
    # Plugin may come from the task attribute or from the task request.
    plugin = getattr(s, 'plugin', s.request.get('indico_plugin'))
    if isinstance(plugin, basestring):
        plugin_name = plugin
        plugin = plugin_engine.get_plugin(plugin)
        if plugin is None:
            # Unwind the contexts entered so far before aborting.
            stack.close()
            raise ValueError('Plugin not active: ' + plugin_name)
    stack.enter_context(plugin_context(plugin))
    clearCache()
    with stack:
        request_stats_request_started()
        return super(IndicoTask, s).__call__(*args, **kwargs)
def __call__(s, *args, **kwargs):
    """Execute the Celery task with the Indico app, request and plugin
    contexts set up around it.

    NOTE(review): ``s`` is the task instance; ``self`` comes from the
    enclosing scope (this method appears to be defined inside a closure) —
    confirm against the surrounding code.
    """
    ctx_stack = ExitStack()
    ctx_stack.enter_context(self.flask_app.app_context())
    wants_request = getattr(s, 'request_context', False)
    if wants_request:
        ctx_stack.enter_context(
            self.flask_app.test_request_context(base_url=config.BASE_URL))
    args = _CelerySAWrapper.unwrap_args(args)
    kwargs = _CelerySAWrapper.unwrap_kwargs(kwargs)
    plugin = getattr(s, 'plugin', s.request.get('indico_plugin'))
    if isinstance(plugin, basestring):
        plugin_name = plugin
        plugin = plugin_engine.get_plugin(plugin)
        if plugin is None:
            ctx_stack.close()
            raise ValueError('Plugin not active: ' + plugin_name)
    ctx_stack.enter_context(plugin_context(plugin))
    clearCache()
    with ctx_stack:
        request_stats_request_started()
        return super(IndicoTask, s).__call__(*args, **kwargs)
class ZiplineTestCase(with_metaclass(FinalMeta, TestCase)):
    """
    Shared extensions to core unittest.TestCase.

    Overrides the default unittest setUp/tearDown functions with versions that
    use ExitStack to correctly clean up resources, even in the face of
    exceptions that occur during setUp/setUpClass.

    Subclasses **should not override setUp or setUpClass**!

    Instead, they should implement `init_instance_fixtures` for per-test-method
    resources, and `init_class_fixtures` for per-class resources.

    Resources that need to be cleaned up should be registered using
    either `enter_{class,instance}_context` or `add_{class,instance}_callback}.
    """
    # True while setUp/init_instance_fixtures run; used to catch class-level
    # fixture calls mistakenly made from instance-level hooks.
    _in_setup = False

    @final
    @classmethod
    def setUpClass(cls):
        cls._class_teardown_stack = ExitStack()
        try:
            cls._base_init_fixtures_was_called = False
            cls.init_class_fixtures()
            assert cls._base_init_fixtures_was_called, (
                "ZiplineTestCase.init_class_fixtures() was not called.\n"
                "This probably means that you overrode init_class_fixtures"
                " without calling super().")
        except:
            # unittest does not run tearDownClass when setUpClass raises, so
            # clean up manually before propagating.
            cls.tearDownClass()
            raise

    @classmethod
    def init_class_fixtures(cls):
        """
        Override and implement this classmethod to register resources that
        should be created and/or torn down on a per-class basis.

        Subclass implementations of this should always invoke this with super()
        to ensure that fixture mixins work properly.
        """
        if cls._in_setup:
            raise ValueError(
                'Called init_class_fixtures from init_instance_fixtures.'
                'Did you write super(..., self).init_class_fixtures() instead'
                ' of super(..., self).init_instance_fixtures()?',
            )
        cls._base_init_fixtures_was_called = True

    @final
    @classmethod
    def tearDownClass(cls):
        cls._class_teardown_stack.close()

    @final
    @classmethod
    def enter_class_context(cls, context_manager):
        """
        Enter a context manager to be exited during the tearDownClass
        """
        if cls._in_setup:
            raise ValueError(
                'Attempted to enter a class context in init_instance_fixtures.'
                '\nDid you mean to call enter_instance_context?',
            )
        return cls._class_teardown_stack.enter_context(context_manager)

    @final
    @classmethod
    def add_class_callback(cls, callback):
        """
        Register a callback to be executed during tearDownClass.

        Parameters
        ----------
        callback : callable
            The callback to invoke at the end of the test suite.
        """
        if cls._in_setup:
            raise ValueError(
                'Attempted to add a class callback in init_instance_fixtures.'
                '\nDid you mean to call add_instance_callback?',
            )
        return cls._class_teardown_stack.callback(callback)

    @final
    def setUp(self):
        type(self)._in_setup = True
        self._instance_teardown_stack = ExitStack()
        try:
            self._init_instance_fixtures_was_called = False
            self.init_instance_fixtures()
            assert self._init_instance_fixtures_was_called, (
                "ZiplineTestCase.init_instance_fixtures() was not"
                " called.\n"
                "This probably means that you overrode"
                " init_instance_fixtures without calling super().")
        except:
            # unittest does not run tearDown when setUp raises.
            self.tearDown()
            raise
        finally:
            type(self)._in_setup = False

    def init_instance_fixtures(self):
        # Subclasses registering per-test resources must chain up to here.
        self._init_instance_fixtures_was_called = True

    @final
    def tearDown(self):
        self._instance_teardown_stack.close()

    @final
    def enter_instance_context(self, context_manager):
        """
        Enter a context manager that should be exited during tearDown.
        """
        return self._instance_teardown_stack.enter_context(context_manager)

    @final
    def add_instance_callback(self, callback):
        """
        Register a callback to be executed during tearDown.

        Parameters
        ----------
        callback : callable
            The callback to invoke at the end of each test.
        """
        return self._instance_teardown_stack.callback(callback)
class NosePlugin(Plugin):
    """nose2 plugin: pattern-based test selection plus VCR-backed recording
    of HTTP interactions for the mailmanclient test suite."""

    configSection = 'mailman'

    def __init__(self):
        super(NosePlugin, self).__init__()
        self.patterns = []
        self.stderr = False
        self.record = False

        def set_stderr(ignore):
            # Flag callback; the argument value is unused.
            self.stderr = True

        self.addArgument(self.patterns, 'P', 'pattern',
                         'Add a test matching pattern')
        self.addFlag(set_stderr, 'E', 'stderr',
                     'Enable stderr logging to sub-runners')

        def set_record(ignore):
            # Flag callback; the argument value is unused.
            self.record = True

        self.addFlag(
            set_record, 'R', 'rerecord',
            """Force re-recording of test responses. Requires Mailman to be running.""")
        self._data_path = os.path.join(TOPDIR, 'tests', 'data', 'tape.yaml')
        self._resources = ExitStack()
        self._recorder = get_vcr()

    def startTestRun(self, event):
        # Check to see if we're running the test suite in record mode. If so,
        # delete any existing recording.
        if self.record:
            try:
                os.remove(self._data_path)
            except OSError as error:
                if error.errno != errno.ENOENT:
                    raise
        # This will automatically create the recording file.
        self._resources.enter_context(
            self._recorder.use_cassette(self._data_path))

    def stopTestRun(self, event):
        # Stop all recording.
        self._resources.close()

    def getTestCaseNames(self, event):
        # Filter individual test methods against the -P patterns; a pattern
        # matching the class name keeps the whole class.
        if len(self.patterns) == 0:
            # No filter patterns, so everything should be tested.
            return
        # Does the pattern match the fully qualified class name?
        for pattern in self.patterns:
            full_class_name = '{}.{}'.format(event.testCase.__module__,
                                             event.testCase.__name__)
            if re.search(pattern, full_class_name):
                # Don't suppress this test class.
                return
        names = filter(event.isTestMethod, dir(event.testCase))
        for name in names:
            full_test_name = '{}.{}.{}'.format(event.testCase.__module__,
                                               event.testCase.__name__, name)
            for pattern in self.patterns:
                if re.search(pattern, full_test_name):
                    break
            else:
                event.excludedNames.append(name)

    def handleFile(self, event):
        # Collect .rst files (matching a pattern, if any were given) as
        # doctest-based tests.
        path = event.path[len(TOPDIR) + 1:]
        if len(self.patterns) > 0:
            for pattern in self.patterns:
                if re.search(pattern, path):
                    break
            else:
                # Skip this doctest.
                return
        base, ext = os.path.splitext(path)
        if ext != '.rst':
            return
        test = doctest.DocFileTest(path,
                                   package=mailmanclient,
                                   optionflags=FLAGS,
                                   setUp=setup,
                                   tearDown=teardown)
        # Suppress the extra "Doctest: ..." line.
        test.shortDescription = lambda: None
        event.extraTests.append(test)
class NosePlugin(Plugin):
    """nose2 plugin: pattern-based test selection plus vcr-backed recording
    of HTTP interactions for the mailmanclient test suite."""

    configSection = 'mailman'

    def __init__(self):
        super(NosePlugin, self).__init__()
        self.patterns = []
        self.stderr = False
        self.record = False

        def set_stderr(ignore):
            # Flag callback; the argument value is unused.
            self.stderr = True

        self.addArgument(self.patterns, 'P', 'pattern',
                         'Add a test matching pattern')
        self.addFlag(set_stderr, 'E', 'stderr',
                     'Enable stderr logging to sub-runners')

        def set_record(ignore):
            # Flag callback; the argument value is unused.
            self.record = True

        self.addFlag(set_record, 'R', 'rerecord',
                     """Force re-recording of test responses. Requires Mailman to be running.""")
        self._data_path = os.path.join(TOPDIR, 'tests', 'data', 'tape.yaml')
        self._resources = ExitStack()

    def startTestRun(self, event):
        # Check to see if we're running the test suite in record mode. If so,
        # delete any existing recording.
        if self.record:
            try:
                os.remove(self._data_path)
            except OSError as error:
                if error.errno != errno.ENOENT:
                    raise
        # This will automatically create the recording file.
        self._resources.enter_context(vcr.use_cassette(self._data_path))

    def stopTestRun(self, event):
        # Stop all recording.
        self._resources.close()

    def getTestCaseNames(self, event):
        # Filter individual test methods against the -P patterns; a pattern
        # matching the class name keeps the whole class.
        if len(self.patterns) == 0:
            # No filter patterns, so everything should be tested.
            return
        # Does the pattern match the fully qualified class name?
        for pattern in self.patterns:
            full_class_name = '{}.{}'.format(
                event.testCase.__module__, event.testCase.__name__)
            if re.search(pattern, full_class_name):
                # Don't suppress this test class.
                return
        names = filter(event.isTestMethod, dir(event.testCase))
        for name in names:
            full_test_name = '{}.{}.{}'.format(
                event.testCase.__module__, event.testCase.__name__, name)
            for pattern in self.patterns:
                if re.search(pattern, full_test_name):
                    break
            else:
                event.excludedNames.append(name)

    def handleFile(self, event):
        # Collect .rst files (matching a pattern, if any were given) as
        # doctest-based tests.
        path = event.path[len(TOPDIR)+1:]
        if len(self.patterns) > 0:
            for pattern in self.patterns:
                if re.search(pattern, path):
                    break
            else:
                # Skip this doctest.
                return
        base, ext = os.path.splitext(path)
        if ext != '.rst':
            return
        test = doctest.DocFileTest(
            path, package=mailmanclient, optionflags=FLAGS,
            setUp=setup, tearDown=teardown)
        # Suppress the extra "Doctest: ..." line.
        test.shortDescription = lambda: None
        event.extraTests.append(test)
class ZiplineTestCase(with_metaclass(FinalMeta, TestCase)):
    """
    Shared extensions to core unittest.TestCase.

    Overrides the default unittest setUp/tearDown functions with versions that
    use ExitStack to correctly clean up resources, even in the face of
    exceptions that occur during setUp/setUpClass.

    Subclasses **should not override setUp or setUpClass**!

    Instead, they should implement `init_instance_fixtures` for per-test-method
    resources, and `init_class_fixtures` for per-class resources.

    Resources that need to be cleaned up should be registered using
    either `enter_{class,instance}_context` or `add_{class,instance}_callback}.
    """
    # True while setUp/init_instance_fixtures run; used to catch class-level
    # fixture calls mistakenly made from instance-level hooks.
    _in_setup = False

    @final
    @classmethod
    def setUpClass(cls):
        cls._class_teardown_stack = ExitStack()
        try:
            cls._base_init_fixtures_was_called = False
            cls.init_class_fixtures()
            assert cls._base_init_fixtures_was_called, (
                "ZiplineTestCase.init_class_fixtures() was not called.\n"
                "This probably means that you overrode init_class_fixtures"
                " without calling super()."
            )
        except:
            # unittest does not run tearDownClass when setUpClass raises, so
            # clean up manually before propagating.
            cls.tearDownClass()
            raise

    @classmethod
    def init_class_fixtures(cls):
        """
        Override and implement this classmethod to register resources that
        should be created and/or torn down on a per-class basis.

        Subclass implementations of this should always invoke this with super()
        to ensure that fixture mixins work properly.
        """
        if cls._in_setup:
            raise ValueError(
                'Called init_class_fixtures from init_instance_fixtures.'
                'Did you write super(..., self).init_class_fixtures() instead'
                ' of super(..., self).init_instance_fixtures()?',
            )
        cls._base_init_fixtures_was_called = True

    @final
    @classmethod
    def tearDownClass(cls):
        cls._class_teardown_stack.close()

    @final
    @classmethod
    def enter_class_context(cls, context_manager):
        """
        Enter a context manager to be exited during the tearDownClass
        """
        if cls._in_setup:
            raise ValueError(
                'Attempted to enter a class context in init_instance_fixtures.'
                '\nDid you mean to call enter_instance_context?',
            )
        return cls._class_teardown_stack.enter_context(context_manager)

    @final
    @classmethod
    def add_class_callback(cls, callback):
        """
        Register a callback to be executed during tearDownClass.

        Parameters
        ----------
        callback : callable
            The callback to invoke at the end of the test suite.
        """
        if cls._in_setup:
            raise ValueError(
                'Attempted to add a class callback in init_instance_fixtures.'
                '\nDid you mean to call add_instance_callback?',
            )
        return cls._class_teardown_stack.callback(callback)

    @final
    def setUp(self):
        type(self)._in_setup = True
        self._instance_teardown_stack = ExitStack()
        try:
            self._init_instance_fixtures_was_called = False
            self.init_instance_fixtures()
            assert self._init_instance_fixtures_was_called, (
                "ZiplineTestCase.init_instance_fixtures() was not"
                " called.\n"
                "This probably means that you overrode"
                " init_instance_fixtures without calling super()."
            )
        except:
            # unittest does not run tearDown when setUp raises.
            self.tearDown()
            raise
        finally:
            type(self)._in_setup = False

    def init_instance_fixtures(self):
        # Subclasses registering per-test resources must chain up to here.
        self._init_instance_fixtures_was_called = True

    @final
    def tearDown(self):
        self._instance_teardown_stack.close()

    @final
    def enter_instance_context(self, context_manager):
        """
        Enter a context manager that should be exited during tearDown.
        """
        return self._instance_teardown_stack.enter_context(context_manager)

    @final
    def add_instance_callback(self, callback):
        """
        Register a callback to be executed during tearDown.

        Parameters
        ----------
        callback : callable
            The callback to invoke at the end of each test.
        """
        return self._instance_teardown_stack.callback(callback)
class ZiplineTestCase(with_metaclass(FinalMeta, TestCase)):
    """
    Shared extensions to core unittest.TestCase.

    Overrides the default unittest setUp/tearDown functions with versions that
    use ExitStack to correctly clean up resources, even in the face of
    exceptions that occur during setUp/setUpClass.

    Subclasses **should not override setUp or setUpClass**!

    Instead, they should implement `init_instance_fixtures` for per-test-method
    resources, and `init_class_fixtures` for per-class resources.

    Resources that need to be cleaned up should be registered using
    either `enter_{class,instance}_context` or `add_{class,instance}_callback}.
    """
    # True while setUp/init_instance_fixtures run; used to catch class-level
    # fixture calls mistakenly made from instance-level hooks.
    _in_setup = False

    @final
    @classmethod
    def setUpClass(cls):
        # Hold a set of all the "static" attributes on the class. These are
        # things that are not populated after the class was created like
        # methods or other class level attributes.
        cls._static_class_attributes = set(vars(cls))
        cls._class_teardown_stack = ExitStack()
        try:
            cls._base_init_fixtures_was_called = False
            cls.init_class_fixtures()
            assert cls._base_init_fixtures_was_called, (
                "ZiplineTestCase.init_class_fixtures() was not called.\n"
                "This probably means that you overrode init_class_fixtures"
                " without calling super()."
            )
        except:
            # unittest does not run tearDownClass when setUpClass raises, so
            # clean up manually before propagating.
            cls.tearDownClass()
            raise

    @classmethod
    def init_class_fixtures(cls):
        """
        Override and implement this classmethod to register resources that
        should be created and/or torn down on a per-class basis.

        Subclass implementations of this should always invoke this with super()
        to ensure that fixture mixins work properly.
        """
        if cls._in_setup:
            raise ValueError(
                'Called init_class_fixtures from init_instance_fixtures.'
                'Did you write super(..., self).init_class_fixtures() instead'
                ' of super(..., self).init_instance_fixtures()?',
            )
        cls._base_init_fixtures_was_called = True

    @final
    @classmethod
    def tearDownClass(cls):
        # We need to get this before it's deleted by the loop.
        stack = cls._class_teardown_stack
        for name in set(vars(cls)) - cls._static_class_attributes:
            # Remove all of the attributes that were added after the class was
            # constructed. This cleans up any large test data that is class
            # scoped while still allowing subclasses to access class level
            # attributes.
            delattr(cls, name)
        stack.close()

    @final
    @classmethod
    def enter_class_context(cls, context_manager):
        """
        Enter a context manager to be exited during the tearDownClass
        """
        if cls._in_setup:
            raise ValueError(
                'Attempted to enter a class context in init_instance_fixtures.'
                '\nDid you mean to call enter_instance_context?',
            )
        return cls._class_teardown_stack.enter_context(context_manager)

    @final
    @classmethod
    def add_class_callback(cls, callback):
        """
        Register a callback to be executed during tearDownClass.

        Parameters
        ----------
        callback : callable
            The callback to invoke at the end of the test suite.
        """
        if cls._in_setup:
            raise ValueError(
                'Attempted to add a class callback in init_instance_fixtures.'
                '\nDid you mean to call add_instance_callback?',
            )
        return cls._class_teardown_stack.callback(callback)

    @final
    def setUp(self):
        type(self)._in_setup = True
        # Snapshot pre-existing instance attributes so tearDown can delete
        # everything the test added.
        self._pre_setup_attrs = set(vars(self))
        self._instance_teardown_stack = ExitStack()
        try:
            self._init_instance_fixtures_was_called = False
            self.init_instance_fixtures()
            assert self._init_instance_fixtures_was_called, (
                "ZiplineTestCase.init_instance_fixtures() was not"
                " called.\n"
                "This probably means that you overrode"
                " init_instance_fixtures without calling super()."
            )
        except:
            # unittest does not run tearDown when setUp raises.
            self.tearDown()
            raise
        finally:
            type(self)._in_setup = False

    def init_instance_fixtures(self):
        # Subclasses registering per-test resources must chain up to here.
        self._init_instance_fixtures_was_called = True

    @final
    def tearDown(self):
        # We need to get this before it's deleted by the loop.
        stack = self._instance_teardown_stack
        for attr in set(vars(self)) - self._pre_setup_attrs:
            delattr(self, attr)
        stack.close()

    @final
    def enter_instance_context(self, context_manager):
        """
        Enter a context manager that should be exited during tearDown.
        """
        return self._instance_teardown_stack.enter_context(context_manager)

    @final
    def add_instance_callback(self, callback):
        """
        Register a callback to be executed during tearDown.

        Parameters
        ----------
        callback : callable
            The callback to invoke at the end of each test.
        """
        return self._instance_teardown_stack.callback(callback)
class ZiplineTestCase(with_metaclass(FinalMeta, TestCase)):
    """
    Shared extensions to core unittest.TestCase.

    Overrides the default unittest setUp/tearDown functions with versions that
    use ExitStack to correctly clean up resources, even in the face of
    exceptions that occur during setUp/setUpClass.

    Subclasses **should not override setUp or setUpClass**!

    Instead, they should implement `init_instance_fixtures` for per-test-method
    resources, and `init_class_fixtures` for per-class resources.

    Resources that need to be cleaned up should be registered using
    either `enter_{class,instance}_context` or `add_{class,instance}_callback}.
    """
    # True while setUp/init_instance_fixtures run; used to catch class-level
    # fixture calls mistakenly made from instance-level hooks.
    _in_setup = False

    @final
    @classmethod
    def setUpClass(cls):
        # Hold a set of all the "static" attributes on the class. These are
        # things that are not populated after the class was created like
        # methods or other class level attributes.
        cls._static_class_attributes = set(vars(cls))
        cls._class_teardown_stack = ExitStack()
        try:
            cls._base_init_fixtures_was_called = False
            cls.init_class_fixtures()
            assert cls._base_init_fixtures_was_called, (
                "ZiplineTestCase.init_class_fixtures() was not called.\n"
                "This probably means that you overrode init_class_fixtures"
                " without calling super().")
        except:
            # unittest does not run tearDownClass when setUpClass raises, so
            # clean up manually before propagating.
            cls.tearDownClass()
            raise

    @classmethod
    def init_class_fixtures(cls):
        """
        Override and implement this classmethod to register resources that
        should be created and/or torn down on a per-class basis.

        Subclass implementations of this should always invoke this with super()
        to ensure that fixture mixins work properly.
        """
        if cls._in_setup:
            raise ValueError(
                'Called init_class_fixtures from init_instance_fixtures.'
                'Did you write super(..., self).init_class_fixtures() instead'
                ' of super(..., self).init_instance_fixtures()?',
            )
        cls._base_init_fixtures_was_called = True

    @final
    @classmethod
    def tearDownClass(cls):
        # We need to get this before it's deleted by the loop.
        stack = cls._class_teardown_stack
        for name in set(vars(cls)) - cls._static_class_attributes:
            # Remove all of the attributes that were added after the class was
            # constructed. This cleans up any large test data that is class
            # scoped while still allowing subclasses to access class level
            # attributes.
            delattr(cls, name)
        stack.close()

    @final
    @classmethod
    def enter_class_context(cls, context_manager):
        """
        Enter a context manager to be exited during the tearDownClass
        """
        if cls._in_setup:
            raise ValueError(
                'Attempted to enter a class context in init_instance_fixtures.'
                '\nDid you mean to call enter_instance_context?',
            )
        return cls._class_teardown_stack.enter_context(context_manager)

    @final
    @classmethod
    def add_class_callback(cls, callback):
        """
        Register a callback to be executed during tearDownClass.

        Parameters
        ----------
        callback : callable
            The callback to invoke at the end of the test suite.
        """
        if cls._in_setup:
            raise ValueError(
                'Attempted to add a class callback in init_instance_fixtures.'
                '\nDid you mean to call add_instance_callback?',
            )
        return cls._class_teardown_stack.callback(callback)

    @final
    def setUp(self):
        type(self)._in_setup = True
        # Snapshot pre-existing instance attributes so tearDown can delete
        # everything the test added.
        self._pre_setup_attrs = set(vars(self))
        self._instance_teardown_stack = ExitStack()
        try:
            self._init_instance_fixtures_was_called = False
            self.init_instance_fixtures()
            assert self._init_instance_fixtures_was_called, (
                "ZiplineTestCase.init_instance_fixtures() was not"
                " called.\n"
                "This probably means that you overrode"
                " init_instance_fixtures without calling super().")
        except:
            # unittest does not run tearDown when setUp raises.
            self.tearDown()
            raise
        finally:
            type(self)._in_setup = False

    def init_instance_fixtures(self):
        # Subclasses registering per-test resources must chain up to here.
        self._init_instance_fixtures_was_called = True

    @final
    def tearDown(self):
        # We need to get this before it's deleted by the loop.
        stack = self._instance_teardown_stack
        for attr in set(vars(self)) - self._pre_setup_attrs:
            delattr(self, attr)
        stack.close()

    @final
    def enter_instance_context(self, context_manager):
        """
        Enter a context manager that should be exited during tearDown.
        """
        return self._instance_teardown_stack.enter_context(context_manager)

    @final
    def add_instance_callback(self, callback):
        """
        Register a callback to be executed during tearDown.

        Parameters
        ----------
        callback : callable
            The callback to invoke at the end of each test.
        """
        return self._instance_teardown_stack.callback(callback)
def context(self):
    """Build an ExitStack with every registered context manager entered.

    Each entered value is also attached to the returned stack as an
    attribute under its registered name, so callers can reach the live
    resources directly until the stack is closed.
    """
    entered = ExitStack()
    for label, manager in self.__contexts__:
        value = entered.enter_context(manager)
        setattr(entered, label, value)
    return entered