def test_patch_returns_cleanup(self):
    # The nullary callable handed back by patch(obj, name, value)
    # reinstates the attribute's original value when invoked.
    target = TestObj()
    before = target.foo
    undo = patch(target, 'foo', 42)
    undo()
    self.assertEqual(before, target.foo)
def test_patch_returns_cleanup(self):
    # Invoking the zero-argument callable returned by
    # patch(obj, name, value) must put obj back in its original state.
    target = TestObj()
    saved = target.foo
    undo = patch(target, "foo", 42)
    undo()
    self.assertEqual(saved, target.foo)
def setup_tobiko_config(conf):
    """Wire warning capture, fixtures and per-module config hooks.

    :param conf: the tobiko configuration object (reads ``conf.debug``
        and ``conf.logging.capture_log``).
    """
    # Route Python warnings through the logging machinery.
    logging.captureWarnings(True)
    py_warnings = log.getLogger('py.warnings')
    warnings_on = py_warnings.isEnabledFor(log.WARNING)
    if conf.debug and not warnings_on:
        # Debug mode: make sure Python warnings get printed.
        py_warnings.logger.setLevel(log.WARNING)
    elif not conf.debug and warnings_on:
        # Non-debug mode: quieten Python warnings down to errors.
        py_warnings.logger.setLevel(log.ERROR)

    tobiko.setup_fixture(HttpProxyFixture)
    if conf.logging.capture_log:
        monkey.patch(testtools, 'TestCase', tobiko.CaptureLogTest)

    # Give every registered configuration module a chance to hook in.
    for name in CONFIG_MODULES:
        mod = importlib.import_module(name)
        if hasattr(mod, 'setup_tobiko_config'):
            mod.setup_tobiko_config(conf=conf)
def _interceptPowerTypesQuery(self):
    # Snapshot the power-type schema once, then serve it from a stub so
    # tests never hit the real registry.
    schema = PowerDriverRegistry.get_schema(
        detect_missing_packages=False)

    @wraps(driver_parameters.get_all_power_types)
    def get_all_power_types(controllers=None, ignore_errors=True):
        # Hand out a deep copy so callers can't mutate the snapshot.
        return deepcopy(schema)

    # monkey.patch returns the restorer; register it for teardown.
    self.addCleanup(
        monkey.patch(
            driver_parameters, "get_all_power_types", get_all_power_types))
def patch(self, obj, attribute, value):
    """Monkey-patch 'obj.attribute' to 'value' while the test is running.

    If 'obj' has no attribute, then the monkey-patch will still go ahead,
    and the attribute will be deleted instead of restored to its original
    value.

    :param obj: The object to patch. Can be anything.
    :param attribute: The attribute on 'obj' to patch.
    :param value: The value to set 'obj.attribute' to.
    """
    # The module-level patch() helper hands back a restorer callable;
    # schedule it so the original state returns at test teardown.
    restore = patch(obj, attribute, value)
    self.addCleanup(restore)
def setUp(self):
    super().setUp()
    # Detach every observer registered through the legacy API,
    # arranging for each one to be put back at teardown.
    for obs in list(log.theLogPublisher.observers):
        self.addCleanup(log.theLogPublisher.addObserver, obs)
        log.theLogPublisher.removeObserver(obs)
    # Clear any remaining modern observers; patch() returns the restorer.
    self.addCleanup(patch(globalLogPublisher, "_observers", []))
    # Install our own observer via the legacy API so it gets wrapped by
    # whatever legacy wrapper is in effect.
    self.addCleanup(log.theLogPublisher.removeObserver, self.events.append)
    log.theLogPublisher.addObserver(self.events.append)
def setUp(self):
    super(MockRegionToClusterRPCFixture, self).setUp()
    # A shared secret must be in place before faking RPC traffic.
    self.secret = security.get_shared_secret()
    # This fixture is useless without a running event-loop.
    if not eventloop.loop.running:
        raise RuntimeError(
            "Please start the event-loop before using this fixture.")
    self.rpc = get_service_in_eventloop("rpc").wait(10)
    # The substitution below relies on the RPC service keeping its
    # connections in a defaultdict(set); verify that assumption first.
    assert isinstance(self.rpc.connections, defaultdict)
    assert self.rpc.connections.default_factory is set
    # Swap in an empty fake connections mapping for this fixture's
    # lifetime; patch() hands back the restorer.
    self.addCleanup(patch(self.rpc, "connections", defaultdict(set)))
def setUp(self):
    super(ClusterRPCFixture, self).setUp()
    # This fixture is useless without a running event-loop.
    if not eventloop.loop.running:
        raise RuntimeError(
            "Please start the event-loop before using this fixture.")
    rpc_service = get_service_in_eventloop("rpc").wait(10)
    # The substitution below relies on the RPC service keeping its
    # connections in a defaultdict(set); verify that assumption first.
    assert isinstance(rpc_service.connections, defaultdict)
    assert rpc_service.connections.default_factory is set
    # Build one fake connection per rack controller known right now.
    fakes = defaultdict(set)
    for controller in RackController.objects.all():
        fake = FakeConnection(controller.system_id)
        fakes[fake.ident].add(fake)
    # Swap the fakes in; patch() hands back the restorer for teardown.
    self.addCleanup(patch(rpc_service, "connections", fakes))
def setUp(self):
    super().setUp()
    # A shared secret must be in place before faking RPC traffic.
    self.secret = security.get_shared_secret()
    # This fixture is useless without a running event-loop.
    if not eventloop.loop.running:
        raise RuntimeError(
            "Please start the event-loop before using this fixture.")
    self.rpc = rpc = get_service_in_eventloop("rpc").wait(10)
    # The substitution below relies on the RPC service keeping its
    # connections in a defaultdict(set); verify that assumption first.
    assert isinstance(rpc.connections, defaultdict)
    assert rpc.connections.default_factory is set
    # Build one fake connection per rack controller known right now.
    fakes = defaultdict(set)
    system_ids = RackController.objects.values_list("system_id", flat=True)
    for sid in system_ids:
        fake = FakeConnection(sid)
        fakes[fake.ident].add(fake)
    # Swap the fakes in; patch() hands back the restorer for teardown.
    self.addCleanup(patch(rpc, "connections", fakes))
def setUp(self):
    """Start the regiond service in a forked child process.

    The parent registers cleanups to stop the child and verifies, after a
    short grace period, that the child survived startup; the child never
    returns from this method (it always terminates via ``os._exit``).
    """
    super(MAASRegionServiceFixture, self).setUp()
    # Force django DEBUG false; patch() returns the restorer, which is
    # registered so the original setting comes back at teardown.
    self.addCleanup(patch(settings, "DEBUG", False))
    # Create a database in the PostgreSQL cluster for each database
    # connection configured in Django's settings that points to the same
    # datadir.
    cluster = ClusterFixture("db", preserve=True)
    self.useFixture(cluster)
    for database in settings.DATABASES.values():
        if database["HOST"] == cluster.datadir:
            cluster.createdb(database["NAME"])
    # Setup the database for testing. This is so the database is isolated
    # only for this testing.
    self.setup_databases()
    self.addCleanup(self.teardown_databases)
    # Fork the process to have regiond run in its own process.
    twistd_pid = os.fork()
    if twistd_pid == 0:
        # --- Child process: becomes twistd and never returns. ---
        # Redirect all output to /dev/null.
        redirect_to_devnull()
        # Add command line options to start twistd.
        sys.argv[1:] = [
            "--nodaemon",
            "--pidfile", "",
            "maas-regiond",
        ]
        # Change the DEFAULT_PORT so it can run along side of the
        # development regiond.
        from maasserver import eventloop
        patch(eventloop, "DEFAULT_PORT", 5253)
        # Start twistd.
        try:
            twistd.run()
        except:
            # NOTE(review): bare except is deliberate here — the child
            # must never propagate into the test harness; it reports the
            # traceback and exits non-zero. os._exit skips the finally.
            traceback.print_exc()
            os._exit(2)
        finally:
            # Normal twistd shutdown: exit the child immediately without
            # running any parent-process atexit/cleanup machinery.
            os._exit(0)
    else:
        # --- Parent process: supervise the child. ---
        # Add cleanup to stop the twistd service.
        self.addCleanup(self.stop_twistd, twistd_pid)
        # Check that the child process is still running after a few
        # seconds. This makes sure that everything started okay and it
        # is still running.
        time.sleep(2)
        try:
            # Signal 0 probes for existence without delivering a signal.
            os.kill(twistd_pid, 0)
        except OSError:
            # Not running.
            raise ServiceError(
                "Failed to start regiond. Check that another test is "
                "not running at the same time.")
def setUp(self):
    """Capture stdout into an in-memory writer for the test's duration."""
    self.mwr = MemoryWriter()
    # patch() returns a restorer callable. Previously it was discarded,
    # so sys.stdout stayed patched after the test finished; register the
    # restorer so stdout is put back at teardown, matching the
    # addCleanup(patch(...)) convention used elsewhere in this suite.
    self.addCleanup(patch(sys, 'stdout', self.mwr))
def test_patch_patches(self):
    # After patch(obj, name, value) the attribute holds the new value.
    target = TestObj()
    patch(target, 'foo', 42)
    self.assertEqual(42, target.foo)
def _setUp(self):
    # Fail loudly if config no longer exposes the hook we stub out.
    assert hasattr(config, "_gen_addresses")
    # patch() hands back the restorer; register it for teardown.
    self.addCleanup(
        patch(config, "_gen_addresses", self._genRandomAddresses))
def setUp(self):
    super(TagCachedKnowledgeFixture, self).setUp()
    # Substitute the nodegroup-aware cached-knowledge getter for this
    # fixture's lifetime; patch() hands back the undo callable.
    self.addCleanup(
        patch(tags, "get_cached_knowledge",
              get_nodegroup_cached_knowledge))
def test_patch_patches(self):
    # patch(obj, name, value) must leave obj.name equal to value.
    subject = TestObj()
    patch(subject, "foo", 42)
    self.assertEqual(42, subject.foo)
def setUp(self):
    super(TagCachedKnowledgeFixture, self).setUp()
    # Route cached-knowledge lookups through the nodegroup-aware helper
    # while this fixture is active; the restorer runs at teardown.
    undo = patch(
        tags, "get_cached_knowledge", get_nodegroup_cached_knowledge)
    self.addCleanup(undo)