def _do_teardown(self, mode=None):
    """Shut down the sharded cluster.

    Stops the balancer (unless tearing down destructively), then the config
    server, every mongos, and every shard. Raises ServerFailure if any member
    fails to stop.
    """
    self.logger.info("Stopping all members of the sharded cluster...")

    if not self.is_running():
        self.logger.warning(
            "All members of the sharded cluster were expected to be running, "
            "but weren't.")

    # If we're killing or aborting to archive data files, stopping the balancer will
    # execute server commands that might lead to on-disk changes from the point of
    # failure, so skip it in those modes.
    destructive_modes = (interface.TeardownMode.KILL, interface.TeardownMode.ABORT)
    if self.enable_balancer and mode not in destructive_modes:
        self.stop_balancer()

    handler = interface.FixtureTeardownHandler(self.logger)

    if self.configsvr is not None:
        handler.teardown(self.configsvr, "config server", mode=mode)
    for mongos in self.mongos:
        handler.teardown(mongos, "mongos", mode=mode)
    for shard in self.shards:
        handler.teardown(shard, "shard", mode=mode)

    if not handler.was_successful():
        self.logger.error("Stopping the sharded cluster fixture failed.")
        raise errors.ServerFailure(handler.get_error_message())
    self.logger.info("Successfully stopped all members of the sharded cluster.")
def _do_teardown(self, mode=None):
    """Shut down the replica set.

    Tears down the initial sync node (if any) and then every member, secondaries
    first. Raises ServerFailure if any member fails to stop.
    """
    self.logger.info("Stopping all members of the replica set...")
    running_at_start = self.is_running()
    if not running_at_start:
        # Fix: log at warning level (was info). An unexpectedly-stopped fixture is an
        # anomaly, and the sibling fixture teardowns report this same condition with
        # logger.warning; info-level made it easy to miss in the logs.
        self.logger.warning(
            "All members of the replica set were expected to be running, "
            "but weren't.")
    teardown_handler = interface.FixtureTeardownHandler(self.logger)
    if self.initial_sync_node:
        teardown_handler.teardown(self.initial_sync_node, "initial sync node", mode=mode)
    # Terminate the secondaries first to reduce noise in the logs.
    for node in reversed(self.nodes):
        teardown_handler.teardown(node, "replica set member on port %d" % node.port,
                                  mode=mode)
    if teardown_handler.was_successful():
        self.logger.info("Successfully stopped all members of the replica set.")
    else:
        self.logger.error("Stopping the replica set fixture failed.")
        raise errors.ServerFailure(teardown_handler.get_error_message())
def _do_teardown(self, mode=None):
    """Shut down the replica sets.

    Tears down every fixture in self.fixtures in reverse order. Raises
    ServerFailure if any fixture fails to stop.
    """
    self.logger.info("Stopping all replica sets...")

    if not self.is_running():
        self.logger.warning("Donor replica set expected to be running, but wasn't.")

    handler = interface.FixtureTeardownHandler(self.logger)

    # Don't take the lock because we don't expect teardown to be called while the
    # ContinuousShardSplit hook is running, which is the only thing that can modify
    # self.fixtures. Tearing down may take a long time, so taking the lock during
    # that process might result in hangs in other functions which need to take it.
    for fixture in reversed(self.fixtures):
        if _is_replica_set_fixture(fixture):
            label = f"replica set '{fixture.replset_name}'"
        else:
            label = f"standalone on port {fixture.port}"
        handler.teardown(fixture, label, mode=mode)

    if not handler.was_successful():
        self.logger.error("Stopping the fixture failed.")
        raise self.fixturelib.ServerFailure(handler.get_error_message())
    self.logger.info("Successfully stopped donor replica set and all recipient nodes.")
def _do_teardown(self, mode=None):
    """Shut down the clusters and the replicator.

    Tears down the replicator first, then each cluster in turn. Raises
    ServerFailure if anything fails to stop.
    """
    if not self.is_running():
        self.logger.warning(
            "All clusters and replicators were expected to be running, but weren't."
        )

    handler = interface.FixtureTeardownHandler(self.logger)

    # The replicator is torn down before any of the clusters.
    self.logger.info("Stopping the replicator...")
    handler.teardown(self.replicator, "replicator", mode=mode)
    self.logger.info("Stopped the replicator...")

    self.logger.info("Stopping all clusters...")
    for index, cluster in enumerate(self.clusters):
        handler.teardown(cluster, f"cluster {index}", mode=mode)

    if not handler.was_successful():
        self.logger.error("Stopping the fixture failed.")
        raise self.fixturelib.ServerFailure(handler.get_error_message())
    self.logger.info("Successfully stopped all clusters and replicators.")
def test_teardown_error(self):
    """A failing fixture teardown marks the handler unsuccessful and records the error."""
    handler = interface.FixtureTeardownHandler(
        logging.getLogger("handler_unittests"))

    # Tear down a fixture whose teardown raises.
    failing_fixture = UnitTestFixture(should_raise=True)
    handler.teardown(failing_fixture, "ko")

    # The failure must be reported, with the fixture's error message preserved.
    self.assertFalse(handler.was_successful())
    self.assertEqual("Error while stopping ko: " + UnitTestFixture.ERROR_MESSAGE,
                     handler.get_error_message())
def test_teardown_ok(self):
    """A successful fixture teardown leaves the handler successful with no error."""
    handler = interface.FixtureTeardownHandler(
        logging.getLogger("handler_unittests"))

    # A fresh handler starts out successful with no recorded error.
    self.assertTrue(handler.was_successful())
    self.assertIsNone(handler.get_error_message())

    # Tear down a fixture whose teardown does not raise.
    passing_fixture = UnitTestFixture(should_raise=False)
    handler.teardown(passing_fixture, "ok")

    # The handler remains successful and error-free afterwards.
    self.assertTrue(handler.was_successful())
    self.assertIsNone(handler.get_error_message())
def _do_teardown(self, mode=None):
    """Shut down the replica sets.

    Tears down every replica set in self.replica_sets. Raises ServerFailure if
    any of them fails to stop.
    """
    self.logger.info("Stopping all replica sets...")

    if not self.is_running():
        self.logger.warning("All replica sets were expected to be running, but weren't.")

    handler = interface.FixtureTeardownHandler(self.logger)
    for replica_set in self.replica_sets:
        handler.teardown(replica_set, "replica_set", mode=mode)

    if not handler.was_successful():
        self.logger.error("Stopping the fixture failed.")
        raise errors.ServerFailure(handler.get_error_message())
    self.logger.info("Successfully stopped all replica sets.")