def test_single_workload(self):
  """This test exercises the auto-scaling logic in the admission controller. It spins
  up a base cluster (coordinator, catalog, statestore), runs some queries to observe
  that new executors are started, then stops the workload and observes that the
  cluster gets shut down."""
  GROUP_SIZE = 2
  EXECUTOR_SLOTS = 3
  auto_scaler = AutoScaler(executor_slots=EXECUTOR_SLOTS, group_size=GROUP_SIZE)
  workload = None
  try:
    auto_scaler.start()
    sleep(self.INITIAL_STARTUP_TIME_S)

    workload = ConcurrentWorkload(self.QUERY, num_streams=5)
    LOG.info("Starting workload")
    workload.start()

    # Wait for workers to spin up.
    cluster_size = GROUP_SIZE + 1  # +1 to include the coordinator.
    assert any(self._get_num_backends() >= cluster_size or sleep(1)
               for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
        "Number of backends did not increase within %s s" % self.STATE_CHANGE_TIMEOUT_S
    assert self.impalad_test_service.get_metric_value(
        "cluster-membership.executor-groups.total-healthy") >= 1

    # Wait until we admitted at least 10 queries.
    assert any(self._get_total_admitted_queries() >= 10 or sleep(1)
               for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
        "Did not admit enough queries within %s s" % self.STATE_CHANGE_TIMEOUT_S

    # Sample the query rate while only a single executor group is running.
    single_group_query_rate = workload.get_query_rate()

    # Wait for the second executor group to start.
    cluster_size = (2 * GROUP_SIZE) + 1
    assert any(self._get_num_backends() >= cluster_size or sleep(1)
               for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
        "Number of backends did not reach %s within %s s" % (
            cluster_size, self.STATE_CHANGE_TIMEOUT_S)
    assert self.impalad_test_service.get_metric_value(
        "cluster-membership.executor-groups.total-healthy") >= 2

    # Wait for the query rate to exceed the maximum for a single executor group. In
    # the past we tried to wait for it to pass a higher threshold, but on some
    # platforms that was too flaky.
    max_query_rate = 0
    # This barrier has been flaky in the past, so we wait 2x as long as for the other
    # checks.
    end = time() + 2 * self.STATE_CHANGE_TIMEOUT_S
    while time() < end:
      current_rate = workload.get_query_rate()
      LOG.info("Current rate: %s" % current_rate)
      max_query_rate = max(max_query_rate, current_rate)
      if max_query_rate > single_group_query_rate:
        break
      sleep(1)
    assert max_query_rate > single_group_query_rate, \
        "Query rate did not exceed %s within %s s. Maximum was %s. Cluster size " \
        "is %s." % (single_group_query_rate, 2 * self.STATE_CHANGE_TIMEOUT_S,
                    max_query_rate, cluster_size)

    LOG.info("Stopping workload")
    workload.stop()

    # Wait for workers to spin down until only the coordinator remains.
    self.impalad_test_service.wait_for_metric_value(
        TOTAL_BACKENDS_METRIC_NAME, 1, timeout=self.STATE_CHANGE_TIMEOUT_S, interval=1)
    assert self.impalad_test_service.get_metric_value(
        "cluster-membership.executor-groups.total") == 0
  finally:
    if workload:
      workload.stop()
    LOG.info("Stopping auto scaler")
    auto_scaler.stop()
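
# The waits above rely on a compact polling idiom: sleep(1) returns None, which is
# falsy, so "condition or sleep(1)" re-evaluates the condition roughly once per second,
# and any() short-circuits as soon as it holds (or gives up after the timeout expires).
# Below is a minimal, self-contained sketch of the same idiom. Note that poll_until is
# a hypothetical helper written for illustration, not part of this test suite.

def poll_until(condition, timeout_s):
  """Polls condition() about once per second; returns True if it held within
  timeout_s seconds, False otherwise."""
  from time import sleep  # imported at module level in the real test file
  return any(condition() or sleep(1) for _ in range(timeout_s))

# Example usage, mirroring the backend checks above:
#   assert poll_until(lambda: self._get_num_backends() >= cluster_size,
#                     self.STATE_CHANGE_TIMEOUT_S), \
#       "Number of backends did not increase within %s s" % self.STATE_CHANGE_TIMEOUT_S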