Example #1
    def test_single_workload(self):
        """This test exercises the auto-scaling logic in the admission controller. It spins up
    a base cluster (coordinator, catalog, statestore), runs a workload to initiate a
    scaling up event as the queries start queuing, then stops the workload and observes
    that the cluster gets shutdown."""
        GROUP_SIZE = 2
        EXECUTOR_SLOTS = 3
        auto_scaler = AutoScaler(executor_slots=EXECUTOR_SLOTS,
                                 group_size=GROUP_SIZE)
        workload = None
        try:
            auto_scaler.start()
            sleep(self.INITIAL_STARTUP_TIME_S)

            workload = ConcurrentWorkload(self.QUERY, num_streams=5)
            LOG.info("Starting workload")
            workload.start()

            # Wait for workers to spin up
            cluster_size = GROUP_SIZE + 1  # +1 to include coordinator.
            assert any(self._get_num_backends() >= cluster_size or sleep(1)
                       for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
                "Number of backends did not increase within %s s" % self.STATE_CHANGE_TIMEOUT_S
            assert self.impalad_test_service.get_metric_value(
                "cluster-membership.executor-groups.total-healthy") >= 1

            # Wait until we have admitted at least 10 queries
            assert any(self._get_total_admitted_queries() >= 10 or sleep(1)
                       for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
                "Did not admit enough queries within %s s" % self.STATE_CHANGE_TIMEOUT_S
            # Wait for second executor group to start
            cluster_size = (2 * GROUP_SIZE) + 1
            assert any(self._get_num_backends() >= cluster_size or sleep(1)
                       for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
                           "Number of backends did not reach %s within %s s" % (
                           cluster_size, self.STATE_CHANGE_TIMEOUT_S)
            assert self.impalad_test_service.get_metric_value(
                "cluster-membership.executor-groups.total-healthy") >= 2

            LOG.info("Stopping workload")
            workload.stop()

            # Wait for workers to spin down
            self.impalad_test_service.wait_for_metric_value(
                TOTAL_BACKENDS_METRIC_NAME,
                1,
                timeout=self.STATE_CHANGE_TIMEOUT_S,
                interval=1)
            assert self.impalad_test_service.get_metric_value(
                "cluster-membership.executor-groups.total") == 0

        finally:
            if workload:
                workload.stop()
            LOG.info("Stopping auto scaler")
            auto_scaler.stop()
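
A note on the polling idiom used throughout these examples: in
assert any(cond or sleep(1) for _ in range(TIMEOUT)), sleep() returns None, which is
falsy, so each generator item re-checks the condition and then pauses for a second, and
any() short-circuits as soon as the condition holds. The same effect can be written more
explicitly; the helper below is a minimal sketch (the name wait_for_condition is ours,
not part of the Impala test framework):

from time import sleep, time

def wait_for_condition(condition, timeout_s, interval_s=1):
    """Polls condition() every interval_s seconds and returns True as soon as it
    holds, or False once timeout_s seconds have elapsed (hypothetical helper)."""
    end_time = time() + timeout_s
    while time() < end_time:
        if condition():
            return True
        sleep(interval_s)
    return False

# Roughly equivalent to the inline idiom above, assuming the same test context:
# assert wait_for_condition(lambda: self._get_num_backends() >= cluster_size,
#                           self.STATE_CHANGE_TIMEOUT_S), \
#     "Number of backends did not increase within %s s" % self.STATE_CHANGE_TIMEOUT_S
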
Example #2
    def test_sequential_startup(self):
        """This test starts an executor group sequentially and observes that no queries are
    admitted until the group has been fully started."""
        # Larger group size so it takes a while to start up
        GROUP_SIZE = 4
        EXECUTOR_SLOTS = 3
        auto_scaler = AutoScaler(executor_slots=EXECUTOR_SLOTS,
                                 group_size=GROUP_SIZE,
                                 start_batch_size=1,
                                 max_groups=1)
        workload = None
        try:
            auto_scaler.start()
            sleep(self.INITIAL_STARTUP_TIME_S)

            workload = ConcurrentWorkload(self.QUERY, num_streams=5)
            LOG.info("Starting workload")
            workload.start()

            # Wait for first executor to start up
            assert any(self._get_num_executors() >= 1 or sleep(1)
                       for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
                "Number of backends did not increase within %s s" % self.STATE_CHANGE_TIMEOUT_S

            # Wait for remaining executors to start up and make sure that no queries are
            # admitted during startup
            end_time = time() + self.STATE_CHANGE_TIMEOUT_S
            startup_complete = False
            while time() < end_time:
                num_admitted = self._get_total_admitted_queries()
                num_backends = self._get_num_executors()
                if num_backends < GROUP_SIZE:
                    assert num_admitted == 0, "%s/%s backends started but %s queries have " \
                        "already been admitted." % (num_backends, GROUP_SIZE, num_admitted)
                if num_admitted > 0:
                    assert num_backends == GROUP_SIZE
                    startup_complete = True
                    break
                sleep(1)

            assert startup_complete, "Did not start up in %s s" % self.STATE_CHANGE_TIMEOUT_S

            LOG.info("Stopping workload")
            workload.stop()

            # Wait for workers to spin down
            assert any(self._get_num_executors() == 0 or sleep(1)
                       for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
                "Backends did not shut down within %s s" % self.STATE_CHANGE_TIMEOUT_S

        finally:
            if workload:
                workload.stop()
            LOG.info("Stopping auto scaler")
            auto_scaler.stop()
Example #3
    def test_single_group_maxed_out(self):
        """This test starts an auto scaler and limits it to a single executor group. It then
    makes sure that the query throughput does not exceed the expected limit."""
        GROUP_SIZE = 2
        EXECUTOR_SLOTS = 3
        auto_scaler = AutoScaler(executor_slots=EXECUTOR_SLOTS,
                                 group_size=GROUP_SIZE,
                                 max_groups=1)
        workload = None
        try:
            auto_scaler.start()
            sleep(self.INITIAL_STARTUP_TIME_S)

            workload = ConcurrentWorkload(self.QUERY, num_streams=5)
            LOG.info("Starting workload")
            workload.start()

            # Wait for workers to spin up
            assert any(self._get_num_executors() >= GROUP_SIZE or sleep(1)
                       for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
                "Number of backends did not increase within %s s" % self.STATE_CHANGE_TIMEOUT_S

            # Wait until we have admitted at least 10 queries
            assert any(self._get_total_admitted_queries() >= 10 or sleep(1)
                       for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
                "Did not admit enough queries within %s s" % self.STATE_CHANGE_TIMEOUT_S

            # Sample the number of running queries for a while
            SAMPLE_NUM_RUNNING_S = 30
            end_time = time() + SAMPLE_NUM_RUNNING_S
            num_running = []
            while time() < end_time:
                num_running.append(self._get_num_running_queries())
                sleep(1)

            # Must reach EXECUTOR_SLOTS but not exceed it
            assert max(num_running) == EXECUTOR_SLOTS, \
                "Unexpected number of running queries: %s" % num_running

            # Check that only a single group started
            assert self._get_num_executors() == GROUP_SIZE

            LOG.info("Stopping workload")
            workload.stop()

            # Wait for workers to spin down
            assert any(self._get_num_executors() == 0 or sleep(1)
                       for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
                "Backends did not shut down within %s s" % self.STATE_CHANGE_TIMEOUT_S

        finally:
            if workload:
                workload.stop()
            LOG.info("Stopping auto scaler")
            auto_scaler.stop()
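
The throughput check above relies on a simple sampling technique: record the number of
running queries once per second for a fixed window and assert that the maximum reaches
the slot limit but never exceeds it. A minimal sketch of that sampler as a standalone
helper (the name sample_values and its usage are ours, not from the test suite):

from time import sleep, time

def sample_values(getter, duration_s, interval_s=1):
    """Calls getter() once per interval for duration_s seconds and returns the
    observed values (hypothetical helper mirroring the sampling loop above)."""
    samples = []
    end_time = time() + duration_s
    while time() < end_time:
        samples.append(getter())
        sleep(interval_s)
    return samples

# Assumed usage in the same test context:
# num_running = sample_values(self._get_num_running_queries, SAMPLE_NUM_RUNNING_S)
# assert max(num_running) == EXECUTOR_SLOTS
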
Example #4
  def test_executor_concurrency(self):
    """Tests that the command line flag to limit query concurrency on executors works as
    expected."""
    # Query that runs on every executor
    QUERY = "select * from functional_parquet.alltypestiny \
             where month < 3 and id + random() < sleep(500);"
    self._add_executor_group("group1", 2, max_concurrent_queries=3)

    workload = None
    try:
      workload = ConcurrentWorkload(QUERY, num_streams=5)
      LOG.info("Starting workload")
      workload.start()

      RAMP_UP_TIMEOUT_S = 60
      # Wait until we have admitted at least 10 queries
      assert any(self._get_total_admitted_queries() >= 10 or sleep(1)
                  for _ in range(RAMP_UP_TIMEOUT_S)), \
          "Did not admit enough queries within %s s" % RAMP_UP_TIMEOUT_S

      # Sample the number of running queries for a while
      NUM_RUNNING_SAMPLES = 30
      num_running = []
      for _ in range(NUM_RUNNING_SAMPLES):
        num_running.append(self._get_num_running_queries())
        sleep(1)

      # Must reach 3 but not exceed it
      assert max(num_running) == 3, \
          "Unexpected number of running queries: %s" % num_running

    finally:
      LOG.info("Stopping workload")
      if workload:
        workload.stop()
Example #5
    def test_executor_concurrency(self):
        """Tests that the command line flag to limit query concurrency on executors works as
    expected."""
        # Query that runs on every executor
        QUERY = "select * from functional_parquet.alltypestiny \
             where month < 3 and id + random() < sleep(500);"

        self._add_executor_group("group1", 2, admission_control_slots=3)

        workload = None
        try:
            workload = ConcurrentWorkload(QUERY, num_streams=5)
            LOG.info("Starting workload")
            workload.start()

            RAMP_UP_TIMEOUT_S = 60
            # Wait until we have admitted at least 10 queries
            assert any(self._get_total_admitted_queries() >= 10 or sleep(1)
                        for _ in range(RAMP_UP_TIMEOUT_S)), \
                "Did not admit enough queries within %s s" % RAMP_UP_TIMEOUT_S

            # Sample the number of admitted queries on each backend for a while.
            # Note that the total number of queries in the cluster can be higher
            # than 3 because resources may be released on some backends, allowing
            # a new query to fit (see IMPALA-9073).
            NUM_SAMPLES = 30
            executor_slots_in_use = []
            for _ in range(NUM_SAMPLES):
                backends_json = json.loads(
                    self.impalad_test_service.read_debug_webpage(
                        'backends?json'))
                for backend in backends_json['backends']:
                    if backend['is_executor']:
                        executor_slots_in_use.append(
                            backend['admission_slots_in_use'])
                sleep(1)

            # Must reach 3 but not exceed it
            assert max(executor_slots_in_use) == 3, \
                "Unexpected number of slots in use: %s" % executor_slots_in_use

        finally:
            LOG.info("Stopping workload")
            if workload:
                workload.stop()
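
Example #5 reads the coordinator's backends?json debug webpage instead of the query
counters, because the per-backend admission_slots_in_use value is what the
admission_control_slots flag actually bounds. A standalone sketch of that probe,
assuming the default impalad web UI port of 25000 and only the JSON fields shown above
(backends, is_executor, admission_slots_in_use):

import json
try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen  # Python 2

def executor_slots_in_use(host="localhost", port=25000):
    """Returns the admission slots currently in use on each executor, read from the
    coordinator's debug webpage (sketch; host and port are assumed defaults)."""
    page = urlopen("http://%s:%s/backends?json" % (host, port)).read()
    backends_json = json.loads(page)
    return [backend['admission_slots_in_use']
            for backend in backends_json['backends']
            if backend['is_executor']]

# Assumed check: no executor should ever exceed its configured slot count.
# assert max(executor_slots_in_use()) <= 3
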
Example #6
    def test_sequential_startup(self):
        """This test starts an executor group sequentially and observes that no queries are
    admitted until the group has been fully started."""
        # Larger group size so it takes a while to start up
        GROUP_SIZE = 4
        EXECUTOR_SLOTS = 3
        auto_scaler = AutoScaler(executor_slots=EXECUTOR_SLOTS,
                                 group_size=GROUP_SIZE,
                                 start_batch_size=1,
                                 max_groups=1)
        workload = None
        try:
            auto_scaler.start()
            sleep(self.INITIAL_STARTUP_TIME_S)

            workload = ConcurrentWorkload(self.QUERY, num_streams=5)
            LOG.info("Starting workload")
            workload.start()

            # Wait for first executor to start up
            self.impalad_test_service.wait_for_metric_value(
                "cluster-membership.executor-groups.total",
                1,
                timeout=self.STATE_CHANGE_TIMEOUT_S,
                interval=1)

            # Wait for remaining executors to start up and make sure that no queries are
            # admitted during startup
            end_time = time() + self.STATE_CHANGE_TIMEOUT_S
            startup_complete = False
            cluster_size = GROUP_SIZE + 1  # +1 to include coordinator.
            while time() < end_time:
                num_admitted = self._get_total_admitted_queries()
                num_backends = self._get_num_backends()
                if num_backends < cluster_size:
                    assert num_admitted == 0, "%s/%s backends started but %s queries have " \
                        "already been admitted." % (num_backends, cluster_size, num_admitted)
                if num_admitted > 0:
                    assert num_backends == cluster_size
                    startup_complete = True
                    break
                sleep(1)

            assert startup_complete, "Did not start up in %s s" % self.STATE_CHANGE_TIMEOUT_S

            LOG.info("Stopping workload")
            workload.stop()

            # Wait for workers to spin down
            self.impalad_test_service.wait_for_metric_value(
                "cluster-membership.backends.total",
                1,
                timeout=self.STATE_CHANGE_TIMEOUT_S,
                interval=1)
            assert self.impalad_test_service.get_metric_value(
                "cluster-membership.executor-groups.total") == 0

        finally:
            if workload:
                workload.stop()
            LOG.info("Stopping auto scaler")
            auto_scaler.stop()
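
Where earlier examples poll with any(... or sleep(1) ...), this variant delegates the
wait to impalad_test_service.wait_for_metric_value(metric, value, timeout, interval).
Functionally it amounts to polling the named metric until it reaches the expected value
or the timeout expires; the sketch below only illustrates that behavior and is not the
real test-framework implementation:

from time import sleep, time

def wait_for_metric_value(service, metric_name, expected, timeout, interval=1):
    """Polls service.get_metric_value(metric_name) until it equals expected or the
    timeout expires (illustrative sketch only)."""
    end_time = time() + timeout
    value = None
    while time() < end_time:
        value = service.get_metric_value(metric_name)
        if value == expected:
            return value
        sleep(interval)
    raise AssertionError("Metric %s did not reach %s within %s s (last value: %s)"
                         % (metric_name, expected, timeout, value))
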
Example #7
    def test_single_group_maxed_out(self):
        """This test starts an auto scaler and limits it to a single executor group. It then
    makes sure that the query throughput does not exceed the expected limit."""
        GROUP_SIZE = 2
        EXECUTOR_SLOTS = 3
        auto_scaler = AutoScaler(executor_slots=EXECUTOR_SLOTS,
                                 group_size=GROUP_SIZE,
                                 max_groups=1)
        workload = None
        try:
            auto_scaler.start()
            sleep(self.INITIAL_STARTUP_TIME_S)

            workload = ConcurrentWorkload(self.QUERY, num_streams=5)
            LOG.info("Starting workload")
            workload.start()

            # Wait for workers to spin up
            cluster_size = GROUP_SIZE + 1  # +1 to include coordinator.
            self.impalad_test_service.wait_for_metric_value(
                "cluster-membership.backends.total",
                cluster_size,
                timeout=self.STATE_CHANGE_TIMEOUT_S,
                interval=1)

            # Wait until we have admitted at least 10 queries
            assert any(self._get_total_admitted_queries() >= 10 or sleep(1)
                       for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
                "Did not admit enough queries within %s s" % self.STATE_CHANGE_TIMEOUT_S

            # Sample the number of running queries for a while
            SAMPLE_NUM_RUNNING_S = 30
            end_time = time() + SAMPLE_NUM_RUNNING_S
            num_running = []
            while time() < end_time:
                num_running.append(self._get_num_running_queries())
                sleep(1)

            # Must reach EXECUTOR_SLOTS but not exceed it
            assert max(num_running) == EXECUTOR_SLOTS, \
                "Unexpected number of running queries: %s" % num_running

            # Check that only a single group started
            assert self.impalad_test_service.get_metric_value(
                "cluster-membership.executor-groups.total-healthy") == 1

            LOG.info("Stopping workload")
            workload.stop()

            # Wait for workers to spin down
            self.impalad_test_service.wait_for_metric_value(
                "cluster-membership.backends.total",
                1,
                timeout=self.STATE_CHANGE_TIMEOUT_S,
                interval=1)
            assert self.impalad_test_service.get_metric_value(
                "cluster-membership.executor-groups.total") == 0

        finally:
            if workload:
                workload.stop()
            LOG.info("Stopping auto scaler")
            auto_scaler.stop()
Example #8
    def test_single_workload(self):
        """This test exercises the auto-scaling logic in the admission controller. It spins up
    a base cluster (coordinator, catalog, statestore), runs some queries to observe that
    new executors are started, then stops the workload and observes that the cluster gets
    shutdown."""
        GROUP_SIZE = 2
        EXECUTOR_SLOTS = 3
        auto_scaler = AutoScaler(executor_slots=EXECUTOR_SLOTS,
                                 group_size=GROUP_SIZE)
        workload = None
        try:
            auto_scaler.start()
            sleep(self.INITIAL_STARTUP_TIME_S)

            workload = ConcurrentWorkload(self.QUERY, num_streams=5)
            LOG.info("Starting workload")
            workload.start()

            # Wait for workers to spin up
            cluster_size = GROUP_SIZE + 1  # +1 to include coordinator.
            assert any(self._get_num_backends() >= cluster_size or sleep(1)
                       for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
                "Number of backends did not increase within %s s" % self.STATE_CHANGE_TIMEOUT_S
            assert self.impalad_test_service.get_metric_value(
                "cluster-membership.executor-groups.total-healthy") >= 1

            # Wait until we have admitted at least 10 queries
            assert any(self._get_total_admitted_queries() >= 10 or sleep(1)
                       for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
                "Did not admit enough queries within %s s" % self.STATE_CHANGE_TIMEOUT_S
            single_group_query_rate = workload.get_query_rate()
            # Wait for second executor group to start
            cluster_size = (2 * GROUP_SIZE) + 1
            assert any(self._get_num_backends() >= cluster_size or sleep(1)
                       for _ in range(self.STATE_CHANGE_TIMEOUT_S)), \
                           "Number of backends did not reach %s within %s s" % (
                           cluster_size, self.STATE_CHANGE_TIMEOUT_S)
            assert self.impalad_test_service.get_metric_value(
                "cluster-membership.executor-groups.total-healthy") >= 2

            # Wait for query rate to exceed the maximum for a single executor group. In the past
            # we tried to wait for it to pass a higher threshold but on some platforms we saw
            # that it was too flaky.
            max_query_rate = 0
            # This barrier has been flaky in the past so we wait 2x as long as for the other
            # checks.
            end = time() + 2 * self.STATE_CHANGE_TIMEOUT_S
            while time() < end:
                current_rate = workload.get_query_rate()
                LOG.info("Current rate: %s" % current_rate)
                max_query_rate = max(max_query_rate, current_rate)
                if max_query_rate > single_group_query_rate:
                    break
                sleep(1)

            assert max_query_rate > single_group_query_rate, "Query rate did not exceed %s " \
                "within %s s. Maximum was %s. Cluster size is %s." % (single_group_query_rate,
                self.STATE_CHANGE_TIMEOUT_S, max_query_rate, cluster_size)

            LOG.info("Stopping workload")
            workload.stop()

            # Wait for workers to spin down
            self.impalad_test_service.wait_for_metric_value(
                TOTAL_BACKENDS_METRIC_NAME,
                1,
                timeout=self.STATE_CHANGE_TIMEOUT_S,
                interval=1)
            assert self.impalad_test_service.get_metric_value(
                "cluster-membership.executor-groups.total") == 0

        finally:
            if workload:
                workload.stop()
            LOG.info("Stopping auto scaler")
            auto_scaler.stop()
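
All of these tests share the same try/finally shape so that the workload and the auto
scaler are torn down even when an assertion fails. The same cleanup could be packaged
as a context manager; the sketch below uses only the start()/stop() methods shown
above, and the wrapper itself is ours, not part of the test suite:

from contextlib import contextmanager

@contextmanager
def running(resource):
    """Starts an object with a start()/stop() interface and guarantees stop() on
    exit (hypothetical convenience wrapper around the pattern used above)."""
    resource.start()
    try:
        yield resource
    finally:
        resource.stop()

# Assumed usage, mirroring the structure of the tests above:
# with running(AutoScaler(executor_slots=3, group_size=2)) as auto_scaler:
#     with running(ConcurrentWorkload(QUERY, num_streams=5)) as workload:
#         ...  # assertions against metrics and query counters
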