def test_batch_scale_pools(self, batch_pool, **kwargs):
        client = self.create_sharedkey_client(**kwargs)
        # Test Enable Autoscale
        interval = datetime.timedelta(minutes=6)
        response = client.pool.enable_auto_scale(
            batch_pool.name,
            auto_scale_formula='$TargetDedicatedNodes=2',
            auto_scale_evaluation_interval=interval)

        self.assertIsNone(response)

        # Test Evaluate Autoscale
        result = client.pool.evaluate_auto_scale(batch_pool.name, '$TargetDedicatedNodes=3')
        self.assertIsInstance(result, models.AutoScaleRun)
        self.assertEqual(
            result.results,
            '$TargetDedicatedNodes=3;$TargetLowPriorityNodes=0;$NodeDeallocationOption=requeue')
        
        # Test Disable Autoscale
        response = client.pool.disable_auto_scale(batch_pool.name)
        self.assertIsNone(response)

        # Test Pool Resize
        pool = client.pool.get(batch_pool.name)
        while self.is_live and pool.allocation_state != models.AllocationState.steady:
            time.sleep(5)
            pool = client.pool.get(batch_pool.name)
        self.assertEqual(pool.target_dedicated_nodes, 2)
        self.assertEqual(pool.target_low_priority_nodes, 0)
        params = models.PoolResizeParameter(target_dedicated_nodes=0, target_low_priority_nodes=2)
        response = client.pool.resize(batch_pool.name, params)
        self.assertIsNone(response)

        # Test Stop Pool Resize
        response = client.pool.stop_resize(batch_pool.name)
        self.assertIsNone(response)
        pool = client.pool.get(batch_pool.name)
        while self.is_live and pool.allocation_state != models.AllocationState.steady:
            time.sleep(5)
            pool = client.pool.get(batch_pool.name)
        # The test framework appears to have a recording issue: it cannot locate
        # the correct recording frame, so in live mode target_dedicated_nodes is 0
        # and target_low_priority_nodes is 2.
        self.assertEqual(pool.target_dedicated_nodes, 2)
        self.assertEqual(pool.target_low_priority_nodes, 0)

        # Test Get All Pools Lifetime Statistics
        stats = client.pool.get_all_lifetime_statistics()
        self.assertIsInstance(stats, models.PoolStatistics)
        self.assertIsNotNone(stats.resource_stats.avg_cpu_percentage)
        self.assertIsNotNone(stats.resource_stats.network_read_gi_b)
        self.assertIsNotNone(stats.resource_stats.disk_write_gi_b)
        self.assertIsNotNone(stats.resource_stats.peak_disk_gi_b)

        # Test Get Pool Usage Info
        info = list(client.pool.list_usage_metrics())
        self.assertEqual(info, [])
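The poll-until-steady pattern above recurs in several of these examples; factored out, it might look like the following minimal sketch (the helper name and timeout values are assumptions, not part of the original test):

import time


def wait_for_steady_state(client, pool_id, timeout_secs=300, poll_secs=5):
    """Hypothetical helper: poll a pool until its allocation state is steady."""
    # `models` is azure.batch.models, as imported by the test above.
    deadline = time.time() + timeout_secs
    pool = client.pool.get(pool_id)
    while pool.allocation_state != models.AllocationState.steady:
        if time.time() > deadline:
            raise TimeoutError("Pool {} never reached steady state".format(pool_id))
        time.sleep(poll_secs)
        pool = client.pool.get(pool_id)
    return pool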
Example #2
def wait_for_steady_nodes(batch_service_client: batch.BatchExtensionsClient,
                          pool_id: str, min_required_vms: int,
                          test_timeout: datetime.datetime, stop_thread):
    """Waits for a pool to go to steady state and then for the nodes to go to idle
    
    :param batch_service_client: A batch service extensions client.
    :type batch_service_client: batch.BatchExtensionsClient
    :param pool_id: ID of the pool to wait for steady nodes in
    :type pool_id: str
    :param min_required_vms: The minimum number of VMs that must be steady in order to run the test.
    :type min_required_vms: int
    :param test_timeout: Datetime at which to fail the test and run cleanup.
    :type test_timeout: datetime.datetime
    :param stop_thread: Callable that returns True when the test should immediately fail and perform cleanup.
    :type stop_thread: Callable[[], bool]
    """
    try:
        wait_for_pool_resize_operation(batch_service_client, pool_id,
                                       test_timeout, stop_thread)

    except ex.PoolResizeFailedException:
        # double the node count and try again
        pool = batch_service_client.pool.get(pool_id)
        if pool.target_low_priority_nodes > 0:
            new_node_count = pool.target_low_priority_nodes * 2
        else:
            new_node_count = pool.target_dedicated_nodes * 2
        logger.info("Resizing pool [{}] to '{}' nodes".format(
            pool_id, new_node_count))
        batch_service_client.pool.resize(
            pool_id,
            batchmodels.PoolResizeParameter(
                target_low_priority_nodes=new_node_count))
        # if exception thrown again here, will bubble up
        wait_for_pool_resize_operation(batch_service_client, pool_id,
                                       test_timeout, stop_thread)

    pool = batch_service_client.pool.get(pool_id)
    new_node_count = 0
    # Double the target of whichever node type the pool uses (low-priority or dedicated)
    if pool.target_low_priority_nodes > 0:
        new_node_count = pool.target_low_priority_nodes * 2
    else:
        new_node_count = pool.target_dedicated_nodes * 2

    max_allowed_failed_nodes = new_node_count - min_required_vms
    wait_for_enough_idle_vms(batch_service_client, pool_id, min_required_vms,
                             max_allowed_failed_nodes,
                             pool.state_transition_time, test_timeout,
                             stop_thread)
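wait_for_pool_resize_operation is called above but not shown; a plausible minimal sketch (reconstructed from how it is called, not the original helper) would poll the allocation state and raise on timeout or resize errors:

import datetime
import time


def wait_for_pool_resize_operation(batch_service_client, pool_id,
                                   test_timeout, stop_thread):
    """Hypothetical sketch: block until the pool leaves the resizing state."""
    # batchmodels and ex are assumed to be the same imports the example uses.
    pool = batch_service_client.pool.get(pool_id)
    while pool.allocation_state == batchmodels.AllocationState.resizing:
        if datetime.datetime.now() > test_timeout or stop_thread():
            raise ex.PoolResizeFailedException(
                "Timed out or stopped while resizing pool [{}]".format(pool_id))
        time.sleep(15)
        pool = batch_service_client.pool.get(pool_id)
    if pool.resize_errors:
        raise ex.PoolResizeFailedException(
            "Pool [{}] reported resize errors".format(pool_id))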
Example #3

    def expand_pool(self, size):
        """
        Resize function
        :param size: number of dedicated nodes to resize the pool to
        :return: True if successful; False otherwise.
        """
        print(f"Attempting to resize... {size}")
        try:
            self.client.pool.resize(
                pool_id=self.pool_id,
                pool_resize_parameter=batchmodels.PoolResizeParameter(
                    target_dedicated_nodes=size))
            return True
        except Exception as e:
            print(f"something went wrong in the resize! {e}")
            return False
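A hypothetical call site (the scaler instance wrapping self.client and self.pool_id is an assumption):

# scaler is an instance of the class that defines expand_pool
if not scaler.expand_pool(4):
    print("resize request failed; pool left at its current size")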
Example #4
    def resize_pool(self, pool_id, target_dedicated, target_low_priority):
        client = self._get_batch_client()
        pool = self.get_pool(pool_id)
        if pool:
            # Cancel any in-flight resize and wait until the pool is steady
            # before requesting a new one.
            if pool.allocation_state == batchmodels.AllocationState.resizing:
                client.pool.stop_resize(pool_id)

                while (pool.allocation_state !=
                       batchmodels.AllocationState.steady):
                    time.sleep(15)
                    pool = self.get_pool(pool_id)

            resize_param = batchmodels.PoolResizeParameter(
                target_dedicated_nodes=target_dedicated,
                target_low_priority_nodes=target_low_priority)
            client.pool.resize(pool_id, resize_param)
Example #5

    def resize_pool(self,
                    pool_id: str = None,
                    dedicated_nodes: int = 0,
                    low_pri_nodes: int = 9):

        if pool_id is None:
            pool_id = self.config["POOL"]["POOL_ID"]
        logger.info(
            f"Resizing pool {pool_id} to {low_pri_nodes} low priority nodes and {dedicated_nodes} dedicated nodes"
        )
        pool_resize_param = batchmodels.PoolResizeParameter(
            target_low_priority_nodes=low_pri_nodes,
            target_dedicated_nodes=dedicated_nodes,
        )

        self.batch_client.pool.resize(pool_id=pool_id,
                                      pool_resize_parameter=pool_resize_param)
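Example #6

The fragment below is truncated mid-function; from how its names are used, it relies on supporting definitions roughly like this sketch (a reconstruction, not the original code):

import collections
import datetime

DELAY = datetime.timedelta(minutes=10)  # assumed scale-down delay window
# .time/.size access plus popleft() imply a namedtuple kept in a deque
PoolDemand = collections.namedtuple("PoolDemand", ["time", "size"])
pool_sizes = collections.defaultdict(collections.deque)  # pool_name -> deque
POOL_MAX_NODES = 100  # assumed per-pool cap referenced by the output dict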
            # keep the historical targets
            now = datetime.datetime.now()
            pool_sizes[pool_name].append(PoolDemand(now, new_target))
            # drop history entries older than the delay window
            while (now - pool_sizes[pool_name][0].time) > DELAY:
                pool_sizes[pool_name].popleft()
            # hold new_target at the maximum demand seen within the delay window
            new_target = max(new_target,
                             max([i.size for i in pool_sizes[pool_name]]))

            resized = False
            if new_target != pool_current_nodes and pool.allocation_state == batchmodels.AllocationState.steady:
                resized = True
                batch_client.pool.resize(
                    pool_name,
                    batchmodels.PoolResizeParameter(
                        target_dedicated_nodes=new_target))

            output.append({
                pool_name: {
                    "running": nrunning,
                    "ready": nready,
                    "waiting": nwaiting,
                    "current": pool_current_nodes,
                    "required": required_nodes,
                    "max": POOL_MAX_NODES,
                    "target": new_target,
                    "resized": resized
                }
            })

        if args.debug > 1: