def test_inconsistent_delegate(self):
    """The cache must not retain stale data when list_scale_sets and
    list_scale_set_instances briefly disagree after a write-through update.
    """
    scale_set = AzureScaleSet('eastus', 'test_rg', 'test', 'Standard_H16', 0, 'Succeeded')
    updated_scale_set = AzureScaleSet('eastus', 'test_rg', 'test', 'Standard_H16', 1, 'Succeeded')
    instance = AzureScaleSetInstance('fake_id', 'fake_vm', datetime.now())
    done_future = CompletedFuture(None)

    # Backend initially reports one empty scale set.
    mock_api = mock.Mock(AzureApi)
    mock_api.list_scale_sets = mock.Mock(return_value=[scale_set])
    mock_api.list_scale_set_instances = mock.Mock(return_value=[])
    mock_api.update_scale_set = mock.Mock(return_value=done_future)

    cached_api = AzureWriteThroughCachedApi(mock_api)

    # First reads populate the cache; the delegate is hit exactly once each.
    self.assertEqual(cached_api.list_scale_sets('test_rg'), [scale_set])
    self.assertEqual(cached_api.list_scale_set_instances(scale_set), [])
    mock_api.list_scale_sets.assert_called_once_with('test_rg')
    mock_api.list_scale_set_instances.assert_called_once_with(scale_set)

    # Write-through update should invalidate the cached entries.
    cached_api.update_scale_set(scale_set, 1).result()
    mock_api.update_scale_set.assert_called_once_with(scale_set, 1)

    # Fresh mocks so we can confirm the delegate is consulted again.
    mock_api.list_scale_sets = mock.Mock(return_value=[updated_scale_set])
    mock_api.list_scale_set_instances = mock.Mock(return_value=[])
    self.assertEqual(cached_api.list_scale_sets('test_rg'), [updated_scale_set])
    self.assertEqual(cached_api.list_scale_set_instances(updated_scale_set), [])
    mock_api.list_scale_sets.assert_called_once_with('test_rg')
    mock_api.list_scale_set_instances.assert_called_once_with(updated_scale_set)

    # Test that even if there is inconsistency between the list_scale_sets and list_scale_set_instances, the
    # cache doesn't end up with bad data
    mock_api.list_scale_set_instances = mock.Mock(return_value=[instance])
    self.assertEqual(cached_api.list_scale_set_instances(updated_scale_set), [instance])
    mock_api.list_scale_set_instances.assert_called_once_with(updated_scale_set)
def test_update(self):
    """A write-through capacity update must evict both the scale-set list
    and the per-scale-set instance list from the cache.
    """
    scale_set = AzureScaleSet('eastus', 'test_rg', 'test', 'Standard_H16', 1, 'Succeeded')
    updated_scale_set = AzureScaleSet('eastus', 'test_rg', 'test', 'Standard_H16', 0, 'Succeeded')
    instance = AzureScaleSetInstance('fake_id', 'fake_vm', datetime.now())
    done_future = CompletedFuture(None)

    # Backend starts with one scale set containing a single instance.
    mock_api = mock.Mock(AzureApi)
    mock_api.list_scale_sets = mock.Mock(return_value=[scale_set])
    mock_api.list_scale_set_instances = mock.Mock(return_value=[instance])
    mock_api.update_scale_set = mock.Mock(return_value=done_future)

    cached_api = AzureWriteThroughCachedApi(mock_api)

    # Populate the cache, then scale down through the cached API.
    self.assertEqual(cached_api.list_scale_sets('test_rg'), [scale_set])
    self.assertEqual(cached_api.list_scale_set_instances(scale_set), [instance])
    cached_api.update_scale_set(scale_set, 0).result()
    mock_api.list_scale_sets.assert_called_once_with('test_rg')
    mock_api.list_scale_set_instances.assert_called_once_with(scale_set)
    mock_api.update_scale_set.assert_called_once_with(scale_set, 0)

    # After the update, reads must go back to the delegate and observe
    # the new (empty) state rather than the cached pre-update state.
    mock_api.list_scale_sets = mock.Mock(return_value=[updated_scale_set])
    mock_api.list_scale_set_instances = mock.Mock(return_value=[])
    self.assertEqual(cached_api.list_scale_sets('test_rg'), [updated_scale_set])
    self.assertEqual(cached_api.list_scale_set_instances(updated_scale_set), [])
    mock_api.list_scale_sets.assert_called_once_with('test_rg')
    mock_api.list_scale_set_instances.assert_called_once_with(updated_scale_set)
def set_desired_capacity(self, new_desired_capacity):
    """
    Sets the desired capacity of the underlying ASG directly. Note that this is
    for internal control. For scaling purposes, please use scale() instead.

    Only scale-out is supported (asserts new_desired_capacity >= current).
    Capacity is distributed across scale sets in (priority, name) order, capped
    per scale set at _SCALE_SET_SIZE_LIMIT and globally by the remaining
    instance quota reported by the client. Scale sets currently 'Updating' or
    'Failed' are skipped. The cached scale-set capacities and
    self.desired_capacity are updated to reflect what was actually requested.

    Returns:
        A future resolving to True once all issued scale-set updates complete,
        or a CompletedFuture(False) when no scale-out is needed.
    """
    scale_out = new_desired_capacity - self.desired_capacity
    assert scale_out >= 0
    if scale_out == 0:
        return CompletedFuture(False)

    remaining_instances = self.client.get_remaining_instances(
        self.resource_group, self.instance_type)
    futures = []
    # Fill scale sets in priority order (name breaks ties) until the requested
    # scale-out or the subscription quota is exhausted.
    for scale_set in sorted(self.scale_sets.values(), key=lambda x: (x.priority, x.name)):
        if scale_set.capacity < _SCALE_SET_SIZE_LIMIT:
            if self.slow_scale:
                # Conservative mode: grow each scale set by one instance at a time.
                new_group_capacity = scale_set.capacity + 1
            else:
                new_group_capacity = min(
                    _SCALE_SET_SIZE_LIMIT,
                    scale_set.capacity + scale_out,
                    scale_set.capacity + remaining_instances)
            if scale_set.provisioning_state == 'Updating':
                # warn() is a deprecated alias of warning(); use the real API.
                logger.warning("Update of {} already in progress".format(
                    scale_set.name))
                continue
            if scale_set.provisioning_state == 'Failed':
                logger.error(
                    "{} failed provisioning. Skipping it for scaling.".
                    format(scale_set.name))
                continue
            scale_out -= (new_group_capacity - scale_set.capacity)
            remaining_instances -= (new_group_capacity - scale_set.capacity)
            # Update our cached version
            self.scale_sets[scale_set.name].capacity = new_group_capacity
            futures.append(
                self.client.update_scale_set(scale_set, new_group_capacity))
            logger.info("Scaling Azure Scale Set {} to {}".format(
                scale_set.name, new_group_capacity))
            if scale_out == 0 or remaining_instances == 0:
                break

    if scale_out > 0:
        logger.error(
            "Not enough scale sets to reach desired capacity {} for {}".
            format(new_desired_capacity, self))

    # Record what we actually achieved, not just what was asked for.
    self.desired_capacity = new_desired_capacity - scale_out
    logger.info("ASG: {} new_desired_capacity: {}".format(
        self, new_desired_capacity))
    return TransformingFuture(True, AllCompletedFuture(futures))