def test_kazoo_client_stops_after_supervisor(self, mock_setup_converger, mock_txkz, mock_setup_scheduler, mock_sh):
    """
    The kazoo client is stopped only after the supervisor has finished
    stopping (i.e. after its deferred pool has drained).
    """
    cfg = test_config.copy()
    cfg['zookeeper'] = {'hosts': 'zk_hosts', 'threads': 20}

    # Fake kazoo client whose start/stop both succeed immediately.
    fake_kz = mock.Mock(spec=['start', 'stop'])
    fake_kz.start.return_value = defer.succeed(None)
    fake_kz.stop.return_value = defer.succeed(None)
    mock_txkz.return_value = fake_kz

    parent = makeService(cfg)

    # Park an unfired deferred in the supervisor's pool so stopService
    # cannot complete yet.
    pending = defer.Deferred()
    get_supervisor().deferred_pool.add(pending)

    stop_d = parent.stopService()
    self.assertNoResult(stop_d)
    self.assertFalse(fake_kz.stop.called)

    # Draining the pool lets shutdown complete; only then is kazoo stopped.
    pending.callback(None)
    self.successResultOf(stop_d)
    self.assertTrue(fake_kz.stop.called)
def execute_launch_config(log, transaction_id, state, launch, scaling_group, delta):
    """
    Execute a launch config some number of times.

    :return: Deferred
    """
    # Nothing to launch for a non-positive delta.
    if delta <= 0:
        return None

    def _update_state(pending_results):
        """
        Record each started job on the group state.
        """
        log.msg('updating state')
        for job_id in pending_results:
            state.add_job(job_id)

    log.msg("Launching some servers.")
    supervisor = get_supervisor()
    job_deferreds = [
        _Job(log, transaction_id, scaling_group, supervisor).start(launch)
        for _ in range(delta)
    ]
    # Gather all job starts; consumeErrors so a single failure doesn't
    # leave unhandled errors on the sibling deferreds.
    outcome = defer.gatherResults(job_deferreds, consumeErrors=True)
    outcome.addCallback(_update_state)
    outcome.addErrback(unwrap_first_error)
    return outcome
def test_authenticator(self, mock_ss, mock_ga, mock_reactor):
    """
    Authenticator is generated and passed to SupervisorService
    """
    self.addCleanup(lambda: set_supervisor(None))
    makeService(test_config)
    # The supervisor must hold exactly the object the generator produced,
    # and the generator must have been invoked once with the reactor and
    # the identity section of the config.
    self.assertIdentical(get_supervisor().authenticator,
                         mock_ga.return_value)
    mock_ga.assert_called_once_with(mock_reactor, test_config['identity'])
def edit_launch_config(self, request, data):
    """
    Edit the launch configuration for a scaling group, which includes the
    details of how to create a server, from what image, which load
    balancers to join it to, and what networks to add it to, and other
    metadata. This data is provided in the request body in JSON format.
    If successful, no response body will be returned.

    Example request::

        {
            "type": "launch_server",
            "args": {
                "server": {
                    "flavorRef": 3,
                    "name": "webhead",
                    "imageRef": "0d589460-f177-4b0f-81c1-8ab8903ac7d8",
                    "OS-DCF:diskConfig": "AUTO",
                    "metadata": {
                        "mykey": "myvalue"
                    },
                    "personality": [
                        {
                            "path": "/root/.ssh/authorized_keys",
                            "contents": "ssh-rsa A... [email protected]"
                        }
                    ],
                    "networks": [
                        {
                            "uuid": "11111111-1111-1111-1111-111111111111"
                        }
                    ]
                },
                "loadBalancers": [
                    {
                        "loadBalancerId": 2200,
                        "port": 8081
                    }
                ]
            }
        }

    The exact update cases are still up in the air -- can the user
    provide a mimimal schema, and if so, what happens with defaults?

    Nova should validate the image before saving the new config.
    Users may have an invalid configuration based on dependencies.
    """
    group = self.store.get_scaling_group(
        self.log, self.tenant_id, self.group_id)

    # Normalize and schema-check locally, then let the supervisor perform
    # the (possibly remote) launch-config validation before persisting.
    launch = normalize_launch_config(data)
    group_schemas.validate_launch_config_servicenet(launch)

    d = get_supervisor().validate_launch_config(
        self.log, self.tenant_id, launch)
    d.addCallback(lambda _: group.update_launch_config(launch))
    return d
def test_supervisor_service_set_by_default(self, supervisor):
    """
    A SupervisorService service is added to the Multiservice, and set as
    default supervisor
    """
    self.addCleanup(lambda: set_supervisor(None))
    service_parent = makeService(test_config)
    # The named child service and the module-level default supervisor
    # must be the same service.
    named_service = service_parent.getServiceNamed('supervisor')
    self.assertEqual(get_supervisor(), named_service)
def test_health_checker_no_zookeeper(self, supervisor):
    """
    A health checker is constructed by default with the store and kazoo
    health check
    """
    self.addCleanup(lambda: set_supervisor(None))

    # No checker exists before the service tree is built.
    self.assertIsNone(self.health_checker)
    makeService(test_config)
    self.assertIsNotNone(self.health_checker)

    # Each registered check points at the corresponding component's check.
    checks = self.health_checker.checks
    self.assertEqual(checks['store'], self.store.health_check)
    self.assertEqual(checks['kazoo'], self.store.kazoo_health_check)
    self.assertEqual(checks['supervisor'], get_supervisor().health_check)
def delete_active_servers(log, transaction_id, scaling_group, delta, state):
    """
    Start deleting active servers

    Returns a list of Deferreds corresponding to deletion of a server.
    Each Deferred in the list gets fired when that server is deleted
    """
    # Pick the servers to remove, and take them out of the active set
    # before any deletions are kicked off.
    evictees = find_servers_to_evict(log, state, delta)
    for victim in evictees:
        state.remove_active(victim['id'])

    # Kick off one deletion per evicted server.
    supervisor = get_supervisor()
    return [supervisor.execute_delete_server(log, transaction_id,
                                             scaling_group, info)
            for info in evictees]
def delete_active_servers(log, transaction_id, scaling_group, delta, state):
    """
    Start deleting active servers

    Returns a list of Deferreds corresponding to deletion of a server.
    Each Deferred in the list gets fired when that server is deleted
    """
    servers_to_evict = find_servers_to_evict(log, state, delta)

    # Phase 1: drop every chosen server from the active set.
    for server in servers_to_evict:
        state.remove_active(server['id'])

    # Phase 2: start a deletion for each of them.
    deletions = []
    for server_info in servers_to_evict:
        deletions.append(
            get_supervisor().execute_delete_server(
                log, transaction_id, scaling_group, server_info))
    return deletions
def delete_active_servers(log, transaction_id, scaling_group, delta, state,
                          clock=None):
    """
    Start deleting active servers jobs.

    Picks ``delta`` servers to evict, removes them from the group state's
    active set, and schedules one ``_DeleteJob`` per server, staggered by
    ``DELETE_WAIT_INTERVAL`` seconds to avoid hammering the backend. The
    jobs are tracked in the supervisor's deferred pool.

    :param log: a bound logger
    :param transaction_id: transaction id for the delete jobs
    :param scaling_group: the scaling group being shrunk
    :param delta: number of servers to delete
    :param state: the group state whose active set is updated
    :param clock: an ``IReactorTime`` provider used to stagger the jobs;
        defaults to the global reactor.

    :return: ``None``; completion is tracked via the supervisor's pool.
    """
    # find servers to evict
    servers_to_evict = find_servers_to_evict(log, state, delta)

    # remove all the active servers to be deleted
    for server in servers_to_evict:
        state.remove_active(server['id'])

    # `clock` is a None-default sentinel: test explicitly for None rather
    # than truthiness so a falsy-but-valid clock object is never silently
    # replaced by the global reactor.
    if clock is None:
        from twisted.internet import reactor
        clock = reactor

    # then start deleting those servers, one job every DELETE_WAIT_INTERVAL
    # seconds, keeping each job's deferred alive in the supervisor's pool.
    supervisor = get_supervisor()
    for i, server_info in enumerate(servers_to_evict):
        job = _DeleteJob(log, transaction_id, scaling_group, server_info,
                         supervisor)
        d = deferLater(clock, i * DELETE_WAIT_INTERVAL, job.start)
        supervisor.deferred_pool.add(d)
def create_new_scaling_group(self, request, data):
    """
    Create a new scaling group, given the general scaling group
    configuration, launch configuration, and optional scaling policies.
    This data is provided in the request body in JSON format. If
    successful, the created group in JSON format containing id and links
    is returned.

    Example request body containing some scaling policies::

        {
            "launchConfiguration": {
                "args": {
                    "loadBalancers": [
                        {
                            "port": 8080,
                            "loadBalancerId": 9099
                        }
                    ],
                    "server": {
                        "name": "autoscale_server",
                        "imageRef": "0d589460-f177-4b0f-81c1-8ab8903ac7d8",
                        "flavorRef": "2",
                        "OS-DCF:diskConfig": "AUTO",
                        "metadata": {
                            "build_config": "core",
                            "meta_key_1": "meta_value_1",
                            "meta_key_2": "meta_value_2"
                        },
                        "networks": [
                            {
                                "uuid": "11111111-1111-1111-1111-111111111111"
                            },
                            {
                                "uuid": "00000000-0000-0000-0000-000000000000"
                            }
                        ],
                        "personality": [
                            {
                                "path": "/root/.csivh",
                                "contents": "VGhpcyBpcyBhIHRlc3QgZmlsZS4="
                            }
                        ]
                    }
                },
                "type": "launch_server"
            },
            "groupConfiguration": {
                "maxEntities": 10,
                "cooldown": 360,
                "name": "testscalinggroup198547",
                "minEntities": 0,
                "metadata": {
                    "gc_meta_key_2": "gc_meta_value_2",
                    "gc_meta_key_1": "gc_meta_value_1"
                }
            },
            "scalingPolicies": [
                {
                    "cooldown": 0,
                    "type": "webhook",
                    "name": "scale up by 1",
                    "change": 1
                }
            ]
        }

    The ``scalingPolicies`` attribute can also be an empty list, or just
    left out entirely.

    Example response body to the above request::

        {
            "group": {
                "launchConfiguration": {
                    "args": {
                        "loadBalancers": [
                            {
                                "port": 8080,
                                "loadBalancerId": 9099
                            }
                        ],
                        "server": {
                            "name": "autoscale_server",
                            "imageRef": "0d589460-f177-4b0f-81c1-8ab8903ac7d8",
                            "flavorRef": "2",
                            "OS-DCF:diskConfig": "AUTO",
                            "personality": [
                                {
                                    "path": "/root/.csivh",
                                    "contents": "VGhpcyBpcyBhIHRlc3QgZmlsZS4="
                                }
                            ],
                            "networks": [
                                {
                                    "uuid": "11111111-1111-1111-1111-111111111111"
                                },
                                {
                                    "uuid": "00000000-0000-0000-0000-000000000000"
                                }
                            ],
                            "metadata": {
                                "build_config": "core",
                                "meta_key_1": "meta_value_1",
                                "meta_key_2": "meta_value_2"
                            }
                        }
                    },
                    "type": "launch_server"
                },
                "groupConfiguration": {
                    "maxEntities": 10,
                    "cooldown": 360,
                    "name": "testscalinggroup198547",
                    "minEntities": 0,
                    "metadata": {
                        "gc_meta_key_2": "gc_meta_value_2",
                        "gc_meta_key_1": "gc_meta_value_1"
                    }
                },
                "state": {
                    "active": [],
                    "activeCapacity": 0,
                    "desiredCapacity": 0,
                    "paused": false,
                    "pendingCapacity": 0,
                    "name": "testscalinggroup198547"
                },
                "scalingPolicies": [
                    {
                        "name": "scale up by 1",
                        "links": [
                            {
                                "href": "https://ord.autoscale.api.rackspacecloud.com/
                                v1.0/829409/groups/6791761b-821a-4d07-820d-0b2afc7dd7f6/
                                policies/dceb14ac-b2b3-4f06-aac9-a5b6cd5d40e1/",
                                "rel": "self"
                            }
                        ],
                        "cooldown": 0,
                        "type": "webhook",
                        "id": "dceb14ac-b2b3-4f06-aac9-a5b6cd5d40e1",
                        "change": 1
                    }
                ],
                "links": [
                    {
                        "href": "https://ord.autoscale.api.rackspacecloud.com/
                        v1.0/829409/groups/6791761b-821a-4d07-820d-0b2afc7dd7f6/",
                        "rel": "self"
                    }
                ],
                "id": "6791761b-821a-4d07-820d-0b2afc7dd7f6"
            }
        }
    """
    # Fill in optional group-config fields before the min/max sanity check.
    data['groupConfiguration'].setdefault('maxEntities', MAX_ENTITIES)
    data['groupConfiguration'].setdefault('metadata', {})

    if data['groupConfiguration']['minEntities'] > data['groupConfiguration']['maxEntities']:
        raise InvalidMinEntities("minEntities must be less than or equal to maxEntities")

    # Pipeline: supervisor-validate the launch config, then persist the
    # group; callbacks below run in order and each passes `result` through.
    deferred = get_supervisor().validate_launch_config(
        self.log, self.tenant_id, data['launchConfiguration'])

    deferred.addCallback(
        lambda _: self.store.create_scaling_group(self.log,
            self.tenant_id, data['groupConfiguration'], data['launchConfiguration'], data.get('scalingPolicies', None)))

    def _do_obey_config_change(result):
        # Apply the config-change logic to the new group so it converges
        # toward its configured entity counts; returns `result` unchanged.
        group_id = result['id']
        config = result['groupConfiguration']
        group = self.store.get_scaling_group(self.log, self.tenant_id, group_id)
        d = group.modify_state(partial(controller.obey_config_change, self.log,
                                       transaction_id(request), config))
        return d.addCallback(lambda _: result)

    deferred.addCallback(_do_obey_config_change)

    def _add_to_bobby(result, client):
        # Register the new group with bobby; returns `result` unchanged.
        d = client.create_group(self.tenant_id, result['id'])
        return d.addCallback(lambda _: result)

    # bobby is optional; only hook it into the chain when configured.
    bobby = get_bobby()
    if bobby is not None:
        deferred.addCallback(_add_to_bobby, bobby)

    def _format_output(result):
        # Shape the stored record into the documented response body and
        # point the Location header at the newly created group.
        uuid = result['id']
        result["state"] = format_state_dict(result["state"])
        request.setHeader(
            "Location", get_autoscale_links(self.tenant_id, uuid, format=None))
        result["links"] = get_autoscale_links(self.tenant_id, uuid)
        linkify_policy_list(result['scalingPolicies'], self.tenant_id, uuid)
        result['scalingPolicies_links'] = get_policies_links(
            result['scalingPolicies'], self.tenant_id, uuid, rel='policies')
        return {"group": result}

    deferred.addCallback(_format_output)
    deferred.addCallback(json.dumps)
    return deferred
def create_new_scaling_group(self, request, data):
    """
    Create a new scaling group, given the general scaling group
    configuration, launch configuration, and optional scaling policies.
    This data is provided in the request body in JSON format. If
    successful, the created group in JSON format containing id and links
    is returned.

    Example request body containing some scaling policies::

        {
            "launchConfiguration": {
                "args": {
                    "loadBalancers": [
                        {
                            "port": 8080,
                            "loadBalancerId": 9099
                        }
                    ],
                    "server": {
                        "name": "autoscale_server",
                        "imageRef": "0d589460-f177-4b0f-81c1-8ab8903ac7d8",
                        "flavorRef": "2",
                        "OS-DCF:diskConfig": "AUTO",
                        "metadata": {
                            "meta_key_1": "meta_value_1",
                            "meta_key_2": "meta_value_2"
                        },
                        "networks": [
                            {
                                "uuid": "11111111-1111-1111-1111-111111111111"
                            },
                            {
                                "uuid": "00000000-0000-0000-0000-000000000000"
                            }
                        ],
                        "personality": [
                            {
                                "path": "/root/.csivh",
                                "contents": "VGhpcyBpcyBhIHRlc3QgZmlsZS4="
                            }
                        ]
                    }
                },
                "type": "launch_server"
            },
            "groupConfiguration": {
                "maxEntities": 10,
                "cooldown": 360,
                "name": "testscalinggroup198547",
                "minEntities": 0,
                "metadata": {
                    "gc_meta_key_2": "gc_meta_value_2",
                    "gc_meta_key_1": "gc_meta_value_1"
                }
            },
            "scalingPolicies": [
                {
                    "cooldown": 0,
                    "type": "webhook",
                    "name": "scale up by 1",
                    "change": 1
                }
            ]
        }

    The ``scalingPolicies`` attribute can also be an empty list, or just
    left out entirely.

    Example response body to the above request::

        {
            "group": {
                "launchConfiguration": {
                    "args": {
                        "loadBalancers": [
                            {
                                "port": 8080,
                                "loadBalancerId": 9099
                            }
                        ],
                        "server": {
                            "name": "autoscale_server",
                            "imageRef": "0d589460-f177-4b0f-81c1-8ab8903ac7d8",
                            "flavorRef": "2",
                            "OS-DCF:diskConfig": "AUTO",
                            "personality": [
                                {
                                    "path": "/root/.csivh",
                                    "contents": "VGhpcyBpcyBhIHRlc3QgZmlsZS4="
                                }
                            ],
                            "networks": [
                                {
                                    "uuid": "11111111-1111-1111-1111-111111111111"
                                },
                                {
                                    "uuid": "00000000-0000-0000-0000-000000000000"
                                }
                            ],
                            "metadata": {
                                "meta_key_1": "meta_value_1",
                                "meta_key_2": "meta_value_2"
                            }
                        }
                    },
                    "type": "launch_server"
                },
                "groupConfiguration": {
                    "maxEntities": 10,
                    "cooldown": 360,
                    "name": "testscalinggroup198547",
                    "minEntities": 0,
                    "metadata": {
                        "gc_meta_key_2": "gc_meta_value_2",
                        "gc_meta_key_1": "gc_meta_value_1"
                    }
                },
                "state": {
                    "active": [],
                    "activeCapacity": 0,
                    "desiredCapacity": 0,
                    "paused": false,
                    "pendingCapacity": 0,
                    "name": "testscalinggroup198547"
                },
                "scalingPolicies": [
                    {
                        "name": "scale up by 1",
                        "links": [
                            {
                                "href": "https://ord.autoscale.api.rackspacecloud.com/
                                v1.0/829409/groups/6791761b-821a-4d07-820d-0b2afc7dd7f6/
                                policies/dceb14ac-b2b3-4f06-aac9-a5b6cd5d40e1/",
                                "rel": "self"
                            }
                        ],
                        "cooldown": 0,
                        "type": "webhook",
                        "id": "dceb14ac-b2b3-4f06-aac9-a5b6cd5d40e1",
                        "change": 1
                    }
                ],
                "links": [
                    {
                        "href": "https://ord.autoscale.api.rackspacecloud.com/
                        v1.0/829409/groups/6791761b-821a-4d07-820d-0b2afc7dd7f6/",
                        "rel": "self"
                    }
                ],
                "id": "6791761b-821a-4d07-820d-0b2afc7dd7f6"
            }
        }
    """
    # Fill in optional group-config fields before the min/max sanity check.
    group_cfg = data['groupConfiguration']
    group_cfg.setdefault('maxEntities', MAX_ENTITIES)
    group_cfg.setdefault('metadata', {})

    if group_cfg['minEntities'] > group_cfg['maxEntities']:
        raise InvalidMinEntities(
            "minEntities must be less than or equal to maxEntities")

    # ServiceNet validation only applies to server-launching configs.
    if data['launchConfiguration']['type'] == 'launch_server':
        validate_launch_config_servicenet(data['launchConfiguration'])

    # Pipeline: supervisor-validate the launch config, then persist the
    # group; callbacks below run in order and each passes `result` through.
    deferred = get_supervisor().validate_launch_config(
        self.log, self.tenant_id, data['launchConfiguration'])

    deferred.addCallback(
        lambda _: self.store.create_scaling_group(
            self.log, self.tenant_id, group_cfg,
            normalize_launch_config(data['launchConfiguration']),
            data.get('scalingPolicies', None)))

    def _do_obey_config_change(result):
        # Apply the config-change logic (with convergence triggering) to
        # the new group so it converges toward its configured entity
        # counts; returns `result` unchanged.
        group_id = result['id']
        config = result['groupConfiguration']
        launch = result['launchConfiguration']
        group = self.store.get_scaling_group(
            self.log, self.tenant_id, group_id)
        log = self.log.bind(scaling_group_id=group_id)
        d = controller.modify_and_trigger(
            self.dispatcher,
            group,
            bound_log_kwargs(log),
            partial(
                controller.obey_config_change, log,
                transaction_id(request), config, launch_config=launch),
            modify_state_reason='create_new_scaling_group')
        return d.addCallback(lambda _: result)

    deferred.addCallback(_do_obey_config_change)

    def _add_to_bobby(result, client):
        # Register the new group with bobby; returns `result` unchanged.
        d = client.create_group(self.tenant_id, result['id'])
        return d.addCallback(lambda _: result)

    # bobby is optional; only hook it into the chain when configured.
    bobby = get_bobby()
    if bobby is not None:
        deferred.addCallback(_add_to_bobby, bobby)

    def _format_output(result):
        # Shape the stored record into the documented response body and
        # point the Location header at the newly created group.
        uuid = result['id']
        result["state"] = format_state_dict(result["state"])
        request.setHeader(
            "Location",
            get_autoscale_links(self.tenant_id, uuid, format=None))
        result["links"] = get_autoscale_links(self.tenant_id, uuid)
        linkify_policy_list(
            result['scalingPolicies'], self.tenant_id, uuid)
        result['scalingPolicies_links'] = get_policies_links(
            result['scalingPolicies'], self.tenant_id, uuid,
            rel='policies')
        return {"group": result}

    deferred.addCallback(_format_output)
    deferred.addCallback(json.dumps)
    return deferred