def delete_policy(request, log, tenant_id, scaling_group_id, policy_id):
    """
    Delete a scaling policy.

    On success no response body is returned.
    """
    group = get_store().get_scaling_group(log, tenant_id, scaling_group_id)
    return group.delete_policy(policy_id)
def execute_webhook(request, log, capability_version, capability_hash):
    """
    Execute a scaling policy based the capability hash.

    This returns a 202 in all cases except internal server error,
    and does not wait for execution to finish.
    """
    store = get_store()
    # One-element list so nested callbacks can swap in a log bound with
    # tenant/group/policy ids once they have been looked up.
    logl = [log]
    d = store.webhook_info_by_hash(log, capability_hash)

    def log_informational_webhook_failure(failure):
        # Expected, non-fatal failures (unknown hash, deleted group/policy,
        # or a policy that currently cannot run): log and swallow so the
        # caller still gets its 202.
        failure.trap(UnrecognizedCapabilityError,
                     CannotExecutePolicyError,
                     NoSuchPolicyError,
                     NoSuchScalingGroupError)
        logl[0].msg("Non-fatal error during webhook execution: {exc!r}",
                    exc=failure.value)

    def execute_policy((tenant_id, group_id, policy_id)):
        # Re-bind the log with the ids resolved from the capability hash so
        # later errbacks log with full context.
        bound_log = log.bind(tenant_id=tenant_id,
                             scaling_group_id=group_id,
                             policy_id=policy_id)
        logl[0] = bound_log
        group = store.get_scaling_group(bound_log, tenant_id, group_id)
        return group.modify_state(
            partial(controller.maybe_execute_scaling_policy,
                    bound_log, transaction_id(request),
                    policy_id=policy_id))

    d.addCallback(execute_policy)
    d.addErrback(log_informational_webhook_failure)
    # Anything else is unexpected; log it so the failure is not lost
    # (the deferred is deliberately not returned — fire-and-forget).
    d.addErrback(
        lambda f: logl[0].err(f, "Unhandled exception executing webhook."))
def get_webhook(request, log, tenant_id, group_id, policy_id, webhook_id):
    """
    Get one webhook: its name, arbitrary metadata, and capability URL.

    The data is returned as JSON in the response body under the
    ``webhook`` key, with ``self`` and ``capability`` links.
    """
    def to_response_dict(webhook_model):
        formatted = _format_webhook(webhook_id, webhook_model, tenant_id,
                                    group_id, policy_id)
        return {'webhook': formatted}

    group = get_store().get_scaling_group(log, tenant_id, group_id)
    d = group.get_webhook(policy_id, webhook_id)
    d.addCallback(to_response_dict)
    return d.addCallback(json.dumps)
def edit_config_for_scaling_group(request, log, tenant_id, group_id, data):
    """
    Edit a scaling group's configuration: name, global cooldown, min/max
    entities and metadata, supplied as JSON in the request body.  The
    entire schema body must be provided.  On success no response body is
    returned.

    :raises InvalidMinEntities: if ``minEntities`` exceeds ``maxEntities``
    """
    if data['maxEntities'] < data['minEntities']:
        raise InvalidMinEntities(
            "minEntities must be less than or equal to maxEntities")

    group = get_store().get_scaling_group(log, tenant_id, group_id)

    def enforce_new_config(_ignore):
        # Converge the group toward the newly-saved configuration.
        return group.modify_state(
            partial(controller.obey_config_change, log,
                    transaction_id(request), data))

    return group.update_config(data).addCallback(enforce_new_config)
def _do_obey_config_change(result):
    """
    Kick off convergence toward the just-stored configuration, then pass
    ``result`` (the group manifest dict) through unchanged.
    """
    group = get_store().get_scaling_group(log, tenant_id, result['id'])
    d = group.modify_state(
        partial(controller.obey_config_change, log,
                transaction_id(request), result['groupConfiguration']))
    return d.addCallback(lambda _: result)
def execute_webhook(request, log, capability_version, capability_hash):
    """
    Execute a scaling policy based the capability hash.

    This returns a 202 in all cases except internal server error,
    and does not wait for execution to finish.
    """
    store = get_store()
    d = store.webhook_info_by_hash(log, capability_hash)

    def execute_policy((tenant_id, group_id, policy_id)):
        group = store.get_scaling_group(log, tenant_id, group_id)
        # no deferred return because this doesn't wait for execution
        # NOTE(review): discarding this deferred also discards any failure
        # raised by the execution itself — confirm such errors are logged
        # elsewhere, otherwise they vanish silently.
        group.modify_state(partial(controller.maybe_execute_scaling_policy,
                                   log, transaction_id(request),
                                   policy_id=policy_id))

    d.addCallback(execute_policy)

    def log_unrecognized_cap(failure):
        # Only an unknown capability hash is handled here; any other
        # failure propagates to the caller via the returned deferred.
        exc = failure.trap(UnrecognizedCapabilityError)
        log.bind(capability_hash=capability_hash,
                 capability_version=capability_version).msg(repr(exc))

    d.addErrback(log_unrecognized_cap)
    return d
def execute_event(self, log, event, deleted_policy_ids):
    """
    Execute a single scheduled event.

    :param log: A bound log for logging
    :param event: event dict to execute
    :param deleted_policy_ids: Set of policy ids that are deleted. The
        event's policy id is added when its scaling group or policy has
        been deleted
    :return: a deferred with the results of execution
    """
    tenant_id = event['tenantId']
    group_id = event['groupId']
    policy_id = event['policyId']
    log = log.bind(tenant_id=tenant_id, scaling_group_id=group_id,
                   policy_id=policy_id)
    log.msg('Executing policy')

    group = get_store().get_scaling_group(log, tenant_id, group_id)
    d = group.modify_state(
        partial(maybe_execute_scaling_policy, log,
                generate_transaction_id(), policy_id=policy_id))
    # A policy that cannot run right now is normal; log and move on.
    d.addErrback(ignore_and_log, CannotExecutePolicyError, log,
                 'Cannot execute policy')

    def remember_deletion(failure):
        # Group/policy no longer exists: record it for event cleanup.
        failure.trap(NoSuchScalingGroupError, NoSuchPolicyError)
        deleted_policy_ids.add(policy_id)

    d.addErrback(remember_deletion)
    d.addErrback(log.err, 'Scheduler failed to execute policy')
    return d
def execute_webhook(request, log, capability_version, capability_hash):
    """
    Execute a scaling policy based the capability hash.

    This returns a 202 in all cases except internal server error,
    and does not wait for execution to finish.
    """
    store = get_store()
    # One-element list so nested callbacks can swap in a log bound with
    # tenant/group/policy ids once they have been looked up.
    logl = [log]
    d = store.webhook_info_by_hash(log, capability_hash)

    def log_informational_webhook_failure(failure):
        # Expected, non-fatal failures (unknown hash, deleted group/policy,
        # or a policy that currently cannot run): log and swallow so the
        # caller still gets its 202.
        failure.trap(UnrecognizedCapabilityError,
                     CannotExecutePolicyError,
                     NoSuchPolicyError,
                     NoSuchScalingGroupError)
        logl[0].msg("Non-fatal error during webhook execution: {exc!r}",
                    exc=failure.value)

    def execute_policy((tenant_id, group_id, policy_id)):
        # Re-bind the log with the ids resolved from the capability hash so
        # later errbacks log with full context.
        bound_log = log.bind(tenant_id=tenant_id,
                             scaling_group_id=group_id,
                             policy_id=policy_id)
        logl[0] = bound_log
        group = store.get_scaling_group(bound_log, tenant_id, group_id)
        return group.modify_state(
            partial(controller.maybe_execute_scaling_policy,
                    bound_log, transaction_id(request),
                    policy_id=policy_id))

    d.addCallback(execute_policy)
    d.addErrback(log_informational_webhook_failure)
    # Anything else is unexpected; log it so the failure is not lost
    # (the deferred is deliberately not returned — fire-and-forget).
    d.addErrback(
        lambda f: logl[0].err(f, "Unhandled exception executing webhook."))
def view_config_for_scaling_group(request, log, tenant_id, group_id):
    """
    Get a scaling group's configuration: name, global cooldown, min/max
    entities and metadata.

    The data is returned as JSON in the response body under the
    ``groupConfiguration`` key.
    """
    def wrap(config):
        return json.dumps({"groupConfiguration": config})

    group = get_store().get_scaling_group(log, tenant_id, group_id)
    return group.view_config().addCallback(wrap)
def get_policy(request, log, tenant_id, scaling_group_id, policy_id):
    """
    Get one scaling policy: id, name, type, adjustment, cooldown and links.

    The data is returned as JSON in the response body under the
    ``policy`` key.
    """
    def add_id_and_links(policy_dict):
        policy_dict['id'] = policy_id
        policy_dict['links'] = get_autoscale_links(
            tenant_id, scaling_group_id, policy_id)
        return {'policy': policy_dict}

    group = get_store().get_scaling_group(log, tenant_id, scaling_group_id)
    d = group.get_policy(policy_id)
    d.addCallback(add_id_and_links)
    return d.addCallback(json.dumps)
def delete_scaling_group(request, log, tenant_id, scaling_group_id):
    """
    Delete a scaling group, provided no entities still belong to it.

    On success no response body is returned.
    """
    group = get_store().get_scaling_group(log, tenant_id, scaling_group_id)
    return group.delete_group()
def get_policy(request, log, tenant_id, scaling_group_id, policy_id):
    """
    Get one scaling policy: id, name, type, adjustment, cooldown and links.

    The data is returned as JSON in the response body under the
    ``policy`` key.
    """
    store = get_store()
    group = store.get_scaling_group(log, tenant_id, scaling_group_id)

    def openstackify(policy_dict):
        links = get_autoscale_links(tenant_id, scaling_group_id, policy_id)
        policy_dict["links"] = links
        policy_dict["id"] = policy_id
        return json.dumps({"policy": policy_dict})

    return group.get_policy(policy_id).addCallback(openstackify)
def edit_config_for_scaling_group(request, log, tenant_id, group_id, data):
    """
    Edit a scaling group's configuration: name, global cooldown, min/max
    entities and metadata, supplied as JSON in the request body.  The
    entire schema body must be provided.  On success no response body is
    returned.

    :raises InvalidMinEntities: if ``minEntities`` exceeds ``maxEntities``
    """
    min_entities = data['minEntities']
    max_entities = data['maxEntities']
    if min_entities > max_entities:
        raise InvalidMinEntities(
            "minEntities must be less than or equal to maxEntities")

    group = get_store().get_scaling_group(log, tenant_id, group_id)

    def _converge(_ignore):
        # Push the group toward the configuration we just stored.
        return group.modify_state(
            partial(controller.obey_config_change, log,
                    transaction_id(request), data))

    return group.update_config(data).addCallback(_converge)
def resume_scaling_group(request, log, tenant_id, scaling_group_id):
    """
    Resume a scaling group so its policies execute as usual.

    Idempotent: resuming an already-running group does nothing.
    """
    store = get_store()
    group = store.get_scaling_group(log, tenant_id, scaling_group_id)
    return controller.resume_scaling_group(log, transaction_id(request), group)
def _do_obey_config_change(result):
    """
    Kick off convergence toward the just-stored configuration, then pass
    ``result`` (the group manifest dict) through unchanged.
    """
    config = result['groupConfiguration']
    group = get_store().get_scaling_group(log, tenant_id, result['id'])
    d = group.modify_state(
        partial(controller.obey_config_change, log, transaction_id(request),
                config))
    d.addCallback(lambda _: result)
    return d
def pause_scaling_group(request, log, tenant_id, scaling_group_id):
    """
    Pause a scaling group so that policy executions are rejected.

    Idempotent: pausing an already-paused group does nothing.
    """
    store = get_store()
    group = store.get_scaling_group(log, tenant_id, scaling_group_id)
    return controller.pause_scaling_group(log, transaction_id(request), group)
def delete_webhook(request, log, tenant_id, group_id, policy_id, webhook_id):
    """
    Delete a particular webhook.

    On success no response body is returned.
    """
    group = get_store().get_scaling_group(log, tenant_id, group_id)
    return group.delete_webhook(policy_id, webhook_id)
def fetch_and_process(self, batchsize):
    """
    Fetch the events to be processed and process them.
    Also delete/update after processing them

    :param batchsize: maximum number of events fetched in this run
    :return: a deferred that fires with list of events processed
    """
    def process_events(events):
        # Execute every fetched event in parallel; collect the policy ids
        # whose group or policy turned out to be deleted so their events
        # can be removed in the next step.
        if not len(events):
            return events, set()

        log.msg('Processing events', num_events=len(events))
        deleted_policy_ids = set()

        def eb(failure, policy_id):
            # Group/policy gone -> remember for deletion, don't error out.
            failure.trap(NoSuchPolicyError, NoSuchScalingGroupError)
            deleted_policy_ids.add(policy_id)

        deferreds = [
            self.execute_event(log, event).addErrback(eb, event['policyId']).addErrback(log.err)
            for event in events
        ]
        d = defer.gatherResults(deferreds, consumeErrors=True)
        return d.addCallback(lambda _: (events, deleted_policy_ids))

    def update_delete_events((events, deleted_policy_ids)):
        """
        Update events with cron entry with next trigger time
        Delete other events
        """
        if not len(events):
            return events

        events_to_delete, events_to_update = [], []
        for event in events:
            if event['cron'] and event['policyId'] not in deleted_policy_ids:
                # Recurring event: reschedule it at its next occurrence.
                event['trigger'] = next_cron_occurrence(event['cron'])
                events_to_update.append(event)
            else:
                # One-shot event, or its policy/group was deleted.
                events_to_delete.append(event['policyId'])

        log.msg('Deleting events', num_policy_ids_deleting=len(events_to_delete))
        log.msg('Updating events', num_policy_ids_updating=len(events_to_update))
        d = get_store().update_delete_events(events_to_delete, events_to_update)
        return d.addCallback(lambda _: events)

    # utcnow because of cass serialization issues
    utcnow = datetime.utcnow()
    log = self.log.bind(scheduler_run_id=generate_transaction_id(), utcnow=utcnow)
    log.msg('Checking for events')
    deferred = get_store().fetch_batch_of_events(utcnow, batchsize)
    deferred.addCallback(process_events)
    deferred.addCallback(update_delete_events)
    deferred.addErrback(log.err)
    return deferred
def get_scaling_group_state(request, log, tenant_id, scaling_group_id):
    """
    Get the current state of the scaling group: the set of active entities,
    active/pending/desired capacity, and the paused flag.

    The data is returned as JSON in the response body under the ``group``
    key.  No sort order is guaranteed for the list of active entities.
    """
    group = get_store().get_scaling_group(log, tenant_id, scaling_group_id)
    d = group.view_state()
    d.addCallback(lambda state: json.dumps({"group": format_state_dict(state)}))
    return d
def list_all_scaling_groups(request, log, tenant_id):
    """
    List every autoscaling group (with its current state) for the given
    tenant.

    The data is returned as JSON in the response body under the ``groups``
    key, together with an empty ``groups_links`` list.
    """
    def as_response(group_states):
        groups = [format_state_dict(state) for state in group_states]
        return json.dumps({"groups": groups, "groups_links": []})

    d = get_store().list_scaling_group_states(log, tenant_id)
    return d.addCallback(as_response)
def edit_launch_config(request, log, tenant_id, group_id, data):
    """
    Replace a scaling group's launch configuration: how to create a server,
    from what image, which load balancers to join it to, which networks to
    add it to, and other metadata, supplied as JSON in the request body.
    On success no response body is returned.

    The exact update cases are still up in the air -- can the user provide
    a minimal schema, and if so, what happens with defaults?  Nova should
    validate the image before saving the new config; users may have an
    invalid configuration based on dependencies.
    """
    group = get_store().get_scaling_group(log, tenant_id, group_id)
    return group.update_launch_config(data)
def view_launch_config(request, log, tenant_id, group_id):
    """
    Get a scaling group's launch configuration: how to create a server,
    from what image, which load balancers to join it to, which networks to
    add it to, and other metadata.

    The data is returned as JSON in the response body under the
    ``launchConfiguration`` key.
    """
    def wrap(config):
        return json.dumps({"launchConfiguration": config})

    group = get_store().get_scaling_group(log, tenant_id, group_id)
    return group.view_launch_config().addCallback(wrap)
def execute_event(self, log, event):
    """
    Execute a single scheduled event.

    :param log: A bound log for logging
    :param event: event dict carrying ``tenantId``, ``groupId``,
        ``policyId``
    :return: a deferred with the results of execution
    """
    tenant_id = event['tenantId']
    group_id = event['groupId']
    policy_id = event['policyId']
    bound_log = log.bind(tenant_id=tenant_id, scaling_group_id=group_id,
                         policy_id=policy_id)
    bound_log.msg('Executing policy')

    group = get_store().get_scaling_group(bound_log, tenant_id, group_id)
    d = group.modify_state(
        partial(maybe_execute_scaling_policy, bound_log,
                generate_transaction_id(), policy_id=policy_id))
    # A policy that cannot run right now is expected; log and continue.
    d.addErrback(ignore_and_log, CannotExecutePolicyError, bound_log,
                 'Cannot execute policy')
    return d
def execute_policy(request, log, tenant_id, scaling_group_id, policy_id):
    """
    Execute this scaling policy.

    Response body is TBD; currently an empty JSON object is returned.
    """
    group = get_store().get_scaling_group(log, tenant_id, scaling_group_id)
    modifier = partial(controller.maybe_execute_scaling_policy, log,
                       transaction_id(request), policy_id=policy_id)
    # Return value TBD
    return group.modify_state(modifier).addCallback(lambda _: "{}")
def view_launch_config(request, log, tenant_id, group_id):
    """
    Get a scaling group's launch configuration: how to create a server,
    from what image, which load balancers to join it to, which networks to
    add it to, and other metadata.

    The data is returned as JSON in the response body under the
    ``launchConfiguration`` key.
    """
    store = get_store()
    group = store.get_scaling_group(log, tenant_id, group_id)
    d = group.view_launch_config()
    d.addCallback(lambda conf: {"launchConfiguration": conf})
    d.addCallback(json.dumps)
    return d
def execute_policy(request, log, tenant_id, scaling_group_id, policy_id):
    """
    Execute this scaling policy.

    Response body is TBD; currently an empty JSON object is returned.
    """
    store = get_store()
    group = store.get_scaling_group(log, tenant_id, scaling_group_id)
    d = group.modify_state(
        partial(controller.maybe_execute_scaling_policy, log,
                transaction_id(request), policy_id=policy_id))
    # Return value TBD
    d.addCallback(lambda _: "{}")
    return d
def update_webhook(request, log, tenant_id, group_id, policy_id, webhook_id,
                   data):
    """
    Update a particular webhook.  The body must include a name and may
    (but does not need to) include arbitrary metadata.

    On success no response body is returned.
    """
    group = get_store().get_scaling_group(log, tenant_id, group_id)
    return group.update_webhook(policy_id, webhook_id, data)
def update_policy(request, log, tenant_id, scaling_group_id, policy_id, data):
    """
    Update a scaling policy.  The body must include a name, type,
    adjustment, and cooldown.

    On success no response body is returned.
    """
    group = get_store().get_scaling_group(log, tenant_id, scaling_group_id)
    return group.update_policy(policy_id, data)
def update_delete_events((events, deleted_policy_ids)):
    """
    Update events with cron entry with next trigger time
    Delete other events

    :param events: list of event dicts just processed
    :param deleted_policy_ids: set of policy ids whose group or policy has
        been deleted
    :return: the events list, once the store update completes
    """
    if not len(events):
        return events

    events_to_delete, events_to_update = [], []
    for event in events:
        if event['cron'] and event['policyId'] not in deleted_policy_ids:
            # Recurring event: reschedule it at its next occurrence.
            event['trigger'] = next_cron_occurrence(event['cron'])
            events_to_update.append(event)
        else:
            # One-shot event, or its policy/group was deleted.
            events_to_delete.append(event['policyId'])

    # NOTE: `log` is a free variable here — this function is expected to
    # run nested inside a scope that binds it.
    log.msg('Deleting events', num_policy_ids_deleting=len(events_to_delete))
    log.msg('Updating events', num_policy_ids_updating=len(events_to_update))
    d = get_store().update_delete_events(events_to_delete, events_to_update)
    return d.addCallback(lambda _: events)
def list_policies(request, log, tenant_id, scaling_group_id):
    """
    List the scaling policies in the group: each with id, name, type,
    adjustment, cooldown, and links.

    The data is returned as JSON in the response body under the
    ``policies`` key, together with an empty ``policies_links`` list.
    """
    def as_response(policy_dict):
        policies = policy_dict_to_list(policy_dict, tenant_id,
                                       scaling_group_id)
        return {"policies": policies, "policies_links": []}

    group = get_store().get_scaling_group(log, tenant_id, scaling_group_id)
    d = group.list_policies()
    d.addCallback(as_response)
    return d.addCallback(json.dumps)
def list_webhooks(request, log, tenant_id, group_id, policy_id):
    """
    List every webhook (capability URL) attached to one scaling policy.

    The data is returned as JSON in the response body under the
    ``webhooks`` key, together with an empty ``webhooks_links`` list.
    """
    def as_response(webhook_dict):
        webhooks = [
            _format_webhook(webhook_id, webhook_model, tenant_id, group_id,
                            policy_id)
            for webhook_id, webhook_model in webhook_dict.iteritems()
        ]
        return json.dumps({'webhooks': webhooks, "webhooks_links": []})

    group = get_store().get_scaling_group(log, tenant_id, group_id)
    return group.list_webhooks(policy_id).addCallback(as_response)
def view_manifest_config_for_scaling_group(request, log, tenant_id,
                                           scaling_group_id):
    """
    View the manifested scaling group: its group configuration, launch
    configuration and scaling policies together.

    The data is returned as JSON in the response body under the ``group``
    key, with OpenStack-style ``id``/``links`` entries added to the group
    and to every policy.
    """
    def openstack_formatting(data, uuid):
        # Attach self links to the group, and flatten the policy mapping
        # into a list with ids and links added to each policy.
        data["links"] = get_autoscale_links(tenant_id, uuid)
        formatted_policies = []
        for policy_id, policy in data["scalingPolicies"].iteritems():
            policy["id"] = policy_id
            policy["links"] = get_autoscale_links(tenant_id, uuid, policy_id)
            formatted_policies.append(policy)
        data["scalingPolicies"] = formatted_policies
        return {"group": data}

    group = get_store().get_scaling_group(log, tenant_id, scaling_group_id)
    d = group.view_manifest()
    d.addCallback(openstack_formatting, group.uuid)
    return d.addCallback(json.dumps)
def list_webhooks(request, log, tenant_id, group_id, policy_id):
    """
    List every webhook (capability URL) attached to one scaling policy.

    The data is returned as JSON in the response body under the
    ``webhooks`` key, together with an empty ``webhooks_links`` list.
    """
    def format_webhooks(webhook_dict):
        formatted = []
        for hook_id, hook_model in webhook_dict.iteritems():
            formatted.append(_format_webhook(hook_id, hook_model, tenant_id,
                                             group_id, policy_id))
        return {'webhooks': formatted, "webhooks_links": []}

    store = get_store()
    group = store.get_scaling_group(log, tenant_id, group_id)
    d = group.list_webhooks(policy_id)
    d.addCallback(format_webhooks)
    d.addCallback(json.dumps)
    return d
def create_webhooks(request, log, tenant_id, group_id, policy_id, data):
    """
    Create one or more webhooks for a scaling policy.  Each webhook must
    include a name and may (but does not need to) include arbitrary
    metadata; the request body is a JSON array of such webhooks.

    The Location response header points at the list-webhooks endpoint, and
    the response body contains the created webhooks (with ids and links)
    as JSON under the ``webhooks`` key.
    """
    def respond(webhook_dict):
        # Point the client at the collection the new webhooks live in.
        request.setHeader(
            "Location",
            get_autoscale_links(tenant_id, group_id, policy_id, "",
                                format=None))
        webhooks = [
            _format_webhook(webhook_id, webhook_model, tenant_id, group_id,
                            policy_id)
            for webhook_id, webhook_model in webhook_dict.iteritems()
        ]
        return json.dumps({'webhooks': webhooks})

    group = get_store().get_scaling_group(log, tenant_id, group_id)
    return group.create_webhooks(policy_id, data).addCallback(respond)
def create_policies(request, log, tenant_id, scaling_group_id, data):
    """
    Create one or more scaling policies, each with a name, type,
    adjustment, and cooldown; the request body is a JSON array of such
    policies.

    The Location response header points at the list-policies endpoint, and
    the response body contains the created policies (with ids and links)
    as JSON under the ``policies`` key.
    """
    def respond(policy_dict):
        # Point the client at the collection the new policies live in.
        request.setHeader(
            "Location",
            get_autoscale_links(tenant_id, scaling_group_id, "",
                                format=None))
        policies = []
        for policy_uuid, policy_item in policy_dict.iteritems():
            policy_item["id"] = policy_uuid
            policy_item["links"] = get_autoscale_links(
                tenant_id, scaling_group_id, policy_uuid)
            policies.append(policy_item)
        return json.dumps({"policies": policies})

    group = get_store().get_scaling_group(log, tenant_id, scaling_group_id)
    return group.create_policies(data).addCallback(respond)
def list_policies(request, log, tenant_id, scaling_group_id):
    """
    List the scaling policies in the group: each with id, name, type,
    adjustment, cooldown, and links.

    The data is returned as JSON in the response body under the
    ``policies`` key, together with an empty ``policies_links`` list.
    """
    store = get_store()
    group = store.get_scaling_group(log, tenant_id, scaling_group_id)

    def as_json(policy_dict):
        return json.dumps({
            'policies': policy_dict_to_list(policy_dict, tenant_id,
                                            scaling_group_id),
            "policies_links": [],
        })

    return group.list_policies().addCallback(as_json)
def create_new_scaling_group(request, log, tenant_id, data):
    """
    Create a new scaling group.

    The JSON request body must contain a ``groupConfiguration`` (name,
    cooldown, minEntities, and optionally maxEntities and metadata) and a
    ``launchConfiguration``; it may also include a ``scalingPolicies``
    list, which can be empty or omitted entirely::

        {
            "groupConfiguration": {"name": "workers", "cooldown": 60,
                                   "minEntities": 5, "maxEntities": 100,
                                   "metadata": {...}},
            "launchConfiguration": {"type": "launch_server", "args": {...}},
            "scalingPolicies": [{"name": "scale up by 10", "change": 10,
                                 "cooldown": 5}, ...]
        }

    On success the ``Location`` header points at the created group and the
    response body wraps the group (id, links, both configurations, and the
    created policies with their ids and links) under a top-level
    ``"group"`` key.

    Raises ``InvalidMinEntities`` if ``minEntities`` exceeds
    ``maxEntities``.
    """
    group_config = data['groupConfiguration']
    # Fill in optional configuration fields before validating.
    group_config.setdefault('maxEntities', MAX_ENTITIES)
    group_config.setdefault('metadata', {})

    if group_config['minEntities'] > group_config['maxEntities']:
        raise InvalidMinEntities(
            "minEntities must be less than or equal to maxEntities")

    d = get_store().create_scaling_group(
        log, tenant_id, group_config, data['launchConfiguration'],
        data.get('scalingPolicies', None))

    def _converge_new_group(result):
        # Immediately drive the new group toward its configured limits.
        new_group_id = result['id']
        config = result['groupConfiguration']
        group = get_store().get_scaling_group(log, tenant_id, new_group_id)
        converged = group.modify_state(
            partial(controller.obey_config_change, log,
                    transaction_id(request), config))
        return converged.addCallback(lambda _: result)

    d.addCallback(_converge_new_group)

    def _register_with_bobby(result, client):
        # Register the new group with bobby when that integration is enabled.
        return client.create_group(
            tenant_id, result["id"]).addCallback(lambda _: result)

    bobby = get_bobby()
    if bobby is not None:
        d.addCallback(_register_with_bobby, bobby)

    def _format_output(result):
        group_uuid = result['id']
        request.setHeader(
            "Location",
            get_autoscale_links(tenant_id, group_uuid, format=None))
        result["links"] = get_autoscale_links(tenant_id, group_uuid)
        result["scalingPolicies"] = policy_dict_to_list(
            result["scalingPolicies"], tenant_id, group_uuid)
        return {"group": result}

    d.addCallback(_format_output)
    d.addCallback(json.dumps)
    return d
def create_new_scaling_group(request, log, tenant_id, data):
    """
    Create a new scaling group.

    The JSON request body must provide a ``groupConfiguration`` and a
    ``launchConfiguration``, plus an optional ``scalingPolicies`` list
    (which may be empty or omitted)::

        {
            "groupConfiguration": {"name": "workers", "cooldown": 60,
                                   "minEntities": 5, "maxEntities": 100,
                                   "metadata": {...}},
            "launchConfiguration": {"type": "launch_server", "args": {...}},
            "scalingPolicies": [{"name": "scale up by 10", "change": 10,
                                 "cooldown": 5}, ...]
        }

    On success the ``Location`` header points at the created group, and
    the response body wraps the group — id, links, both configurations,
    and the created policies with their ids and links — under a top-level
    ``"group"`` key.

    Raises ``InvalidMinEntities`` when ``minEntities`` is greater than
    ``maxEntities``.
    """
    # NOTE(review): this function is defined twice in this module; the
    # later definition shadows the earlier one — confirm and remove one.
    conf = data['groupConfiguration']
    conf.setdefault('maxEntities', MAX_ENTITIES)
    conf.setdefault('metadata', {})

    if conf['minEntities'] > conf['maxEntities']:
        raise InvalidMinEntities(
            "minEntities must be less than or equal to maxEntities")

    deferred = get_store().create_scaling_group(
        log, tenant_id, conf, data['launchConfiguration'],
        data.get('scalingPolicies', None))

    def _do_obey_config_change(result):
        # Converge the freshly created group toward its configured limits.
        gid = result['id']
        group = get_store().get_scaling_group(log, tenant_id, gid)
        dd = group.modify_state(
            partial(controller.obey_config_change, log,
                    transaction_id(request), result['groupConfiguration']))
        dd.addCallback(lambda _: result)
        return dd

    deferred.addCallback(_do_obey_config_change)

    bobby = get_bobby()
    if bobby is not None:
        # Register the group with bobby when that integration is enabled.
        def _add_to_bobby(result):
            dd = bobby.create_group(tenant_id, result["id"])
            dd.addCallback(lambda _: result)
            return dd
        deferred.addCallback(_add_to_bobby)

    def _format_output(result):
        gid = result['id']
        request.setHeader(
            "Location", get_autoscale_links(tenant_id, gid, format=None))
        result["links"] = get_autoscale_links(tenant_id, gid)
        result["scalingPolicies"] = policy_dict_to_list(
            result["scalingPolicies"], tenant_id, gid)
        return {"group": result}

    deferred.addCallback(_format_output)
    deferred.addCallback(json.dumps)
    return deferred
def create_webhooks(request, log, tenant_id, group_id, policy_id, data):
    """
    Create one or more webhooks against a scaling policy.

    The request body is a JSON array of webhook objects; each must have a
    name and may carry arbitrary metadata::

        [
            {"name": "alice", "metadata": {"notes": "this is for Alice"}},
            {"name": "bob"}
        ]

    On success the ``Location`` header points at the list-webhooks
    endpoint and the response body lists the created webhooks, each with
    its id, name, metadata, a self link, and a capability-URL link::

        {"webhooks": [{"id": "{webhookId1}", ...,
                       "links": [{"href": "...", "rel": "self"},
                                 {"href": "...", "rel": "capability"}]},
                      ...]}
    """
    group = get_store().get_scaling_group(log, tenant_id, group_id)

    def _redirect_and_format(webhook_dict):
        # Point the client at the collection endpoint for the new webhooks.
        request.setHeader(
            "Location",
            get_autoscale_links(
                tenant_id, group_id, policy_id, "", format=None))
        webhooks = [
            _format_webhook(webhook_id, webhook_model, tenant_id, group_id,
                            policy_id)
            for webhook_id, webhook_model in webhook_dict.iteritems()]
        return {'webhooks': webhooks}

    d = group.create_webhooks(policy_id, data)
    d.addCallback(_redirect_and_format)
    d.addCallback(json.dumps)
    return d
def create_policies(request, log, tenant_id, scaling_group_id, data):
    """
    Create one or more scaling policies for a group.

    The request body is a JSON array of policy objects, each of which
    must include a name, type, adjustment, and cooldown, e.g.::

        [
            {"name": "scale up by one server", "change": 1, "cooldown": 150},
            {"name": "scale down by 5.5 percent", "changePercent": -5.5,
             "cooldown": 6}
        ]

    On success the ``Location`` header points at the list-policies
    endpoint, and the response body contains the created policies with
    their generated ids and self links under a top-level ``"policies"``
    key.
    """
    # NOTE(review): this function is defined twice in this module; the
    # later definition shadows the earlier one — confirm and remove one.
    def _format_and_redirect(policy_dict):
        request.setHeader(
            "Location",
            get_autoscale_links(tenant_id, scaling_group_id, "", format=None))

        def _with_links(policy_uuid, policy_item):
            # Attach the generated id and a self link to each policy.
            policy_item['id'] = policy_uuid
            policy_item['links'] = get_autoscale_links(
                tenant_id, scaling_group_id, policy_uuid)
            return policy_item

        return {'policies': [
            _with_links(policy_uuid, policy_item)
            for policy_uuid, policy_item in policy_dict.iteritems()]}

    rec = get_store().get_scaling_group(log, tenant_id, scaling_group_id)
    d = rec.create_policies(data)
    d.addCallback(_format_and_redirect)
    d.addCallback(json.dumps)
    return d