class InstanceHook(resource.Resource):
    """Callback resource for an auto-scaling lifecycle hook.

    Used to complete a lifecycle action for an instance that has been
    suspended by a lifecycle hook of an auto-scaling group.
    """
    resource_key = 'scaling_instance_hook'
    resources_key = 'scaling_instance_hooks'
    base_path = '/scaling_instance_hook/%(scaling_group_id)s/callback'
    allow_update = True
    service = auto_scaling_service.AutoScalingService()

    #: Auto-scaling group the hook belongs to (URI parameter)
    scaling_group_id = resource.URI('scaling_group_id')
    #: Key identifying the pending lifecycle action
    lifecycle_action_key = resource.Body('lifecycle_action_key')
    #: Instance suspended by the lifecycle hook
    instance_id = resource.Body('instance_id')
    #: Name of the lifecycle hook
    lifecycle_hook_name = resource.Body('lifecycle_hook_name')
    #: Result of the lifecycle action -- valid values not visible here;
    #: presumably CONTINUE/ABANDON, confirm against the AS API reference
    lifecycle_action_result = resource.Body('lifecycle_action_result')

    def call_back(self, session, **attrs):
        """PUT the lifecycle-hook callback for this instance.

        :param session: openstack session
        :param attrs: request body of the callback
        :returns: self when the service answers 204 (no content)
        :raises exceptions.InvalidRequest: for any other status code
        """
        request = self._prepare_request(requires_id=False)
        endpoint_override = self.service.get_endpoint_override()
        response = session.put(request.uri,
                               endpoint_filter=self.service,
                               endpoint_override=endpoint_override,
                               json=attrs,
                               headers=request.headers)
        # The AS service signals success with 204 only.
        if response.status_code != 204:
            # Lazy %-args: the message is only rendered when DEBUG is on.
            _logger.debug(
                'request AS service lifecycle hook call back url is %s '
                'response code is %s ',
                response.url, response.status_code)
            raise exceptions.InvalidRequest(
                "Request AS service lifecycle hook call back %s failed"
                % request.uri)
        return self
class DeleteNotification(resource.Resource):
    """Removes a notification topic from an auto-scaling group."""
    service = auto_scaling_service.AutoScalingService()
    allow_delete = True

    #: URN of the notification topic to remove (URI parameter)
    topic_urn = resource.URI('topic_urn')
    #: Auto-scaling group the topic is attached to (URI parameter)
    scaling_group_id = resource.URI('scaling_group_id')
    base_path = '/scaling_notification/%(scaling_group_id)s/%(topic_urn)s'

    def delete_notification(self, session):
        """DELETE the notification subscription.

        :param session: openstack session
        :returns: self when the service answers 204 (no content)
        :raises exceptions.InvalidRequest: for any other status code
        """
        request = self._prepare_request(requires_id=False)
        endpoint_override = self.service.get_endpoint_override()
        response = session.delete(request.uri,
                                  endpoint_filter=self.service,
                                  endpoint_override=endpoint_override,
                                  headers=request.headers)
        # The AS service signals success with 204 only.
        if response.status_code != 204:
            # Lazy %-args: rendered only when DEBUG logging is enabled.
            _logger.debug(
                'failed request AS service delete notification url is %s '
                'response code is %s ',
                response.url, response.status_code)
            raise exceptions.InvalidRequest(
                "Request AS service delete notification %s failed"
                % request.uri)
        return self
def __init__(self, plugins=None):
    """User preference for each service.

    :param plugins: List of entry point namespaces to load.

    Create a new :class:`~openstack.profile.Profile` object with no
    preferences defined, but knowledge of the services.  Services are
    identified by their service type, e.g.: 'identity', 'compute', etc.
    """
    self._services = {}

    # Every service this profile knows about; registered in order below.
    supported = [
        anti_ddos_service.AntiDDosService(version="v1"),
        block_store_service.BlockStoreService(version="v2"),
        compute_service.ComputeService(version="v2"),
        cts_service.CTSService(version="v1"),
        dms_service.DMSService(version="v1"),
        identity_service.IdentityService(version="v3"),
        image_service.ImageService(version="v2"),
        kms_service.KMSService(version="v1"),
        maas_service.MaaSService(version="v1"),
        network_service.NetworkService(version="v2.0"),
        orchestration_service.OrchestrationService(version="v1"),
        smn_service.SMNService(version="v2"),
        # QianBiao.NG HuaWei Services
        dns_service.DNSService(version="v2"),
        cloud_eye_service.CloudEyeService(version="v1"),
        auto_scaling_service.AutoScalingService(version="v1"),
        volume_backup_service.VolumeBackupService(version="v2"),
        map_reduce_service.MapReduceService(version="v1"),
        evs_service.EvsServiceV2_1(version='v2.1'),
        evs_service.EvsService(version='v2'),
        ecs_service.EcsService(version='v1'),
        ecs_service.EcsServiceV1_1(version='v1.1'),
        vpc_service.VpcService(version='v2.0'),
        bms_service.BmsService(version='v1'),
        lb_service.LoadBalancerService(version='v1'),
        rds_service.RDSService(version="v1"),
        cdn_service.CDNService(version='v1'),
    ]
    for svc in supported:
        self._add_service(svc)

    # Services not supported yet -- kept for reference:
    # self._add_service(message_service.MessageService(version="v1"))
    # self._add_service(cluster_service.ClusterService(version="v1"))
    # self._add_service(database_service.DatabaseService(version="v1"))
    # self._add_service(alarm_service.AlarmService(version="v2"))
    # self._add_service(bare_metal_service.BareMetalService(version="v1"))
    # self._add_service(key_manager_service.KeyManagerService(version="v1"))
    # self._add_service(
    #     object_store_service.ObjectStoreService(version="v1"))
    # self._add_service(rds_os_service.RDSService(version="v1"))
    # self._add_service(telemetry_service.TelemetryService(version="v2"))
    # self._add_service(workflow_service.WorkflowService(version="v2"))

    if plugins:
        for plugin in plugins:
            self._load_plugin(plugin)

    self.service_keys = sorted(self._services.keys())
class Config(resource.Resource):
    """Auto-scaling configuration (instance launch template) resource."""
    resource_key = 'scaling_configuration'
    resources_key = 'scaling_configurations'
    base_path = '/scaling_configuration'
    query_marker_key = 'start_number'
    service = auto_scaling_service.AutoScalingService()

    # capabilities
    allow_create = True
    allow_list = True
    allow_get = True
    allow_delete = True

    _query_mapping = resource.QueryParameters(
        'image_id', 'limit',
        name='scaling_configuration_name',
        marker=query_marker_key)

    #: Properties
    #: AutoScaling config ID
    id = resource.Body('scaling_configuration_id')
    #: AutoScaling config name
    name = resource.Body('scaling_configuration_name')
    #: AutoScaling config created time
    create_time = resource.Body('create_time')
    #: AutoScaling config status
    status = resource.Body('status')
    #: Use the exists instance as template to create new instance
    instance_config = resource.Body('instance_config',
                                    default={},
                                    type=InstanceConfig)

    @classmethod
    def get_next_marker(cls, response_json, yielded, query_params):
        # Pagination marker is derived from the count yielded so far.
        return get_next_marker(response_json, yielded)

    def batch_delete(self, session, configs):
        """batch delete auto-scaling configs

        make sure all configs should not been used by auto-scaling group

        :param session: openstack session
        :param list configs: The list item value can be the ID of a config
            or a :class:`~openstack.auto_scaling.v2.config.Config` instance.
        :return: the raw batch-delete response
        """
        # Accept both Config objects and plain ID strings.
        config_ids = []
        for item in configs:
            config_ids.append(item.id if isinstance(item, Config) else item)
        body = {"scaling_configuration_id": config_ids}
        endpoint_override = self.service.get_endpoint_override()
        return session.post("/scaling_configurations",
                            headers={"Accept": "*"},
                            endpoint_filter=self.service,
                            endpoint_override=endpoint_override,
                            json=body)
def test_service(self):
    """Check the static catalog attributes of the auto-scaling service."""
    service = auto_scaling_service.AutoScalingService()
    self.assertEqual('auto-scaling', service.service_type)
    self.assertEqual('public', service.interface)
    self.assertIsNone(service.region)
    self.assertIsNone(service.service_name)
    # Exactly one valid version, pointing module and path at v1.
    versions = service.valid_versions
    self.assertEqual(1, len(versions))
    only = versions[0]
    self.assertEqual('v1', only.module)
    self.assertEqual('v1', only.path)
class BaseTag(resource.Resource):
    """Base resource for auto-scaling group tags."""
    resource_key = 'tag'
    resources_key = 'tags'
    # NOTE(review): no leading '/' unlike sibling resources' base_path --
    # confirm this is intentional.
    base_path = 'scaling_group_tag/tags'
    service = auto_scaling_service.AutoScalingService()

    # capabilities
    # allow_get = True
    allow_list = True

    # Full tag payload as returned by the service
    tag = resource.Body('tags')
    # Tag key
    key = resource.Body("key")
    # Tag values for the key
    values = resource.Body('values')
class Notification(resource.Resource):
    """Notification topics configured on an auto-scaling group."""
    resource_key = 'topic'
    resources_key = 'topics'
    base_path = '/scaling_notification/%(scaling_group_id)s'
    service = auto_scaling_service.AutoScalingService()

    # capabilities: listing only
    allow_list = True

    # Auto-scaling group the notification belongs to (URI parameter)
    scaling_group_id = resource.URI('scaling_group_id')
    # Topic name -- presumably an SMN topic; confirm against the API docs
    topic_name = resource.Body('topic_name')
    # Topic URN
    topic_urn = resource.Body('topic_urn')
    # Notification scene/trigger -- valid values not visible here
    topic_scene = resource.Body("topic_scene")
    # Raw topics payload
    topics = resource.Body('topics')
class Version(resource.Resource):
    """API version discovery resource for the auto-scaling service."""
    resource_key = 'version'
    resources_key = 'versions'
    base_path = '/'
    # NOTE(review): reuses DNSService.UNVERSIONED as the "no version"
    # sentinel for a different service -- looks like a shared constant;
    # confirm it is intended here.
    service = auto_scaling_service.AutoScalingService(
        version=dns_service.DNSService.UNVERSIONED
    )

    # capabilities
    allow_list = True

    # Properties
    # Links to the versioned endpoints
    links = resource.Body('links')
    # Status of this API version
    status = resource.Body('status')
class InstanceHookList(resource.Resource):
    """Lists instances suspended by lifecycle hooks of a scaling group."""
    resource_key = 'instance_hanging_info'
    resources_key = 'instance_hanging_infos'
    base_path = '/scaling_instance_hook/%(scaling_group_id)s/list'
    allow_list = True
    service = auto_scaling_service.AutoScalingService()

    # Listing can be filtered by a single instance id.
    _query_mapping = resource.QueryParameters('instance_id')

    # Key identifying the pending lifecycle action
    lifecycle_action_key = resource.Body('lifecycle_action_key')
    # Suspended instance id
    instance_id = resource.Body('instance_id')
    # Name of the lifecycle hook holding the instance
    lifecycle_hook_name = resource.Body('lifecycle_hook_name')
    # Owning auto-scaling group id
    scaling_group_id = resource.Body('scaling_group_id')
    # Current status of the hook for this instance
    lifecycle_hook_status = resource.Body('lifecycle_hook_status')
    # Timeout of the pending action
    timeout = resource.Body('timeout')
    # Result applied when the action times out
    default_result = resource.Body('default_result')
class Activity(resource.Resource):
    """Auto-scaling activity (action) log entry of a scaling group."""
    # NOTE(review): resource_key and resources_key are identical -- the
    # service apparently returns the same key for one and many; confirm.
    resource_key = "scaling_activity_log"
    resources_key = "scaling_activity_log"
    base_path = "/scaling_activity_log/%(scaling_group_id)s"
    query_marker_key = "start_number"
    service = auto_scaling_service.AutoScalingService()

    # capabilities
    allow_list = True

    _query_mapping = resource.QueryParameters("scaling_group_id",
                                              "start_time",
                                              "end_time",
                                              "limit",
                                              marker=query_marker_key)

    #: Properties
    #: AutoScaling Group Reference this activity belongs to
    scaling_group_id = resource.URI("scaling_group_id")
    #: AutoScaling Activity status
    #: valid values include: ``SUCCESS``, ``FAIL``, ``DOING``
    status = resource.Body("status")
    #: UTC date and time of activity begin
    start_time = resource.Body("start_time")
    #: UTC date and time of activity finish
    end_time = resource.Body("end_time")
    #: AutoScaling Activity description
    description = resource.Body("description")
    #: changed instance number during the AutoScaling Activity
    scaling_value = resource.Body("scaling_value")
    #: current instance number during the AutoScaling Activity
    instance_value = resource.Body("instance_value")
    #: desired instance number of the AutoScaling Activity
    desire_value = resource.Body("desire_value")
    #: The instance list removed in the AutoScaling Activity
    instance_removed_list = resource.Body("instance_removed_list")
    #: The instance list deleted in the AutoScaling Activity
    instance_deleted_list = resource.Body("instance_deleted_list")
    #: The instance list added in the AutoScaling Activity
    instance_added_list = resource.Body("instance_added_list")

    @classmethod
    def get_next_marker(cls, response_json, yielded, query_params):
        # Local import avoids a circular import at module load time.
        from openstack.auto_scaling.v1 import get_next_marker
        return get_next_marker(response_json, yielded)
class Quota(resource.Resource):
    """AutoScaling Quota resource"""
    # NOTE(review): dotted keys -- the quota list is nested under
    # ``quotas.resources`` in the response body.
    resource_key = "quotas.resources"
    resources_key = "quotas.resources"
    base_path = "/quotas"
    service = auto_scaling_service.AutoScalingService()

    # capabilities
    allow_list = True

    #: Properties
    #: Quota of type, current only ``alarm`` is valid
    type = resource.Body("type")
    #: Quota amount has been used
    used = resource.Body("used")
    #: Quota max amount
    max = resource.Body("max")
    #: Quota amount
    quota = resource.Body("quota")
class LifecycleHookBase(resource.Resource):
    """Base resource for lifecycle hooks of an auto-scaling group."""
    resource_key = 'lifecycle_hook'
    resources_key = 'lifecycle_hooks'
    base_path = '/scaling_lifecycle_hook/%(scaling_group_id)s'
    service = auto_scaling_service.AutoScalingService()

    # capabilities
    allow_create = True
    allow_list = True
    allow_get = True
    allow_delete = True
    allow_update = True

    # Hook name
    name = resource.Body("lifecycle_hook_name")
    # Owning auto-scaling group (URI parameter)
    scaling_group_id = resource.URI('scaling_group_id')
    # Hook type -- valid values not visible here; see the AS API reference
    lifecycle_hook_type = resource.Body('lifecycle_hook_type')
    # Action taken when the hook times out
    default_result = resource.Body('default_result')
    # Timeout (seconds, presumably) before default_result is applied
    default_timeout = resource.Body('default_timeout')
    # Notification topic URN / name used when the hook fires
    notification_topic_urn = resource.Body('notification_topic_urn')
    notification_topic_name = resource.Body('notification_topic_name')
    # Extra metadata attached to the notification
    notification_metadata = resource.Body('notification_metadata')
    # Creation time of the hook
    create_time = resource.Body('create_time')
class Policy(resource.Resource):
    """AutoScaling Policy Resource"""
    resource_key = 'scaling_policy'
    resources_key = 'scaling_policies'
    base_path = '/scaling_policy'
    query_marker_key = 'start_number'
    service = auto_scaling_service.AutoScalingService()

    # capabilities
    allow_create = True
    allow_list = True
    allow_get = True
    allow_delete = True
    allow_update = True

    _query_mapping = resource.QueryParameters('scaling_group_id', 'limit',
                                              name='scaling_policy_name',
                                              type='scaling_policy_type',
                                              marker=query_marker_key)

    #: Properties
    #: AutoScaling policy ID
    id = resource.Body('scaling_policy_id')
    #: AutoScaling policy name
    name = resource.Body('scaling_policy_name')
    #: AutoScaling policy trigger type
    #: valid values include: ``ALARM``, ``SCHEDULED``, ``RECURRENCE``
    type = resource.Body('scaling_policy_type')
    #: AutoScaling group reference the policy apply to
    scaling_group_id = resource.Body('scaling_group_id')
    #: Alarm reference, used when type is ``ALARM``
    alarm_id = resource.Body('alarm_id')
    #: Schedule details, used when type is ``SCHEDULED``/``RECURRENCE``
    scheduled_policy = resource.Body('scheduled_policy',
                                     type=ScheduledPolicy,
                                     default={})
    #: The action performed when the policy triggers
    scaling_policy_action = resource.Body('scaling_policy_action',
                                          type=Action,
                                          default={})
    #: Cool-down time between two triggers
    cool_down_time = resource.Body('cool_down_time')
    #: Creation time of the policy
    create_time = resource.Body('create_time')
    #: valid values include: ``INSERVICE``, ``PAUSED``
    status = resource.Body('policy_status')

    @classmethod
    def get_next_marker(cls, response_json, yielded, query_params):
        # Local import avoids a circular import at module load time.
        from openstack.auto_scaling.v1 import get_next_marker
        return get_next_marker(response_json, yielded)

    @classmethod
    def get_list_uri(cls, params):
        # Listing uses a group-scoped URI, unlike the flat base_path.
        return "/scaling_policy/%(scaling_group_id)s/list" % params

    def _action(self, session, body):
        """Perform policy actions given the message body.

        Shared plumbing for :meth:`execute`, :meth:`pause` and
        :meth:`resume`.
        """
        url = utils.urljoin(self.base_path, self.id, "action")
        endpoint_override = self.service.get_endpoint_override()
        return session.post(url,
                            endpoint_filter=self.service,
                            endpoint_override=endpoint_override,
                            json=body,
                            headers={})

    def execute(self, session):
        """execute policy"""
        body = {"action": "execute"}
        self._action(session, body)

    def pause(self, session):
        """pause policy"""
        body = {"action": "pause"}
        self._action(session, body)

    def resume(self, session):
        """resume policy"""
        body = {"action": "resume"}
        self._action(session, body)
class Instance(resource.Resource):
    """Instance belonging to an auto-scaling group."""
    resource_key = 'scaling_group_instance'
    resources_key = 'scaling_group_instances'
    # ok, we just fix the base path to list because there are no common
    # rules for the operations for instance
    base_path = '/scaling_group_instance/%(scaling_group_id)s/list'
    query_marker_key = 'start_number'
    service = auto_scaling_service.AutoScalingService()

    # capabilities
    allow_create = True
    allow_list = True
    allow_get = True
    allow_delete = True

    _query_mapping = resource.QueryParameters(
        "scaling_group_id", "health_status", "limit",
        lifecycle_status="life_cycle_state",
        marker=query_marker_key)

    #: Properties
    #: AutoScaling instance id
    id = resource.Body('instance_id')
    #: AutoScaling instance name
    name = resource.Body('instance_name')
    #: Id of AutoScaling group the instance belongs to
    scaling_group_id = resource.URI('scaling_group_id')
    #: Name of AutoScaling group the instance belongs to
    scaling_group_name = resource.Body('scaling_group_name')
    #: Id of AutoScaling config the instance create with
    scaling_configuration_id = resource.Body('scaling_configuration_id')
    #: Name of AutoScaling config the instance create with
    scaling_configuration_name = resource.Body('scaling_configuration_name')
    #: AutoScaling instance lifecycle state, valid values include:
    #: ``INSERVICE``, ``PENDING``, ``REMOVING``
    lifecycle_state = resource.Body('life_cycle_state')
    #: AutoScaling instance health state, valid values include:
    #: ``INITIALIZING``, ``NORMAL``, ``ERROR``
    health_status = resource.Body('health_status')
    #: AutoScaling instance create time
    create_time = resource.Body('create_time')

    @classmethod
    def get_next_marker(cls, response_json, yielded, query_params):
        # Local import avoids a circular import at module load time.
        from openstack.auto_scaling.v1 import get_next_marker
        return get_next_marker(response_json, yielded)

    def remove(self, session, delete_instance=False, ignore_missing=True):
        """Remove an instance of auto scaling group

        precondition:
        * the instance must in ``INSERVICE`` status
        * after remove the instance number of auto scaling group should not
          be less than min instance number
        * The owner auto scaling group should not in scaling status

        :param session: openstack session
        :param bool delete_instance: When set to ``True``, instance will be
            deleted after removed
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.ResourceNotFound` will be raised
            when the instance does not exist.  When set to ``True``, no
            exception will be set when attempting to delete a nonexistent
            instance.
        :return: the raw delete response
        """
        # NOTE(review): ignore_missing is accepted for interface
        # compatibility but not acted on here -- confirm with callers.
        uri = utils.urljoin("/scaling_group_instance", self.id)
        endpoint_override = self.service.get_endpoint_override()
        delete_instance = "yes" if delete_instance else "no"
        return session.delete(uri,
                              endpoint_filter=self.service,
                              endpoint_override=endpoint_override,
                              headers={"Accept": ""},
                              params={"instance_delete": delete_instance})

    def _batch_action(self, session, json_body):
        """POST a batch ADD/REMOVE action to the group's action endpoint."""
        endpoint_override = self.service.get_endpoint_override()
        uri = utils.urljoin("/scaling_group_instance",
                            self.scaling_group_id,
                            "action")
        return session.post(uri,
                            headers={"Accept": "*"},
                            endpoint_filter=self.service,
                            endpoint_override=endpoint_override,
                            json=json_body)

    @staticmethod
    def _instance_ids(instances):
        """Normalize a mixed list of Instance objects / id strings to ids."""
        return [
            instance.id if isinstance(instance, Instance) else instance
            for instance in instances
        ]

    def batch_remove(self, session, instances, delete_instance=False):
        """batch remove auto-scaling instances

        make sure all instances should belong to this auto-scaling group

        :param session: openstack session
        :param list instances: The list item value can be the ID of an
            instance or a
            :class:`~openstack.auto_scaling.v2.instance.Instance` instance
        :param bool delete_instance: When set to ``True``, instance will be
            deleted after removed
        :return: the raw batch-action response
        """
        json_body = {
            "action": "REMOVE",
            "instances_id": self._instance_ids(instances),
            "instance_delete": "yes" if delete_instance else "no"
        }
        return self._batch_action(session, json_body)

    def batch_add(self, session, instances):
        """batch add auto-scaling instances

        :param session: openstack session
        :param list instances: The list item value can be the ID of an
            instance or a
            :class:`~openstack.auto_scaling.v2.instance.Instance` instance
        :return: the raw batch-action response
        """
        json_body = {"action": "ADD",
                     "instances_id": self._instance_ids(instances)}
        return self._batch_action(session, json_body)
class Group(resource.Resource):
    """Auto-scaling group resource."""
    resource_key = "scaling_group"
    resources_key = "scaling_groups"
    base_path = "/scaling_group"
    query_marker_key = "start_number"
    service = auto_scaling_service.AutoScalingService()

    # capabilities
    allow_create = True
    allow_list = True
    allow_get = True
    allow_delete = True
    allow_update = True

    # NOTE: the original listed "scaling_configuration_id" twice; the
    # duplicate positional entry has been removed.
    _query_mapping = resource.QueryParameters("scaling_configuration_id",
                                              "limit",
                                              "project_id",
                                              "start_number",
                                              "enterprise_project_id",
                                              name="scaling_group_name",
                                              status="scaling_group_status",
                                              marker=query_marker_key)

    #: Properties
    #: AutoScaling group ID
    id = resource.Body("scaling_group_id")
    #: AutoScaling group name
    name = resource.Body("scaling_group_name")
    #: AutoScaling group status,
    #: valid values includes: ``INSERVICE``, ``PAUSED``, ``ERROR``
    status = resource.Body("scaling_group_status")
    #: AutoScaling group scaling status, *Type: bool*
    is_scaling = resource.Body("is_scaling", type=bool)
    #: AutoScaling group detail
    detail = resource.Body("detail")
    #: VPC id - (Router Id)
    vpc_id = resource.Body("vpc_id")
    #: network id list - (Subnet)
    networks = resource.Body("networks", type=list)
    #: security group id list
    security_groups = resource.Body("security_groups", type=list)
    #: Auto Scaling Config ID reference, used for creating instance
    scaling_configuration_id = resource.Body("scaling_configuration_id")
    #: Auto Scaling Config name
    scaling_configuration_name = resource.Body("scaling_configuration_name")
    #: Current alive instance number
    current_instance_number = resource.Body("current_instance_number")
    #: Desire alive instance number
    desire_instance_number = resource.Body("desire_instance_number")
    #: min alive instance number
    min_instance_number = resource.Body("min_instance_number")
    #: max alive instance number
    max_instance_number = resource.Body("max_instance_number")
    #: CoolDown time, only work with `ALARM` policy.
    #: default is 900, valid range is 0-86400
    cool_down_time = resource.Body("cool_down_time")
    #: load balancer listener id reference
    lb_listener_id = resource.Body("lb_listener_id")
    #: Health periodic audit method, Valid values includes: ``ELB_AUDIT``,
    #: ``NOVA_AUDIT``, ELB_AUDIT and lb_listener_id are used in pairs.
    health_periodic_audit_method = resource.Body(
        "health_periodic_audit_method")
    #: Health periodic audit time, valid values includes: ``5``, ``15``,
    #: ``60``, ``180``, default is ``5`` minutes
    health_periodic_audit_time = resource.Body("health_periodic_audit_time")
    #: Instance terminate policy, valid values includes:
    #: ``OLD_CONFIG_OLD_INSTANCE`` (default), ``OLD_CONFIG_NEW_INSTANCE``,
    #: ``OLD_INSTANCE``, ``NEW_INSTANCE``
    instance_terminate_policy = resource.Body("instance_terminate_policy")
    #: notification methods, ``EMAIL``
    notifications = resource.Body("notifications")
    #: Should delete public ip when terminate instance, default ``false``
    delete_publicip = resource.Body("delete_publicip", type=bool)
    #: availability zones
    availability_zones = resource.Body("available_zones", type=list)
    #: Create time of the group
    create_time = resource.Body("create_time")
    #: Grace period (seconds, 0-86400; default 600) of the health status
    #: check.  It starts once the instance joins the group and is enabled;
    #: the health check runs after the grace period ends.  Only effective
    #: when the health check mode is ``ELB_AUDIT``.
    health_periodic_audit_grace_period = resource.Body(
        "health_periodic_audit_grace_period", type=int)
    #: lbaas listeners
    lbaas_listeners = resource.Body("lbaas_listeners", type=list)
    #: enterprise project id
    enterprise_project_id = resource.Body("enterprise_project_id")
    #: cloud location id
    cloud_location_id = resource.Body("cloud_location_id")

    @classmethod
    def get_next_marker(cls, response_json, yielded, query_params):
        # Local import avoids a circular import at module load time.
        from openstack.auto_scaling.v1 import get_next_marker
        return get_next_marker(response_json, yielded)

    def _action(self, session, body):
        """Perform group actions given the message body."""
        url = utils.urljoin(self.base_path, self.id, "action")
        endpoint_override = self.service.get_endpoint_override()
        return session.post(url,
                            endpoint_filter=self.service,
                            endpoint_override=endpoint_override,
                            json=body,
                            headers={})

    def resume(self, session):
        """resume group"""
        body = {"action": "resume"}
        self._action(session, body)

    def pause(self, session):
        """pause group"""
        body = {"action": "pause"}
        self._action(session, body)