class AWSScalingPolicy(heat_sp.AutoScalingPolicy):
    """AWS CloudFormation-compatible scaling policy.

    Thin wrapper over the Heat-native AutoScalingPolicy that exposes the
    AWS CFN property and attribute names (e.g. ``ScalingAdjustment``)
    instead of the native ones.
    """

    # Template-facing property keys (AWS CFN spellings).
    PROPERTIES = (
        AUTO_SCALING_GROUP_NAME, SCALING_ADJUSTMENT, ADJUSTMENT_TYPE,
        COOLDOWN, MIN_ADJUSTMENT_STEP,
    ) = (
        'AutoScalingGroupName', 'ScalingAdjustment', 'AdjustmentType',
        'Cooldown', 'MinAdjustmentStep',
    )

    # Resolvable attribute keys.
    ATTRIBUTES = (
        ALARM_URL,
    ) = (
        'AlarmUrl',
    )

    properties_schema = {
        AUTO_SCALING_GROUP_NAME: properties.Schema(
            properties.Schema.STRING,
            _('AutoScaling group name to apply policy to.'),
            required=True
        ),
        SCALING_ADJUSTMENT: properties.Schema(
            properties.Schema.INTEGER,
            _('Size of adjustment.'),
            required=True,
            update_allowed=True
        ),
        ADJUSTMENT_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Type of adjustment (absolute or percentage).'),
            required=True,
            constraints=[
                constraints.AllowedValues(
                    [sc_util.CFN_CHANGE_IN_CAPACITY,
                     sc_util.CFN_EXACT_CAPACITY,
                     sc_util.CFN_PERCENT_CHANGE_IN_CAPACITY]),
            ],
            update_allowed=True
        ),
        COOLDOWN: properties.Schema(
            properties.Schema.INTEGER,
            _('Cooldown period, in seconds.'),
            update_allowed=True
        ),
        MIN_ADJUSTMENT_STEP: properties.Schema(
            properties.Schema.INTEGER,
            _('Minimum number of resources that are added or removed '
              'when the AutoScaling group scales up or down. This can '
              'be used only when specifying PercentChangeInCapacity '
              'for the AdjustmentType property.'),
            constraints=[
                constraints.Range(
                    min=0,
                ),
            ],
            update_allowed=True
        ),
    }

    attributes_schema = {
        ALARM_URL: attributes.Schema(
            _("A signed url to handle the alarm. (Heat extension)."),
            type=attributes.Schema.STRING
        ),
    }

    def _validate_min_adjustment_step(self):
        """Reject MinAdjustmentStep unless AdjustmentType is percent-based.

        :raises exception.ResourcePropertyValueDependency: if
            MinAdjustmentStep is set while AdjustmentType is anything other
            than PercentChangeInCapacity.
        """
        adjustment_type = self.properties.get(self.ADJUSTMENT_TYPE)
        adjustment_step = self.properties.get(self.MIN_ADJUSTMENT_STEP)
        if (adjustment_type != sc_util.CFN_PERCENT_CHANGE_IN_CAPACITY
                and adjustment_step is not None):
            raise exception.ResourcePropertyValueDependency(
                prop1=self.MIN_ADJUSTMENT_STEP,
                prop2=self.ADJUSTMENT_TYPE,
                value=sc_util.CFN_PERCENT_CHANGE_IN_CAPACITY)

    def get_reference_id(self):
        """Return the signed EC2 alarm URL once created, else the name."""
        if self.resource_id is not None:
            return six.text_type(self._get_ec2_signed_url())
        else:
            return six.text_type(self.name)
class QoSBandwidthLimitRule(QoSRule):
    """A resource for Neutron QoS bandwidth limit rule.

    This rule can be associated with QoS policy, and then the policy
    can be used by neutron port and network, to provide bandwidth limit
    QoS capabilities.

    The default policy usage of this resource is limited to
    administrators only.
    """

    PROPERTIES = (
        MAX_BANDWIDTH, MAX_BURST_BANDWIDTH,
    ) = (
        'max_kbps', 'max_burst_kbps',
    )

    properties_schema = {
        MAX_BANDWIDTH: properties.Schema(
            properties.Schema.INTEGER,
            _('Max bandwidth in kbps.'),
            required=True,
            update_allowed=True,
            constraints=[
                constraints.Range(min=0)
            ]
        ),
        MAX_BURST_BANDWIDTH: properties.Schema(
            properties.Schema.INTEGER,
            _('Max burst bandwidth in kbps.'),
            update_allowed=True,
            constraints=[
                constraints.Range(min=0)
            ],
            default=0
        )
    }

    # Fold in the common QoS rule properties (e.g. the parent policy
    # reference) declared on the base class.
    properties_schema.update(QoSRule.properties_schema)

    def handle_create(self):
        """Create the rule under the parent QoS policy in Neutron."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        # The policy reference selects the URL path, it is not part of
        # the rule body sent to Neutron.
        props.pop(self.POLICY)

        rule = self.client().create_bandwidth_limit_rule(
            self.policy_id,
            {'bandwidth_limit_rule': props})['bandwidth_limit_rule']

        self.resource_id_set(rule['id'])

    def handle_delete(self):
        """Delete the rule; tolerate a rule that is already gone."""
        if self.resource_id is None:
            return

        with self.client_plugin().ignore_not_found:
            self.client().delete_bandwidth_limit_rule(
                self.resource_id, self.policy_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed properties to Neutron."""
        if prop_diff:
            self.client().update_bandwidth_limit_rule(
                self.resource_id,
                self.policy_id,
                {'bandwidth_limit_rule': prop_diff})

    def _show_resource(self):
        """Return the current rule body as reported by Neutron."""
        return self.client().show_bandwidth_limit_rule(
            self.resource_id, self.policy_id)['bandwidth_limit_rule']
class Router(neutron.NeutronResource):
    """A resource that implements Neutron router.

    Router is a physical or virtual network device that passes network
    traffic between different networks.
    """

    required_service_extension = 'router'

    PROPERTIES = (
        NAME, EXTERNAL_GATEWAY, VALUE_SPECS, ADMIN_STATE_UP,
        L3_AGENT_ID, L3_AGENT_IDS, DISTRIBUTED, HA,
    ) = ('name', 'external_gateway_info', 'value_specs', 'admin_state_up',
         'l3_agent_id', 'l3_agent_ids', 'distributed', 'ha')

    _EXTERNAL_GATEWAY_KEYS = (
        EXTERNAL_GATEWAY_NETWORK, EXTERNAL_GATEWAY_ENABLE_SNAT,
        EXTERNAL_GATEWAY_FIXED_IPS,
    ) = (
        'network', 'enable_snat', 'external_fixed_ips',
    )

    _EXTERNAL_GATEWAY_FIXED_IPS_KEYS = (
        IP_ADDRESS, SUBNET,
    ) = ('ip_address', 'subnet')

    ATTRIBUTES = (
        STATUS, EXTERNAL_GATEWAY_INFO_ATTR, NAME_ATTR, ADMIN_STATE_UP_ATTR,
        TENANT_ID,
    ) = (
        'status', 'external_gateway_info', 'name', 'admin_state_up',
        'tenant_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name of the router.'),
            update_allowed=True),
        EXTERNAL_GATEWAY: properties.Schema(
            properties.Schema.MAP,
            _('External network gateway configuration for a router.'),
            schema={
                EXTERNAL_GATEWAY_NETWORK: properties.Schema(
                    properties.Schema.STRING,
                    _('ID or name of the external network for the gateway.'),
                    required=True,
                    update_allowed=True),
                EXTERNAL_GATEWAY_ENABLE_SNAT: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('Enables Source NAT on the router gateway. NOTE: The '
                      'default policy setting in Neutron restricts usage of '
                      'this property to administrative users only.'),
                    update_allowed=True),
                EXTERNAL_GATEWAY_FIXED_IPS: properties.Schema(
                    properties.Schema.LIST,
                    _('External fixed IP addresses for the gateway.'),
                    schema=properties.Schema(
                        properties.Schema.MAP,
                        schema={
                            IP_ADDRESS: properties.Schema(
                                properties.Schema.STRING,
                                _('External fixed IP address.'),
                                constraints=[
                                    constraints.CustomConstraint('ip_addr'),
                                ]),
                            SUBNET: properties.Schema(
                                properties.Schema.STRING,
                                _('Subnet of external fixed IP address.'),
                                constraints=[
                                    constraints.CustomConstraint(
                                        'neutron.subnet')
                                ]),
                        }),
                    update_allowed=True,
                    support_status=support.SupportStatus(version='6.0.0')),
            },
            update_allowed=True),
        VALUE_SPECS: properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the creation request.'),
            default={},
            update_allowed=True),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the router.'),
            default=True,
            update_allowed=True),
        L3_AGENT_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of the L3 agent. NOTE: The default policy setting in '
              'Neutron restricts usage of this property to administrative '
              'users only.'),
            update_allowed=True,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2015.1',
                    message=_('Use property %s.') % L3_AGENT_IDS,
                    previous_status=support.SupportStatus(version='2014.1'))),
        ),
        L3_AGENT_IDS: properties.Schema(
            properties.Schema.LIST,
            _('ID list of the L3 agent. User can specify multi-agents '
              'for highly available router. NOTE: The default policy '
              'setting in Neutron restricts usage of this property to '
              'administrative users only.'),
            schema=properties.Schema(properties.Schema.STRING, ),
            update_allowed=True,
            support_status=support.SupportStatus(version='2015.1')),
        DISTRIBUTED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Indicates whether or not to create a distributed router. '
              'NOTE: The default policy setting in Neutron restricts usage '
              'of this property to administrative users only. This property '
              'can not be used in conjunction with the L3 agent ID.'),
            support_status=support.SupportStatus(version='2015.1')),
        HA: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Indicates whether or not to create a highly available router. '
              'NOTE: The default policy setting in Neutron restricts usage '
              'of this property to administrative users only. And now neutron '
              'do not support distributed and ha at the same time.'),
            support_status=support.SupportStatus(version='2015.1')),
    }

    attributes_schema = {
        STATUS: attributes.Schema(
            _("The status of the router."),
            type=attributes.Schema.STRING),
        EXTERNAL_GATEWAY_INFO_ATTR: attributes.Schema(
            _("Gateway network for the router."),
            type=attributes.Schema.MAP),
        NAME_ATTR: attributes.Schema(
            _("Friendly name of the router."),
            type=attributes.Schema.STRING),
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _("Administrative state of the router."),
            type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(
            _("Tenant owning the router."),
            type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        """Resolve gateway network/subnet names to IDs and fold the
        deprecated singular L3_AGENT_ID into the L3_AGENT_IDS list.
        """
        rules = [
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.EXTERNAL_GATEWAY, self.EXTERNAL_GATEWAY_NETWORK],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='network'),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.EXTERNAL_GATEWAY, self.EXTERNAL_GATEWAY_FIXED_IPS,
                 self.SUBNET],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='subnet')
        ]
        if props.get(self.L3_AGENT_ID):
            rules.extend([
                translation.TranslationRule(
                    props,
                    translation.TranslationRule.ADD,
                    [self.L3_AGENT_IDS],
                    [props.get(self.L3_AGENT_ID)]),
                translation.TranslationRule(
                    props,
                    translation.TranslationRule.DELETE,
                    [self.L3_AGENT_ID])
            ])
        return rules

    def validate(self):
        """Enforce the mutual-exclusion rules between the agent/HA props.

        :raises exception.ResourcePropertyConflict: for conflicting
            combinations of L3 agent, distributed and HA settings.
        :raises exception.StackValidationFailed: if a non-HA router names
            more than one L3 agent.
        """
        super(Router, self).validate()
        is_distributed = self.properties[self.DISTRIBUTED]
        l3_agent_id = self.properties[self.L3_AGENT_ID]
        l3_agent_ids = self.properties[self.L3_AGENT_IDS]
        is_ha = self.properties[self.HA]
        if l3_agent_id and l3_agent_ids:
            raise exception.ResourcePropertyConflict(self.L3_AGENT_ID,
                                                     self.L3_AGENT_IDS)
        # do not specific l3 agent when creating a distributed router
        if is_distributed and (l3_agent_id or l3_agent_ids):
            raise exception.ResourcePropertyConflict(
                self.DISTRIBUTED,
                "/".join([self.L3_AGENT_ID, self.L3_AGENT_IDS]))
        if is_ha and is_distributed:
            raise exception.ResourcePropertyConflict(self.DISTRIBUTED,
                                                     self.HA)
        if not is_ha and l3_agent_ids and len(l3_agent_ids) > 1:
            msg = _('Non HA routers can only have one L3 agent.')
            raise exception.StackValidationFailed(message=msg)

    def add_dependencies(self, deps):
        """Make this router depend on subnets of its external network so the
        gateway can be set only after those subnets exist.
        """
        super(Router, self).add_dependencies(deps)
        external_gw = self.properties[self.EXTERNAL_GATEWAY]
        if external_gw:
            external_gw_net = external_gw.get(self.EXTERNAL_GATEWAY_NETWORK)
            for res in six.itervalues(self.stack):
                if res.has_interface('OS::Neutron::Subnet'):
                    subnet_net = res.properties.get(subnet.Subnet.NETWORK)
                    if subnet_net == external_gw_net:
                        deps += (self, res)

    def _resolve_gateway(self, props):
        """Rewrite the user-facing gateway dict into Neutron API form,
        dropping unset optional keys.
        """
        gateway = props.get(self.EXTERNAL_GATEWAY)
        if gateway:
            gateway['network_id'] = gateway.pop(self.EXTERNAL_GATEWAY_NETWORK)
            if gateway[self.EXTERNAL_GATEWAY_ENABLE_SNAT] is None:
                del gateway[self.EXTERNAL_GATEWAY_ENABLE_SNAT]
            if gateway[self.EXTERNAL_GATEWAY_FIXED_IPS] is None:
                del gateway[self.EXTERNAL_GATEWAY_FIXED_IPS]
            else:
                self._resolve_subnet(gateway)
        return props

    def _get_l3_agent_list(self, props):
        """Pop the agent-scheduling keys out of *props* (they are not part
        of the router body) and return the effective list of agent IDs.
        """
        l3_agent_id = props.pop(self.L3_AGENT_ID, None)
        l3_agent_ids = props.pop(self.L3_AGENT_IDS, None)
        if not l3_agent_ids and l3_agent_id:
            l3_agent_ids = [l3_agent_id]

        return l3_agent_ids

    def _resolve_subnet(self, gateway):
        """Strip unset keys from each fixed-ip entry and rename the
        user-facing 'subnet' key to the API's 'subnet_id'.
        """
        external_gw_fixed_ips = gateway[self.EXTERNAL_GATEWAY_FIXED_IPS]
        for fixed_ip in external_gw_fixed_ips:
            # BUG FIX: iterate over a snapshot of the items. Popping keys
            # while iterating six.iteritems(fixed_ip) directly raises
            # "dictionary changed size during iteration" whenever an
            # optional key is unset (value is None).
            for key, value in list(six.iteritems(fixed_ip)):
                if value is None:
                    fixed_ip.pop(key)
            if self.SUBNET in fixed_ip:
                fixed_ip['subnet_id'] = fixed_ip.pop(self.SUBNET)

    def handle_create(self):
        """Create the router, then schedule it onto any requested agents."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        self._resolve_gateway(props)
        l3_agent_ids = self._get_l3_agent_list(props)

        router = self.client().create_router({'router': props})['router']
        self.resource_id_set(router['id'])

        if l3_agent_ids:
            self._replace_agent(l3_agent_ids)

    def _show_resource(self):
        """Return the current router body as reported by Neutron."""
        return self.client().show_router(self.resource_id)['router']

    def check_create_complete(self, *args):
        # Renamed the local (was 'attributes') so it no longer shadows the
        # module-level 'attributes' import used by attributes_schema.
        router_attrs = self._show_resource()
        return self.is_built(router_attrs)

    def handle_delete(self):
        """Delete the router; a missing router is treated as success."""
        if not self.resource_id:
            return
        try:
            self.client().delete_router(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply gateway, agent-scheduling and plain property changes."""
        if self.EXTERNAL_GATEWAY in prop_diff:
            self._resolve_gateway(prop_diff)

        if self.L3_AGENT_IDS in prop_diff or self.L3_AGENT_ID in prop_diff:
            # This also pops the agent keys so they are not sent to
            # update_router below.
            l3_agent_ids = self._get_l3_agent_list(prop_diff)
            self._replace_agent(l3_agent_ids)

        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_router(self.resource_id,
                                        {'router': prop_diff})

    def _replace_agent(self, l3_agent_ids=None):
        """Unschedule the router from all current L3 agents, then schedule
        it onto each agent in *l3_agent_ids* (if any).
        """
        ret = self.client().list_l3_agent_hosting_routers(
            self.resource_id)
        for agent in ret['agents']:
            self.client().remove_router_from_l3_agent(
                agent['id'], self.resource_id)
        if l3_agent_ids:
            for l3_agent_id in l3_agent_ids:
                self.client().add_router_to_l3_agent(
                    l3_agent_id, {'router_id': self.resource_id})
class KeystoneRegion(resource.Resource):
    """Heat Template Resource for Keystone Region.

    This plug-in helps to create, update and delete a keystone region.
    Also it can be used for enable or disable a given keystone region.
    """

    support_status = support.SupportStatus(
        version='6.0.0',
        message=_('Supported versions: keystone v3'))

    default_client_name = 'keystone'

    entity = 'regions'

    PROPERTIES = (
        ID, PARENT_REGION, DESCRIPTION, ENABLED
    ) = (
        'id', 'parent_region', 'description', 'enabled'
    )

    properties_schema = {
        ID: properties.Schema(
            properties.Schema.STRING,
            _('The user-defined region ID and should unique to the OpenStack '
              'deployment. While creating the region, heat will url encode '
              'this ID.')
        ),
        PARENT_REGION: properties.Schema(
            properties.Schema.STRING,
            _('If the region is hierarchically a child of another region, '
              'set this parameter to the ID of the parent region.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('keystone.region')]
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of keystone region.'),
            update_allowed=True
        ),
        ENABLED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('This region is enabled or disabled.'),
            default=True,
            update_allowed=True
        )
    }

    def client(self):
        # Unwrap the raw keystoneclient from Heat's client-plugin wrapper.
        return super(KeystoneRegion, self).client().client

    def handle_create(self):
        """Create the region, url-encoding the user-supplied ID."""
        region_id = self.properties[self.ID]
        description = self.properties[self.DESCRIPTION]
        parent_region = self.properties[self.PARENT_REGION]
        enabled = self.properties[self.ENABLED]

        region = self.client().regions.create(
            # Region IDs appear in keystone URLs, hence the quoting
            # (documented in the ID property above).
            id=parse.quote(region_id) if region_id else None,
            parent_region=parent_region,
            description=description,
            enabled=enabled)
        self.resource_id_set(region.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed properties to keystone.

        NOTE(review): unchanged keys are passed as None here — presumably
        keystoneclient treats None as "leave unchanged"; confirm against
        the client's regions.update() behavior.
        """
        if prop_diff:
            description = prop_diff.get(self.DESCRIPTION)
            enabled = prop_diff.get(self.ENABLED)
            parent_region = prop_diff.get(self.PARENT_REGION)

            self.client().regions.update(
                region=self.resource_id,
                parent_region=parent_region,
                description=description,
                enabled=enabled
            )
class LoadBalancer(stack_resource.StackResource):
    """Implements a HAProxy-bearing instance as a nested stack.

    The template for the nested stack can be redefined with
    ``loadbalancer_template`` option in ``heat.conf``.

    Generally the image used for the instance must have the following
    packages installed or available for installation at runtime::

        - heat-cfntools and its dependencies like python-psutil
        - cronie
        - socat
        - haproxy

    Current default builtin template uses Fedora 21 x86_64 base cloud image
    (https://getfedora.org/cloud/download/)
    and apart from installing packages goes through some hoops around SELinux
    due to pecularities of heat-cfntools.
    """

    PROPERTIES = (
        AVAILABILITY_ZONES, HEALTH_CHECK, INSTANCES, LISTENERS,
        APP_COOKIE_STICKINESS_POLICY, LBCOOKIE_STICKINESS_POLICY,
        SECURITY_GROUPS, SUBNETS,
    ) = (
        'AvailabilityZones', 'HealthCheck', 'Instances', 'Listeners',
        'AppCookieStickinessPolicy', 'LBCookieStickinessPolicy',
        'SecurityGroups', 'Subnets',
    )

    _HEALTH_CHECK_KEYS = (
        HEALTH_CHECK_HEALTHY_THRESHOLD, HEALTH_CHECK_INTERVAL,
        HEALTH_CHECK_TARGET, HEALTH_CHECK_TIMEOUT,
        HEALTH_CHECK_UNHEALTHY_THRESHOLD,
    ) = (
        'HealthyThreshold', 'Interval',
        'Target', 'Timeout',
        'UnhealthyThreshold',
    )

    _LISTENER_KEYS = (
        LISTENER_INSTANCE_PORT, LISTENER_LOAD_BALANCER_PORT,
        LISTENER_PROTOCOL, LISTENER_SSLCERTIFICATE_ID,
        LISTENER_POLICY_NAMES,
    ) = (
        'InstancePort', 'LoadBalancerPort',
        'Protocol', 'SSLCertificateId',
        'PolicyNames',
    )

    ATTRIBUTES = (
        CANONICAL_HOSTED_ZONE_NAME, CANONICAL_HOSTED_ZONE_NAME_ID, DNS_NAME,
        SOURCE_SECURITY_GROUP_GROUP_NAME, SOURCE_SECURITY_GROUP_OWNER_ALIAS,
    ) = (
        'CanonicalHostedZoneName', 'CanonicalHostedZoneNameID', 'DNSName',
        'SourceSecurityGroup.GroupName', 'SourceSecurityGroup.OwnerAlias',
    )

    properties_schema = {
        AVAILABILITY_ZONES: properties.Schema(
            properties.Schema.LIST,
            _('The Availability Zones in which to create the load balancer.'),
            required=True),
        HEALTH_CHECK: properties.Schema(
            properties.Schema.MAP,
            _('An application health check for the instances.'),
            schema={
                HEALTH_CHECK_HEALTHY_THRESHOLD: properties.Schema(
                    properties.Schema.INTEGER,
                    _('The number of consecutive health probe successes '
                      'required before moving the instance to the '
                      'healthy state.'),
                    required=True),
                HEALTH_CHECK_INTERVAL: properties.Schema(
                    properties.Schema.INTEGER,
                    _('The approximate interval, in seconds, between '
                      'health checks of an individual instance.'),
                    required=True),
                HEALTH_CHECK_TARGET: properties.Schema(
                    properties.Schema.STRING,
                    _('The port being checked.'),
                    required=True),
                HEALTH_CHECK_TIMEOUT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Health probe timeout, in seconds.'),
                    required=True),
                HEALTH_CHECK_UNHEALTHY_THRESHOLD: properties.Schema(
                    properties.Schema.INTEGER,
                    _('The number of consecutive health probe failures '
                      'required before moving the instance to the '
                      'unhealthy state'),
                    required=True),
            }),
        INSTANCES: properties.Schema(
            properties.Schema.LIST,
            _('The list of instance IDs load balanced.'),
            update_allowed=True),
        LISTENERS: properties.Schema(
            properties.Schema.LIST,
            _('One or more listeners for this load balancer.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    LISTENER_INSTANCE_PORT: properties.Schema(
                        properties.Schema.INTEGER,
                        _('TCP port on which the instance server is '
                          'listening.'),
                        required=True),
                    LISTENER_LOAD_BALANCER_PORT: properties.Schema(
                        properties.Schema.INTEGER,
                        _('The external load balancer port number.'),
                        required=True),
                    LISTENER_PROTOCOL: properties.Schema(
                        properties.Schema.STRING,
                        _('The load balancer transport protocol to use.'),
                        required=True,
                        constraints=[
                            constraints.AllowedValues(['TCP', 'HTTP']),
                        ]),
                    LISTENER_SSLCERTIFICATE_ID: properties.Schema(
                        properties.Schema.STRING,
                        _('Not Implemented.'),
                        implemented=False),
                    LISTENER_POLICY_NAMES: properties.Schema(
                        properties.Schema.LIST,
                        _('Not Implemented.'),
                        implemented=False),
                },
            ),
            required=True),
        APP_COOKIE_STICKINESS_POLICY: properties.Schema(
            properties.Schema.STRING,
            _('Not Implemented.'),
            implemented=False),
        LBCOOKIE_STICKINESS_POLICY: properties.Schema(
            properties.Schema.STRING,
            _('Not Implemented.'),
            implemented=False),
        SECURITY_GROUPS: properties.Schema(
            properties.Schema.LIST,
            _('List of Security Groups assigned on current LB.'),
            update_allowed=True),
        SUBNETS: properties.Schema(
            properties.Schema.LIST,
            _('Not Implemented.'),
            implemented=False),
    }

    attributes_schema = {
        CANONICAL_HOSTED_ZONE_NAME: attributes.Schema(
            _("The name of the hosted zone that is associated with the "
              "LoadBalancer."),
            type=attributes.Schema.STRING),
        CANONICAL_HOSTED_ZONE_NAME_ID: attributes.Schema(
            _("The ID of the hosted zone name that is associated with the "
              "LoadBalancer."),
            type=attributes.Schema.STRING),
        DNS_NAME: attributes.Schema(
            _("The DNS name for the LoadBalancer."),
            type=attributes.Schema.STRING),
        SOURCE_SECURITY_GROUP_GROUP_NAME: attributes.Schema(
            _("The security group that you can use as part of your inbound "
              "rules for your LoadBalancer's back-end instances."),
            type=attributes.Schema.STRING),
        SOURCE_SECURITY_GROUP_OWNER_ALIAS: attributes.Schema(
            _("Owner of the source security group."),
            type=attributes.Schema.STRING),
    }

    def _haproxy_config_global(self):
        """Return the global/defaults section of the haproxy config."""
        return '''
global
    daemon
    maxconn 256
    stats socket /tmp/.haproxy-stats

defaults
    mode http
    timeout connect 5000ms
    timeout client 50000ms
    timeout server 50000ms
'''

    def _haproxy_config_frontend(self):
        """Return the frontend section, bound to the first listener's port.

        Only the first listener is honoured (see _haproxy_config).
        """
        listener = self.properties[self.LISTENERS][0]
        lb_port = listener[self.LISTENER_LOAD_BALANCER_PORT]
        return '''
frontend http
    bind *:%s
    default_backend servers
''' % (lb_port)

    def _haproxy_config_backend(self):
        """Return the backend section, with a check timeout if a
        HealthCheck was configured.
        """
        health_chk = self.properties[self.HEALTH_CHECK]
        if health_chk:
            timeout = int(health_chk[self.HEALTH_CHECK_TIMEOUT])
            timeout_check = 'timeout check %ds' % timeout
            spaces = '    '
        else:
            timeout_check = ''
            spaces = ''
        return '''
backend servers
    balance roundrobin
    option http-server-close
    option forwardfor
    option httpchk
%s%s
''' % (spaces, timeout_check)

    def _haproxy_config_servers(self, instances):
        """Return one 'server' line per balanced instance.

        Instances without a resolvable address fall back to 0.0.0.0.
        """
        listener = self.properties[self.LISTENERS][0]
        inst_port = listener[self.LISTENER_INSTANCE_PORT]
        spaces = '    '
        check = ''
        health_chk = self.properties[self.HEALTH_CHECK]
        if health_chk:
            check = ' check inter %ss fall %s rise %s' % (
                health_chk[self.HEALTH_CHECK_INTERVAL],
                health_chk[self.HEALTH_CHECK_UNHEALTHY_THRESHOLD],
                health_chk[self.HEALTH_CHECK_HEALTHY_THRESHOLD])

        servers = []
        nova_cp = self.client_plugin('nova')
        # enumerate replaces the manual n = n + 1 counter.
        for n, inst in enumerate(instances or [], start=1):
            ip = nova_cp.server_to_ipaddress(inst) or '0.0.0.0'
            # Lazy %-args: let logging do the interpolation only when
            # DEBUG is enabled (was an eager '%' format).
            LOG.debug('haproxy server:%s', ip)
            servers.append('%sserver server%d %s:%s%s' % (spaces, n,
                                                          ip, inst_port,
                                                          check))
        return '\n'.join(servers)

    def _haproxy_config(self, instances):
        """Assemble the full haproxy.cfg text for *instances*."""
        # initial simplifications:
        # - only one Listener
        # - only http (no tcp or ssl)
        #
        # option httpchk HEAD /check.txt HTTP/1.0
        return '%s%s%s%s\n' % (self._haproxy_config_global(),
                               self._haproxy_config_frontend(),
                               self._haproxy_config_backend(),
                               self._haproxy_config_servers(instances))

    def get_parsed_template(self):
        """Return the nested-stack template, preferring the operator's
        custom template from heat.conf over the builtin default.
        """
        if cfg.CONF.loadbalancer_template:
            with open(cfg.CONF.loadbalancer_template) as templ_fd:
                LOG.info(_LI('Using custom loadbalancer template %s'),
                         cfg.CONF.loadbalancer_template)
                contents = templ_fd.read()
        else:
            contents = lb_template_default
        return template_format.parse(contents)

    def child_params(self):
        """Build the parameter map passed to the nested stack."""
        params = {}

        params['SecurityGroups'] = self.properties[self.SECURITY_GROUPS]
        # If the owning stack defines KeyName, we use that key for the nested
        # template, otherwise use no key
        for magic_param in ('KeyName', 'LbFlavor', 'LBTimeout', 'LbImageId'):
            if magic_param in self.stack.parameters:
                params[magic_param] = self.stack.parameters[magic_param]

        return params

    def child_template(self):
        """Return the nested template, stripping KeyName when the owning
        stack does not define one.
        """
        templ = self.get_parsed_template()

        # If the owning stack defines KeyName, we use that key for the nested
        # template, otherwise use no key
        if 'KeyName' not in self.stack.parameters:
            del templ['Resources']['LB_instance']['Properties']['KeyName']
            del templ['Parameters']['KeyName']
        return templ

    def handle_create(self):
        """Create the nested stack, seeding haproxy.cfg when instances
        are already known.
        """
        templ = self.child_template()
        params = self.child_params()

        if self.properties[self.INSTANCES]:
            md = templ['Resources']['LB_instance']['Metadata']
            files = md['AWS::CloudFormation::Init']['config']['files']
            # Renamed from 'cfg' so the local does not shadow the
            # module-level oslo.config 'cfg' import used elsewhere here.
            haproxy_cfg = self._haproxy_config(
                self.properties[self.INSTANCES])
            files['/etc/haproxy/haproxy.cfg']['content'] = haproxy_cfg

        return self.create_with_template(templ, params)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-generate the Metadata.

        Save it to the db.
        Rely on the cfn-hup to reconfigure HAProxy.
        """
        new_props = json_snippet.properties(self.properties_schema,
                                            self.context)

        # Valid use cases are:
        # - Membership controlled by members property in template
        # - Empty members property in template; membership controlled by
        #   "updates" triggered from autoscaling group.
        # Mixing the two will lead to undefined behaviour.
        if (self.INSTANCES in prop_diff and
                (self.properties[self.INSTANCES] is not None or
                 new_props[self.INSTANCES] is not None)):
            # Renamed from 'cfg' to avoid shadowing the oslo.config module.
            haproxy_cfg = self._haproxy_config(prop_diff[self.INSTANCES])
            md = self.nested()['LB_instance'].metadata_get()
            files = md['AWS::CloudFormation::Init']['config']['files']
            files['/etc/haproxy/haproxy.cfg']['content'] = haproxy_cfg
            self.nested()['LB_instance'].metadata_set(md)

        if self.SECURITY_GROUPS in prop_diff:
            templ = self.child_template()
            params = self.child_params()
            params['SecurityGroups'] = new_props[self.SECURITY_GROUPS]
            self.update_with_template(templ, params)

    def check_update_complete(self, updater):
        """Because we are not calling update_with_template, return True."""
        return True

    def validate(self):
        """Validate any of the provided params."""
        res = super(LoadBalancer, self).validate()
        if res:
            return res

        if (cfg.CONF.loadbalancer_template and
                not os.access(cfg.CONF.loadbalancer_template, os.R_OK)):
            msg = _('Custom LoadBalancer template can not be found')
            raise exception.StackValidationFailed(message=msg)

        health_chk = self.properties[self.HEALTH_CHECK]
        if health_chk:
            interval = float(health_chk[self.HEALTH_CHECK_INTERVAL])
            timeout = float(health_chk[self.HEALTH_CHECK_TIMEOUT])
            if interval < timeout:
                return {'Error': 'Interval must be larger than Timeout'}

    def get_reference_id(self):
        """Refer to this resource by name, matching AWS LB semantics."""
        return six.text_type(self.name)

    def _resolve_attribute(self, name):
        """We don't really support any of these yet."""
        if name == self.DNS_NAME:
            return self.get_output('PublicIp')
        elif name in self.attributes_schema:
            # Not sure if we should return anything for the other attribs
            # since they aren't really supported in any meaningful way
            return ''
class KeystoneUser(resource.Resource,
                   role_assignments.KeystoneRoleAssignmentMixin):
    """Heat Template Resource for Keystone User.

    Users represent an individual API consumer. A user itself must be owned by
    a specific domain, and hence all user names are not globally unique, but
    only unique to their domain.
    """

    support_status = support.SupportStatus(
        version='2015.1',
        message=_('Supported versions: keystone v3'))

    default_client_name = 'keystone'

    entity = 'users'

    PROPERTIES = (
        NAME, DOMAIN, DESCRIPTION, ENABLED, EMAIL, PASSWORD,
        DEFAULT_PROJECT, GROUPS
    ) = (
        'name', 'domain', 'description', 'enabled', 'email', 'password',
        'default_project', 'groups'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of keystone user.'),
            update_allowed=True
        ),
        DOMAIN: properties.Schema(
            properties.Schema.STRING,
            _('Name of keystone domain.'),
            default='default',
            update_allowed=True,
            constraints=[constraints.CustomConstraint('keystone.domain')]
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of keystone user.'),
            default='',
            update_allowed=True
        ),
        ENABLED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Keystone user is enabled or disabled.'),
            default=True,
            update_allowed=True
        ),
        EMAIL: properties.Schema(
            properties.Schema.STRING,
            _('Email address of keystone user.'),
            update_allowed=True
        ),
        PASSWORD: properties.Schema(
            properties.Schema.STRING,
            _('Password of keystone user.'),
            update_allowed=True
        ),
        DEFAULT_PROJECT: properties.Schema(
            properties.Schema.STRING,
            _('Default project of keystone user.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('keystone.project')]
        ),
        GROUPS: properties.Schema(
            properties.Schema.LIST,
            _('Keystone user groups.'),
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.STRING,
                _('Keystone user group.'),
                constraints=[constraints.CustomConstraint('keystone.group')]
            )
        )
    }

    # Role-assignment properties (roles/projects/domains) come from the
    # mixin's schema.
    properties_schema.update(
        role_assignments.KeystoneRoleAssignmentMixin.mixin_properties_schema)

    def validate(self):
        """Validate own properties plus the mixin's role assignments."""
        super(KeystoneUser, self).validate()
        self.validate_assignment_properties()

    def client(self):
        # Unwrap the raw keystoneclient from Heat's client-plugin wrapper.
        return super(KeystoneUser, self).client().client

    def _update_user(self, user_id, domain,
                     new_name=None,
                     new_description=None,
                     new_email=None,
                     new_password=None,
                     new_default_project=None,
                     enabled=None):
        """Send only the changed fields to keystone's users.update.

        Arguments left as None are omitted from the request entirely; if
        nothing changed, no request is made at all.
        """
        values = dict()

        if new_name is not None:
            values['name'] = new_name
        if new_description is not None:
            values['description'] = new_description
        if new_email is not None:
            values['email'] = new_email
        if new_password is not None:
            values['password'] = new_password
        if new_default_project is not None:
            values['default_project'] = new_default_project
        if enabled is not None:
            values['enabled'] = enabled

        # If there're no args above, keystone raises BadRequest error with
        # message about not enough parameters for updating, so return from
        # this method to prevent raising error.
        if not values:
            return

        values['user'] = user_id
        domain = (self.client_plugin().get_domain_id(domain))
        values['domain'] = domain
        return self.client().users.update(**values)

    def _add_user_to_groups(self, user_id, groups):
        """Resolve each group name/ID and add the user to it."""
        if groups is not None:
            group_ids = [self.client_plugin().get_group_id(group)
                         for group in groups]
            for group_id in group_ids:
                self.client().users.add_to_group(user_id, group_id)

    def _remove_user_from_groups(self, user_id, groups):
        """Resolve each group name/ID and remove the user from it."""
        if groups is not None:
            group_ids = [self.client_plugin().get_group_id(group)
                         for group in groups]
            for group_id in group_ids:
                self.client().users.remove_from_group(user_id, group_id)

    def _find_diff(self, updated_prps, stored_prps):
        """Return (added, removed) group ID lists between the updated and
        previously-stored group properties.
        """
        new_group_ids = [self.client_plugin().get_group_id(group)
                         for group in
                         (set(updated_prps or []) - set(stored_prps or []))]

        removed_group_ids = [self.client_plugin().get_group_id(group)
                             for group in
                             (set(stored_prps or []) - set(updated_prps or []))]

        return new_group_ids, removed_group_ids

    def handle_create(self):
        """Create the user, join its groups and apply role assignments."""
        user_name = (self.properties[self.NAME] or
                     self.physical_resource_name())
        description = self.properties[self.DESCRIPTION]
        domain = self.client_plugin().get_domain_id(
            self.properties[self.DOMAIN])
        enabled = self.properties[self.ENABLED]
        email = self.properties[self.EMAIL]
        password = self.properties[self.PASSWORD]
        default_project = self.client_plugin().get_project_id(
            self.properties[self.DEFAULT_PROJECT])
        groups = self.properties[self.GROUPS]

        user = self.client().users.create(
            name=user_name,
            domain=domain,
            description=description,
            enabled=enabled,
            email=email,
            password=password,
            default_project=default_project)

        self.resource_id_set(user.id)

        self._add_user_to_groups(user.id, groups)

        self.create_assignment(user_id=user.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply changed user fields, reconcile groups and update role
        assignments.
        """
        if prop_diff:
            name = None
            # Don't update the name if no change
            if self.NAME in prop_diff:
                name = prop_diff[self.NAME] or self.physical_resource_name()

            description = prop_diff.get(self.DESCRIPTION)
            enabled = prop_diff.get(self.ENABLED)
            email = prop_diff.get(self.EMAIL)
            password = prop_diff.get(self.PASSWORD)
            # Fall back to the stored domain when the domain did not change.
            domain = (prop_diff.get(self.DOMAIN) or
                      self._stored_properties_data.get(self.DOMAIN))

            default_project = prop_diff.get(self.DEFAULT_PROJECT)

            self._update_user(
                user_id=self.resource_id,
                domain=domain,
                new_name=name,
                new_description=description,
                enabled=enabled,
                new_default_project=default_project,
                new_email=email,
                new_password=password
            )

            if self.GROUPS in prop_diff:
                (new_group_ids, removed_group_ids) = self._find_diff(
                    prop_diff[self.GROUPS],
                    self._stored_properties_data.get(self.GROUPS))
                if new_group_ids:
                    self._add_user_to_groups(self.resource_id, new_group_ids)

                if removed_group_ids:
                    self._remove_user_from_groups(self.resource_id,
                                                  removed_group_ids)

            self.update_assignment(prop_diff=prop_diff,
                                   user_id=self.resource_id)

    def handle_delete(self):
        """Remove the user from its groups, then delete it; a user that is
        already gone is ignored.
        """
        if self.resource_id is not None:
            with self.client_plugin().ignore_not_found:
                if self._stored_properties_data.get(self.GROUPS) is not None:
                    self._remove_user_from_groups(
                        self.resource_id,
                        [self.client_plugin().get_group_id(group)
                         for group in
                         self._stored_properties_data.get(self.GROUPS)])

                self.client().users.delete(self.resource_id)
class ElasticIpAssociation(resource.Resource):
    """Associate an elastic IP with an instance or a network interface.

    Two mutually exclusive modes (enforced in ``validate``):

    * ``EIP`` + ``InstanceId`` — attach a nova floating IP to a server.
    * ``AllocationId`` — bind a neutron floating IP to a port, located
      either directly via ``NetworkInterfaceId`` or by looking up the
      instance's first port.
    """

    PROPERTIES = (
        INSTANCE_ID, EIP, ALLOCATION_ID, NETWORK_INTERFACE_ID,
    ) = (
        'InstanceId', 'EIP', 'AllocationId', 'NetworkInterfaceId',
    )

    properties_schema = {
        INSTANCE_ID: properties.Schema(
            properties.Schema.STRING,
            _('Instance ID to associate with EIP specified by EIP property.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('nova.server')]),
        EIP: properties.Schema(
            properties.Schema.STRING,
            _('EIP address to associate with instance.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('ip_addr')]),
        ALLOCATION_ID: properties.Schema(
            properties.Schema.STRING,
            _('Allocation ID for VPC EIP address.'),
            update_allowed=True),
        NETWORK_INTERFACE_ID: properties.Schema(
            properties.Schema.STRING,
            _('Network interface ID to associate with EIP.'),
            update_allowed=True),
    }

    default_client_name = 'nova'

    def get_reference_id(self):
        # {get_resource: <name>} resolves to the physical resource name.
        return self.physical_resource_name_or_FnGetRefId()

    def validate(self):
        """Validate any of the provided parameters."""
        super(ElasticIpAssociation, self).validate()
        eip = self.properties[self.EIP]
        allocation_id = self.properties[self.ALLOCATION_ID]
        instance_id = self.properties[self.INSTANCE_ID]
        ni_id = self.properties[self.NETWORK_INTERFACE_ID]
        # Exactly one of EIP / AllocationId must be provided
        # (bool equality rejects both-set and both-unset).
        if bool(eip) == bool(allocation_id):
            msg = _("Either 'EIP' or 'AllocationId' must be provided.")
            raise exception.StackValidationFailed(message=msg)
        # An EIP is attached through nova, which requires the instance.
        if eip and not instance_id:
            msg = _("Must specify 'InstanceId' if you specify 'EIP'.")
            raise exception.StackValidationFailed(message=msg)
        # At least one of InstanceId / NetworkInterfaceId is needed to
        # locate the attachment target.
        if not instance_id and not ni_id:
            raise exception.PropertyUnspecifiedError('InstanceId',
                                                     'NetworkInterfaceId')

    def _get_port_info(self, ni_id=None, instance_id=None):
        """Return (port_id, port dict) for the target port, or (None, None).

        Prefers an explicit network interface id; otherwise takes the
        first port attached to the instance.
        """
        port_id = None
        port_rsrc = None
        if ni_id:
            port_rsrc = self.neutron().list_ports(id=ni_id)['ports'][0]
            port_id = ni_id
        elif instance_id:
            ports = self.neutron().list_ports(device_id=instance_id)
            port_rsrc = ports['ports'][0]
            port_id = port_rsrc['id']
        return port_id, port_rsrc

    def _neutron_add_gateway_router(self, float_id, network_id):
        """Set the VPC router's gateway to the floating IP's network.

        No-op when the VPC has no router.
        """
        router = vpc.VPC.router_for_vpc(self.neutron(), network_id)
        if router is not None:
            floatingip = self.neutron().show_floatingip(float_id)
            floating_net_id = floatingip['floatingip']['floating_network_id']
            self.neutron().add_gateway_router(
                router['id'], {'network_id': floating_net_id})

    def _neutron_update_floating_ip(self, allocationId, port_id=None,
                                    ignore_not_found=False):
        """(Dis)associate a neutron floating IP with a port.

        ``port_id=None`` detaches the floating IP. With
        ``ignore_not_found`` a missing floating IP is swallowed via the
        neutron client plugin; any other error is re-raised.
        """
        try:
            self.neutron().update_floatingip(
                allocationId,
                {'floatingip': {'port_id': port_id}})
        except Exception as e:
            if ignore_not_found:
                # ignore_not_found re-raises anything that is not a 404.
                self.client_plugin('neutron').ignore_not_found(e)
            else:
                raise

    def _nova_remove_floating_ip(self, instance_id, eip,
                                 ignore_not_found=False):
        """Remove a floating IP from a server; return the server or None.

        Tolerates "unprocessable entity" (IP already gone from the
        server) always, and "not found" only when ``ignore_not_found``
        is set; every other error is re-raised.
        """
        server = None
        try:
            server = self.client().servers.get(instance_id)
            server.remove_floating_ip(eip)
        except Exception as e:
            is_not_found = self.client_plugin('nova').is_not_found(e)
            iue = self.client_plugin('nova').is_unprocessable_entity(e)
            # Raise when a not-found must not be ignored, or when the
            # error is neither not-found nor unprocessable-entity.
            if ((not ignore_not_found and is_not_found) or
                    (not is_not_found and not iue)):
                raise
        return server

    def _floatingIp_detach(self,
                           nova_ignore_not_found=False,
                           neutron_ignore_not_found=False):
        """Detach the currently associated IP; return the server if any.

        Chooses the nova path when an EIP is set, the neutron path
        otherwise (AllocationId mode).
        """
        eip = self.properties[self.EIP]
        allocation_id = self.properties[self.ALLOCATION_ID]
        instance_id = self.properties[self.INSTANCE_ID]
        server = None
        if eip:
            # EIP mode: remove the old EIP from the instance via nova.
            server = self._nova_remove_floating_ip(instance_id,
                                                   eip,
                                                   nova_ignore_not_found)
        else:
            # AllocationId mode: detach the neutron floating IP.
            self._neutron_update_floating_ip(allocation_id,
                                             None,
                                             neutron_ignore_not_found)
        return server

    def _handle_update_eipInfo(self, prop_diff):
        """Apply an update that changed the EIP or AllocationId."""
        eip_update = prop_diff.get(self.EIP)
        allocation_id_update = prop_diff.get(self.ALLOCATION_ID)
        instance_id = self.properties[self.INSTANCE_ID]
        ni_id = self.properties[self.NETWORK_INTERFACE_ID]
        if eip_update:
            server = self._floatingIp_detach(neutron_ignore_not_found=True)
            if server:
                # Attach the new EIP to the same instance.
                server.add_floating_ip(eip_update)
            self.resource_id_set(eip_update)
        elif allocation_id_update:
            self._floatingIp_detach(nova_ignore_not_found=True)
            port_id, port_rsrc = self._get_port_info(ni_id, instance_id)
            if not port_id or not port_rsrc:
                LOG.error(_LE('Port not specified.'))
                raise exception.NotFound(_('Failed to update, can not found '
                                           'port info.'))
            network_id = port_rsrc['network_id']
            self._neutron_add_gateway_router(allocation_id_update, network_id)
            self._neutron_update_floating_ip(allocation_id_update, port_id)
            self.resource_id_set(allocation_id_update)

    def _handle_update_portInfo(self, prop_diff):
        """Apply an update that changed only the attachment target."""
        instance_id_update = prop_diff.get(self.INSTANCE_ID)
        ni_id_update = prop_diff.get(self.NETWORK_INTERFACE_ID)
        eip = self.properties[self.EIP]
        allocation_id = self.properties[self.ALLOCATION_ID]
        # When only the port/instance changes, there is no need to
        # detach from the old instance/floating IP first.
        if eip:
            server = self.client().servers.get(instance_id_update)
            server.add_floating_ip(eip)
        else:
            port_id, port_rsrc = self._get_port_info(ni_id_update,
                                                     instance_id_update)
            if not port_id or not port_rsrc:
                LOG.error(_LE('Port not specified.'))
                raise exception.NotFound(_('Failed to update, can not found '
                                           'port info.'))
            network_id = port_rsrc['network_id']
            self._neutron_add_gateway_router(allocation_id, network_id)
            self._neutron_update_floating_ip(allocation_id, port_id)

    def handle_create(self):
        """Add a floating IP address to a server."""
        if self.properties[self.EIP]:
            server = self.client().servers.get(
                self.properties[self.INSTANCE_ID])
            server.add_floating_ip(self.properties[self.EIP])
            # The EIP address itself serves as the physical resource id.
            self.resource_id_set(self.properties[self.EIP])
            LOG.debug('ElasticIpAssociation '
                      '%(instance)s.add_floating_ip(%(eip)s)',
                      {'instance': self.properties[self.INSTANCE_ID],
                       'eip': self.properties[self.EIP]})
        elif self.properties[self.ALLOCATION_ID]:
            ni_id = self.properties[self.NETWORK_INTERFACE_ID]
            instance_id = self.properties[self.INSTANCE_ID]
            port_id, port_rsrc = self._get_port_info(ni_id, instance_id)
            if not port_id or not port_rsrc:
                # Best-effort: nothing to associate with yet.
                LOG.warning(_LW('Skipping association, resource not '
                                'specified'))
                return
            float_id = self.properties[self.ALLOCATION_ID]
            network_id = port_rsrc['network_id']
            self._neutron_add_gateway_router(float_id, network_id)
            self._neutron_update_floating_ip(float_id, port_id)
            self.resource_id_set(float_id)

    def handle_delete(self):
        """Remove a floating IP address from a server or port."""
        if self.resource_id is None:
            return
        if self.properties[self.EIP]:
            instance_id = self.properties[self.INSTANCE_ID]
            eip = self.properties[self.EIP]
            self._nova_remove_floating_ip(instance_id,
                                          eip,
                                          ignore_not_found=True)
        elif self.properties[self.ALLOCATION_ID]:
            float_id = self.properties[self.ALLOCATION_ID]
            self._neutron_update_floating_ip(float_id,
                                             port_id=None,
                                             ignore_not_found=True)

    def needs_replace_with_prop_diff(self, changed_properties_set,
                                     after_props, before_props):
        """Replace the resource when the IP changes without a new target.

        If EIP/AllocationId changed, an in-place update is only possible
        when a (new) InstanceId or NetworkInterfaceId accompanies it.
        """
        if (self.ALLOCATION_ID in changed_properties_set or
                self.EIP in changed_properties_set):
            instance_id, ni_id = None, None
            if self.INSTANCE_ID in changed_properties_set:
                instance_id = after_props.get(self.INSTANCE_ID)
            if self.NETWORK_INTERFACE_ID in changed_properties_set:
                ni_id = after_props.get(self.NETWORK_INTERFACE_ID)
            return bool(instance_id or ni_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        # IP changes take precedence; otherwise re-target the same IP.
        if prop_diff:
            if self.ALLOCATION_ID in prop_diff or self.EIP in prop_diff:
                self._handle_update_eipInfo(prop_diff)
            elif (self.INSTANCE_ID in prop_diff or
                  self.NETWORK_INTERFACE_ID in prop_diff):
                self._handle_update_portInfo(prop_diff)
class MeteringRule(neutron.NeutronResource):
    """A resource to create rule for some label.

    Resource for allowing specified label to measure the traffic for a
    specific set of ip range.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        METERING_LABEL_ID, REMOTE_IP_PREFIX, DIRECTION, EXCLUDED,
    ) = (
        'metering_label_id', 'remote_ip_prefix', 'direction', 'excluded',
    )

    ATTRIBUTES = (
        DIRECTION_ATTR, EXCLUDED_ATTR, METERING_LABEL_ID_ATTR,
        REMOTE_IP_PREFIX_ATTR,
    ) = (
        'direction', 'excluded', 'metering_label_id', 'remote_ip_prefix',
    )

    properties_schema = {
        METERING_LABEL_ID: properties.Schema(
            properties.Schema.STRING,
            _('The metering label ID to associate with this metering rule.'),
            required=True
        ),
        REMOTE_IP_PREFIX: properties.Schema(
            properties.Schema.STRING,
            _('Indicates remote IP prefix to be associated with this '
              'metering rule.'),
            required=True,
        ),
        DIRECTION: properties.Schema(
            properties.Schema.STRING,
            _('The direction in which metering rule is applied, '
              'either ingress or egress.'),
            default='ingress',
            constraints=[constraints.AllowedValues((
                'ingress', 'egress'))]
        ),
        EXCLUDED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Specify whether the remote_ip_prefix will be excluded or '
              'not from traffic counters of the metering label. For example '
              'to not count the traffic of a specific IP address of a range.'),
            # Use a real boolean default, matching MeteringLabel.SHARED,
            # instead of the string 'False'.
            default=False
        )
    }

    attributes_schema = {
        DIRECTION_ATTR: attributes.Schema(
            _('The direction in which metering rule is applied.'),
            type=attributes.Schema.STRING
        ),
        EXCLUDED_ATTR: attributes.Schema(
            _('Exclude state for cidr.'),
            type=attributes.Schema.STRING
        ),
        METERING_LABEL_ID_ATTR: attributes.Schema(
            _('The metering label ID to associate with this metering rule.'),
            type=attributes.Schema.STRING
        ),
        REMOTE_IP_PREFIX_ATTR: attributes.Schema(
            _('CIDR to be associated with this metering rule.'),
            type=attributes.Schema.STRING
        ),
    }

    def handle_create(self):
        """Create the metering label rule and record its id."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())

        metering_label_rule = self.client().create_metering_label_rule(
            {'metering_label_rule': props})['metering_label_rule']

        self.resource_id_set(metering_label_rule['id'])

    def _show_resource(self):
        """Fetch the rule's current representation from neutron."""
        return self.client().show_metering_label_rule(
            self.resource_id)['metering_label_rule']

    def handle_delete(self):
        """Delete the rule, ignoring an already-gone rule.

        Returns True when a delete call was issued successfully.
        """
        if not self.resource_id:
            return

        try:
            self.client().delete_metering_label_rule(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
class MeteringLabel(neutron.NeutronResource):
    """A resource for creating neutron metering label.

    Metering happens at the L3 router level: operators configure IP
    ranges and attach a label to each. One label might cover internal
    traffic and another external traffic; each label measures traffic
    for its set of IP ranges. Bandwidth measurements per label are then
    emitted on the Oslo notification bus, where e.g. Ceilometer can
    collect them.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        NAME, DESCRIPTION, SHARED,
    ) = (
        'name', 'description', 'shared',
    )

    ATTRIBUTES = (
        NAME_ATTR, DESCRIPTION_ATTR, SHARED_ATTR,
    ) = (
        'name', 'description', 'shared',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the metering label.')
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the metering label.'),
        ),
        SHARED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether the metering label should be shared '
              'across all tenants.'),
            default=False,
            support_status=support.SupportStatus(version='2015.1'),
        ),
    }

    attributes_schema = {
        NAME_ATTR: attributes.Schema(
            _('Name of the metering label.'),
            type=attributes.Schema.STRING
        ),
        DESCRIPTION_ATTR: attributes.Schema(
            _('Description of the metering label.'),
            type=attributes.Schema.STRING
        ),
        SHARED_ATTR: attributes.Schema(
            _('Shared status of the metering label.'),
            type=attributes.Schema.STRING
        ),
    }

    def handle_create(self):
        """Create the label in neutron and store its id."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())

        body = {'metering_label': props}
        label = self.client().create_metering_label(body)['metering_label']
        self.resource_id_set(label['id'])

    def _show_resource(self):
        """Return the label's current representation from neutron."""
        shown = self.client().show_metering_label(self.resource_id)
        return shown['metering_label']

    def handle_delete(self):
        """Delete the label; a missing label is not an error.

        Returns True only when the delete call succeeded.
        """
        label_id = self.resource_id
        if not label_id:
            return

        try:
            self.client().delete_metering_label(label_id)
        except Exception as exc:
            self.client_plugin().ignore_not_found(exc)
        else:
            return True
class IKEPolicy(neutron.NeutronResource):
    """A resource for IKE policy in Neutron.

    The Internet Key Exchange policy identifies the authentication and
    encryption algorithm used during phase one and phase two negotiation
    of a VPN connection.
    """

    required_service_extension = 'vpnaas'

    PROPERTIES = (
        NAME, DESCRIPTION, AUTH_ALGORITHM, ENCRYPTION_ALGORITHM,
        PHASE1_NEGOTIATION_MODE, LIFETIME, PFS, IKE_VERSION,
    ) = (
        'name', 'description', 'auth_algorithm', 'encryption_algorithm',
        'phase1_negotiation_mode', 'lifetime', 'pfs', 'ike_version',
    )

    _LIFETIME_KEYS = (
        LIFETIME_UNITS, LIFETIME_VALUE,
    ) = (
        'units', 'value',
    )

    ATTRIBUTES = (
        AUTH_ALGORITHM_ATTR, DESCRIPTION_ATTR, ENCRYPTION_ALGORITHM_ATTR,
        IKE_VERSION_ATTR, LIFETIME_ATTR, NAME_ATTR, PFS_ATTR,
        PHASE1_NEGOTIATION_MODE_ATTR, TENANT_ID,
    ) = (
        'auth_algorithm', 'description', 'encryption_algorithm',
        'ike_version', 'lifetime', 'name', 'pfs',
        'phase1_negotiation_mode', 'tenant_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the ike policy.'),
            update_allowed=True),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the ike policy.'),
            update_allowed=True),
        AUTH_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('Authentication hash algorithm for the ike policy.'),
            default='sha1',
            constraints=[
                constraints.AllowedValues(['sha1']),
            ]),
        ENCRYPTION_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('Encryption algorithm for the ike policy.'),
            default='aes-128',
            constraints=[
                constraints.AllowedValues(
                    ['3des', 'aes-128', 'aes-192', 'aes-256']),
            ]),
        PHASE1_NEGOTIATION_MODE: properties.Schema(
            properties.Schema.STRING,
            _('Negotiation mode for the ike policy.'),
            default='main',
            constraints=[
                constraints.AllowedValues(['main']),
            ]),
        LIFETIME: properties.Schema(
            properties.Schema.MAP,
            _('Safety assessment lifetime configuration for the ike policy.'),
            schema={
                LIFETIME_UNITS: properties.Schema(
                    properties.Schema.STRING,
                    _('Safety assessment lifetime units.'),
                    default='seconds',
                    constraints=[
                        constraints.AllowedValues(
                            ['seconds', 'kilobytes']),
                    ]),
                LIFETIME_VALUE: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Safety assessment lifetime value in specified '
                      'units.'),
                    default=3600),
            }),
        PFS: properties.Schema(
            properties.Schema.STRING,
            _('Perfect forward secrecy in lowercase for the ike policy.'),
            default='group5',
            constraints=[
                constraints.AllowedValues(['group2', 'group5', 'group14']),
            ]),
        IKE_VERSION: properties.Schema(
            properties.Schema.STRING,
            _('Version for the ike policy.'),
            default='v1',
            constraints=[
                constraints.AllowedValues(['v1', 'v2']),
            ]),
    }

    attributes_schema = {
        AUTH_ALGORITHM_ATTR: attributes.Schema(
            _('The authentication hash algorithm used by the ike policy.'),
            type=attributes.Schema.STRING),
        DESCRIPTION_ATTR: attributes.Schema(
            _('The description of the ike policy.'),
            type=attributes.Schema.STRING),
        ENCRYPTION_ALGORITHM_ATTR: attributes.Schema(
            _('The encryption algorithm used by the ike policy.'),
            type=attributes.Schema.STRING),
        IKE_VERSION_ATTR: attributes.Schema(
            _('The version of the ike policy.'),
            type=attributes.Schema.STRING),
        LIFETIME_ATTR: attributes.Schema(
            _('The safety assessment lifetime configuration for the ike '
              'policy.'),
            type=attributes.Schema.MAP),
        NAME_ATTR: attributes.Schema(
            _('The name of the ike policy.'),
            type=attributes.Schema.STRING),
        PFS_ATTR: attributes.Schema(
            _('The perfect forward secrecy of the ike policy.'),
            type=attributes.Schema.STRING),
        PHASE1_NEGOTIATION_MODE_ATTR: attributes.Schema(
            _('The negotiation mode of the ike policy.'),
            type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(
            _('The unique identifier of the tenant owning the ike policy.'),
            type=attributes.Schema.STRING),
    }

    def _show_resource(self):
        """Return the ike policy's current state from neutron."""
        shown = self.client().show_ikepolicy(self.resource_id)
        return shown['ikepolicy']

    def handle_create(self):
        """Create the ike policy and record its id."""
        props = self.prepare_properties(
            self.properties, self.physical_resource_name())
        created = self.client().create_ikepolicy(
            {'ikepolicy': props})['ikepolicy']
        self.resource_id_set(created['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed properties to the existing ike policy."""
        if not prop_diff:
            return
        self.client().update_ikepolicy(self.resource_id,
                                       {'ikepolicy': prop_diff})

    def handle_delete(self):
        """Delete the ike policy; tolerate an already-deleted one.

        Returns True only when the delete call succeeded.
        """
        policy_id = self.resource_id
        if not policy_id:
            return

        try:
            self.client().delete_ikepolicy(policy_id)
        except Exception as exc:
            self.client_plugin().ignore_not_found(exc)
        else:
            return True
class IPsecPolicy(neutron.NeutronResource):
    """A resource for IPsec policy in Neutron.

    The IP security policy specifies the authentication and encryption
    algorithm and the encapsulation mode used for the established VPN
    connection.
    """

    required_service_extension = 'vpnaas'

    PROPERTIES = (
        NAME, DESCRIPTION, TRANSFORM_PROTOCOL, ENCAPSULATION_MODE,
        AUTH_ALGORITHM, ENCRYPTION_ALGORITHM, LIFETIME, PFS,
    ) = (
        'name', 'description', 'transform_protocol', 'encapsulation_mode',
        'auth_algorithm', 'encryption_algorithm', 'lifetime', 'pfs',
    )

    _LIFETIME_KEYS = (
        LIFETIME_UNITS, LIFETIME_VALUE,
    ) = (
        'units', 'value',
    )

    ATTRIBUTES = (
        AUTH_ALGORITHM_ATTR, DESCRIPTION_ATTR, ENCAPSULATION_MODE_ATTR,
        ENCRYPTION_ALGORITHM_ATTR, LIFETIME_ATTR, NAME_ATTR, PFS_ATTR,
        TENANT_ID, TRANSFORM_PROTOCOL_ATTR,
    ) = (
        'auth_algorithm', 'description', 'encapsulation_mode',
        'encryption_algorithm', 'lifetime', 'name', 'pfs',
        'tenant_id', 'transform_protocol',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the ipsec policy.'),
            update_allowed=True),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the ipsec policy.'),
            update_allowed=True),
        TRANSFORM_PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            _('Transform protocol for the ipsec policy.'),
            default='esp',
            constraints=[
                constraints.AllowedValues(['esp', 'ah', 'ah-esp']),
            ]),
        ENCAPSULATION_MODE: properties.Schema(
            properties.Schema.STRING,
            _('Encapsulation mode for the ipsec policy.'),
            default='tunnel',
            constraints=[
                constraints.AllowedValues(['tunnel', 'transport']),
            ]),
        AUTH_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('Authentication hash algorithm for the ipsec policy.'),
            default='sha1',
            constraints=[
                constraints.AllowedValues(['sha1']),
            ]),
        ENCRYPTION_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('Encryption algorithm for the ipsec policy.'),
            default='aes-128',
            constraints=[
                constraints.AllowedValues(
                    ['3des', 'aes-128', 'aes-192', 'aes-256']),
            ]),
        LIFETIME: properties.Schema(
            properties.Schema.MAP,
            _('Safety assessment lifetime configuration for the ipsec '
              'policy.'),
            schema={
                LIFETIME_UNITS: properties.Schema(
                    properties.Schema.STRING,
                    _('Safety assessment lifetime units.'),
                    default='seconds',
                    constraints=[
                        constraints.AllowedValues(
                            ['seconds', 'kilobytes']),
                    ]),
                LIFETIME_VALUE: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Safety assessment lifetime value in specified '
                      'units.'),
                    default=3600),
            }),
        PFS: properties.Schema(
            properties.Schema.STRING,
            _('Perfect forward secrecy for the ipsec policy.'),
            default='group5',
            constraints=[
                constraints.AllowedValues(
                    ['group2', 'group5', 'group14']),
            ]),
    }

    attributes_schema = {
        AUTH_ALGORITHM_ATTR: attributes.Schema(
            _('The authentication hash algorithm of the ipsec policy.'),
            type=attributes.Schema.STRING),
        DESCRIPTION_ATTR: attributes.Schema(
            _('The description of the ipsec policy.'),
            type=attributes.Schema.STRING),
        ENCAPSULATION_MODE_ATTR: attributes.Schema(
            _('The encapsulation mode of the ipsec policy.'),
            type=attributes.Schema.STRING),
        ENCRYPTION_ALGORITHM_ATTR: attributes.Schema(
            _('The encryption algorithm of the ipsec policy.'),
            type=attributes.Schema.STRING),
        LIFETIME_ATTR: attributes.Schema(
            _('The safety assessment lifetime configuration of the ipsec '
              'policy.'),
            type=attributes.Schema.MAP),
        NAME_ATTR: attributes.Schema(
            _('The name of the ipsec policy.'),
            type=attributes.Schema.STRING),
        PFS_ATTR: attributes.Schema(
            _('The perfect forward secrecy of the ipsec policy.'),
            type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(
            _('The unique identifier of the tenant owning the ipsec policy.'),
            type=attributes.Schema.STRING),
        TRANSFORM_PROTOCOL_ATTR: attributes.Schema(
            _('The transform protocol of the ipsec policy.'),
            type=attributes.Schema.STRING),
    }

    def _show_resource(self):
        """Return the ipsec policy's current state from neutron."""
        shown = self.client().show_ipsecpolicy(self.resource_id)
        return shown['ipsecpolicy']

    def handle_create(self):
        """Create the ipsec policy and record its id."""
        props = self.prepare_properties(
            self.properties, self.physical_resource_name())
        created = self.client().create_ipsecpolicy(
            {'ipsecpolicy': props})['ipsecpolicy']
        self.resource_id_set(created['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed properties to the existing ipsec policy."""
        if not prop_diff:
            return
        self.client().update_ipsecpolicy(self.resource_id,
                                         {'ipsecpolicy': prop_diff})

    def handle_delete(self):
        """Delete the ipsec policy; tolerate an already-deleted one.

        Returns True only when the delete call succeeded.
        """
        policy_id = self.resource_id
        if not policy_id:
            return

        try:
            self.client().delete_ipsecpolicy(policy_id)
        except Exception as exc:
            self.client_plugin().ignore_not_found(exc)
        else:
            return True
class VPNService(neutron.NeutronResource):
    """A resource for VPN service in Neutron.

    VPN service is a high level object that associates VPN
    with a specific subnet and router.
    """

    required_service_extension = 'vpnaas'

    PROPERTIES = (
        NAME, DESCRIPTION, ADMIN_STATE_UP,
        SUBNET_ID, SUBNET, ROUTER_ID, ROUTER,
    ) = (
        'name', 'description', 'admin_state_up',
        'subnet_id', 'subnet', 'router_id', 'router',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, DESCRIPTION_ATTR, NAME_ATTR, ROUTER_ID_ATTR,
        STATUS, SUBNET_ID_ATTR, TENANT_ID,
    ) = (
        'admin_state_up', 'description', 'name', 'router_id',
        'status', 'subnet_id', 'tenant_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the vpn service.'),
            update_allowed=True),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the vpn service.'),
            update_allowed=True),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Administrative state for the vpn service.'),
            default=True,
            update_allowed=True),
        SUBNET_ID: properties.Schema(
            properties.Schema.STRING,
            # Hidden legacy alias of SUBNET, kept for old templates.
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                message=_('Use property %s.') % SUBNET,
                version='5.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.subnet')]),
        SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('Subnet in which the vpn service will be created.'),
            support_status=support.SupportStatus(version='2014.2'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.subnet')]),
        ROUTER_ID: properties.Schema(
            properties.Schema.STRING,
            _('Unique identifier for the router to which the vpn service '
              'will be inserted.'),
            # Hidden legacy alias of ROUTER, kept for old templates.
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_('Use property %s') % ROUTER,
                    version='2015.1',
                    previous_status=support.SupportStatus(version='2013.2'))),
            constraints=[constraints.CustomConstraint('neutron.router')]),
        ROUTER: properties.Schema(
            properties.Schema.STRING,
            _('The router to which the vpn service will be inserted.'),
            support_status=support.SupportStatus(version='2015.1'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.router')])
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _('The administrative state of the vpn service.'),
            type=attributes.Schema.STRING),
        DESCRIPTION_ATTR: attributes.Schema(
            _('The description of the vpn service.'),
            type=attributes.Schema.STRING),
        NAME_ATTR: attributes.Schema(
            _('The name of the vpn service.'),
            type=attributes.Schema.STRING),
        ROUTER_ID_ATTR: attributes.Schema(
            _('The unique identifier of the router to which the vpn service '
              'was inserted.'),
            type=attributes.Schema.STRING),
        STATUS: attributes.Schema(
            _('The status of the vpn service.'),
            type=attributes.Schema.STRING),
        SUBNET_ID_ATTR: attributes.Schema(
            _('The unique identifier of the subnet in which the vpn service '
              'was created.'),
            type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(
            _('The unique identifier of the tenant owning the vpn service.'),
            type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        """Map the deprecated *_id properties onto SUBNET/ROUTER and
        resolve names to ids."""
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.SUBNET],
                value_path=[self.SUBNET_ID]),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.SUBNET],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='subnet'),
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.ROUTER],
                value_path=[self.ROUTER_ID]),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.ROUTER],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='router'),
        ]

    def _show_resource(self):
        """Return the vpn service's current state from neutron."""
        return self.client().show_vpnservice(self.resource_id)['vpnservice']

    def handle_create(self):
        """Create the vpn service and record its id."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        # The API still expects the *_id keys, so rename before the call.
        props['subnet_id'] = props.pop(self.SUBNET)
        props['router_id'] = props.pop(self.ROUTER)
        vpnservice = self.client().create_vpnservice(
            {'vpnservice': props})['vpnservice']
        self.resource_id_set(vpnservice['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed properties to the existing vpn service."""
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_vpnservice(self.resource_id,
                                            {'vpnservice': prop_diff})

    def handle_delete(self):
        """Delete the vpn service; tolerate an already-deleted one.

        Returns True only when the delete call succeeded.
        """
        # Guard against a never-created resource, consistent with the
        # other vpnaas resources (IKEPolicy, IPsecPolicy).
        if not self.resource_id:
            return

        try:
            self.client().delete_vpnservice(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
class IPsecSiteConnection(neutron.NeutronResource):
    """A resource for IPsec site connection in Neutron.

    This resource has details for the site-to-site IPsec connection,
    including the peer CIDRs, MTU, peer address, DPD settings and status.
    """

    required_service_extension = 'vpnaas'

    PROPERTIES = (
        NAME, DESCRIPTION, PEER_ADDRESS, PEER_ID, PEER_CIDRS, MTU,
        DPD, PSK, INITIATOR, ADMIN_STATE_UP, IKEPOLICY_ID,
        IPSECPOLICY_ID, VPNSERVICE_ID,
    ) = (
        'name', 'description', 'peer_address', 'peer_id', 'peer_cidrs',
        'mtu', 'dpd', 'psk', 'initiator', 'admin_state_up',
        'ikepolicy_id', 'ipsecpolicy_id', 'vpnservice_id',
    )

    _DPD_KEYS = (
        DPD_ACTIONS, DPD_INTERVAL, DPD_TIMEOUT,
    ) = (
        'actions', 'interval', 'timeout',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, AUTH_MODE, DESCRIPTION_ATTR, DPD_ATTR,
        IKEPOLICY_ID_ATTR, INITIATOR_ATTR, IPSECPOLICY_ID_ATTR, MTU_ATTR,
        NAME_ATTR, PEER_ADDRESS_ATTR, PEER_CIDRS_ATTR, PEER_ID_ATTR,
        PSK_ATTR, ROUTE_MODE, STATUS, TENANT_ID, VPNSERVICE_ID_ATTR,
    ) = (
        'admin_state_up', 'auth_mode', 'description', 'dpd',
        'ikepolicy_id', 'initiator', 'ipsecpolicy_id', 'mtu',
        'name', 'peer_address', 'peer_cidrs', 'peer_id',
        'psk', 'route_mode', 'status', 'tenant_id', 'vpnservice_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the ipsec site connection.'),
            update_allowed=True),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the ipsec site connection.'),
            update_allowed=True),
        PEER_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('Remote branch router public IPv4 address or IPv6 address or '
              'FQDN.'),
            required=True),
        PEER_ID: properties.Schema(
            properties.Schema.STRING,
            _('Remote branch router identity.'),
            required=True),
        PEER_CIDRS: properties.Schema(
            properties.Schema.LIST,
            _('Remote subnet(s) in CIDR format.'),
            required=True,
            schema=properties.Schema(
                properties.Schema.STRING,
                constraints=[constraints.CustomConstraint('net_cidr')])),
        MTU: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum transmission unit size (in bytes) for the ipsec site '
              'connection.'),
            default=1500),
        DPD: properties.Schema(
            properties.Schema.MAP,
            _('Dead Peer Detection protocol configuration for the ipsec site '
              'connection.'),
            schema={
                DPD_ACTIONS: properties.Schema(
                    properties.Schema.STRING,
                    _('Controls DPD protocol mode.'),
                    default='hold',
                    constraints=[
                        constraints.AllowedValues([
                            'clear', 'disabled', 'hold', 'restart',
                            'restart-by-peer'
                        ]),
                    ]),
                DPD_INTERVAL: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Number of seconds for the DPD delay.'),
                    default=30),
                DPD_TIMEOUT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Number of seconds for the DPD timeout.'),
                    default=120),
            }),
        PSK: properties.Schema(
            properties.Schema.STRING,
            _('Pre-shared key string for the ipsec site connection.'),
            required=True),
        INITIATOR: properties.Schema(
            properties.Schema.STRING,
            _('Initiator state in lowercase for the ipsec site connection.'),
            default='bi-directional',
            constraints=[
                constraints.AllowedValues(['bi-directional',
                                           'response-only']),
            ]),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Administrative state for the ipsec site connection.'),
            default=True,
            update_allowed=True),
        IKEPOLICY_ID: properties.Schema(
            properties.Schema.STRING,
            _('Unique identifier for the ike policy associated with the '
              'ipsec site connection.'),
            required=True),
        IPSECPOLICY_ID: properties.Schema(
            properties.Schema.STRING,
            _('Unique identifier for the ipsec policy associated with the '
              'ipsec site connection.'),
            required=True),
        VPNSERVICE_ID: properties.Schema(
            properties.Schema.STRING,
            _('Unique identifier for the vpn service associated with the '
              'ipsec site connection.'),
            required=True),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _('The administrative state of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        AUTH_MODE: attributes.Schema(
            _('The authentication mode of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        DESCRIPTION_ATTR: attributes.Schema(
            _('The description of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        DPD_ATTR: attributes.Schema(
            _('The dead peer detection protocol configuration of the ipsec '
              'site connection.'),
            type=attributes.Schema.MAP),
        IKEPOLICY_ID_ATTR: attributes.Schema(
            _('The unique identifier of ike policy associated with the ipsec '
              'site connection.'),
            type=attributes.Schema.STRING),
        INITIATOR_ATTR: attributes.Schema(
            _('The initiator of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        IPSECPOLICY_ID_ATTR: attributes.Schema(
            _('The unique identifier of ipsec policy associated with the '
              'ipsec site connection.'),
            type=attributes.Schema.STRING),
        MTU_ATTR: attributes.Schema(
            _('The maximum transmission unit size (in bytes) of the ipsec '
              'site connection.'),
            type=attributes.Schema.STRING),
        NAME_ATTR: attributes.Schema(
            _('The name of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        PEER_ADDRESS_ATTR: attributes.Schema(
            _('The remote branch router public IPv4 address or IPv6 address '
              'or FQDN.'),
            type=attributes.Schema.STRING),
        PEER_CIDRS_ATTR: attributes.Schema(
            _('The remote subnet(s) in CIDR format of the ipsec site '
              'connection.'),
            type=attributes.Schema.LIST),
        PEER_ID_ATTR: attributes.Schema(
            _('The remote branch router identity of the ipsec site '
              'connection.'),
            type=attributes.Schema.STRING),
        PSK_ATTR: attributes.Schema(
            _('The pre-shared key string of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        ROUTE_MODE: attributes.Schema(
            _('The route mode of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        STATUS: attributes.Schema(
            _('The status of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(
            _('The unique identifier of the tenant owning the ipsec site '
              'connection.'),
            type=attributes.Schema.STRING),
        VPNSERVICE_ID_ATTR: attributes.Schema(
            _('The unique identifier of vpn service associated with the ipsec '
              'site connection.'),
            type=attributes.Schema.STRING),
    }

    def _show_resource(self):
        """Return the connection's current state from neutron."""
        return self.client().show_ipsec_site_connection(
            self.resource_id)['ipsec_site_connection']

    def handle_create(self):
        """Create the ipsec site connection and record its id."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        ipsec_site_connection = self.client().create_ipsec_site_connection(
            {'ipsec_site_connection': props})['ipsec_site_connection']
        self.resource_id_set(ipsec_site_connection['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed properties to the existing connection."""
        if prop_diff:
            self.client().update_ipsec_site_connection(
                self.resource_id, {'ipsec_site_connection': prop_diff})

    def handle_delete(self):
        """Delete the connection; tolerate an already-deleted one.

        Returns True only when the delete call succeeded.
        """
        # Guard against a never-created resource, consistent with the
        # other vpnaas resources (IKEPolicy, IPsecPolicy).
        if not self.resource_id:
            return

        try:
            self.client().delete_ipsec_site_connection(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
    """AWS::AutoScaling::AutoScalingGroup resource.

    Manages a group of instances via a nested stack, with an optional
    integration with a "groupwatch" monitoring service and its scheduler
    (enabled through cfg.CONF.FusionSphere.groupwatch_enable).
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        AVAILABILITY_ZONES, LAUNCH_CONFIGURATION_NAME, MAX_SIZE, MIN_SIZE,
        COOLDOWN, DESIRED_CAPACITY, HEALTH_CHECK_GRACE_PERIOD,
        HEALTH_CHECK_TYPE, LOAD_BALANCER_NAMES, VPCZONE_IDENTIFIER, TAGS,
        INSTANCE_ID,
    ) = (
        'AvailabilityZones', 'LaunchConfigurationName', 'MaxSize', 'MinSize',
        'Cooldown', 'DesiredCapacity', 'HealthCheckGracePeriod',
        'HealthCheckType', 'LoadBalancerNames', 'VPCZoneIdentifier', 'Tags',
        'InstanceId',
    )

    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )

    # NOTE: (ROLLING_UPDATE) is a parenthesized string, not a 1-tuple, so
    # both names are bound to the same plain string.
    _UPDATE_POLICY_SCHEMA_KEYS = (ROLLING_UPDATE) = (
        'AutoScalingRollingUpdate')

    _ROLLING_UPDATE_SCHEMA_KEYS = (MIN_INSTANCES_IN_SERVICE, MAX_BATCH_SIZE,
                                   PAUSE_TIME) = ('MinInstancesInService',
                                                  'MaxBatchSize', 'PauseTime')

    ATTRIBUTES = (INSTANCE_LIST, ) = ('InstanceList', )

    properties_schema = {
        AVAILABILITY_ZONES: properties.Schema(
            properties.Schema.LIST,
            _('Not Implemented.'),
            required=True),
        LAUNCH_CONFIGURATION_NAME: properties.Schema(
            properties.Schema.STRING,
            _('The reference to a LaunchConfiguration resource.'),
            update_allowed=True),
        INSTANCE_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of an existing instance to use to '
              'create the Auto Scaling group. If specify this property, '
              'will create the group use an existing instance instead of '
              'a launch configuration.'),
            constraints=[constraints.CustomConstraint("nova.server")]),
        MAX_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of instances in the group.'),
            required=True,
            update_allowed=True),
        MIN_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Minimum number of instances in the group.'),
            required=True,
            update_allowed=True),
        COOLDOWN: properties.Schema(
            properties.Schema.INTEGER,
            _('Cooldown period, in seconds.'),
            update_allowed=True),
        DESIRED_CAPACITY: properties.Schema(
            properties.Schema.INTEGER,
            _('Desired initial number of instances.'),
            update_allowed=True),
        HEALTH_CHECK_GRACE_PERIOD: properties.Schema(
            properties.Schema.INTEGER,
            _('Not Implemented.'),
            implemented=False),
        HEALTH_CHECK_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Not Implemented.'),
            constraints=[
                constraints.AllowedValues(['EC2', 'ELB']),
            ],
            implemented=False),
        LOAD_BALANCER_NAMES: properties.Schema(
            properties.Schema.LIST,
            _('List of LoadBalancer resources.')),
        VPCZONE_IDENTIFIER: properties.Schema(
            properties.Schema.LIST,
            _('Use only with Neutron, to list the internal subnet to '
              'which the instance will be attached; '
              'needed only if multiple exist; '
              'list length must be exactly 1.'),
            schema=properties.Schema(
                properties.Schema.STRING,
                _('UUID of the internal subnet to which the instance '
                  'will be attached.'))),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            _('Tags to attach to this group.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    TAG_KEY: properties.Schema(
                        properties.Schema.STRING,
                        required=True),
                    TAG_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        required=True),
                },
            )),
    }

    attributes_schema = {
        INSTANCE_LIST: attributes.Schema(
            _("A comma-delimited list of server ip addresses. "
              "(Heat extension)."),
            type=attributes.Schema.STRING),
    }

    rolling_update_schema = {
        MIN_INSTANCES_IN_SERVICE: properties.Schema(
            properties.Schema.INTEGER,
            default=0),
        MAX_BATCH_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            default=1),
        PAUSE_TIME: properties.Schema(
            properties.Schema.STRING,
            default='PT0S')
    }

    update_policy_schema = {
        ROLLING_UPDATE: properties.Schema(
            properties.Schema.MAP,
            schema=rolling_update_schema)
    }

    def handle_create(self):
        # The group is materialized as a nested stack of instances.
        return self.create_with_template(self.child_template())

    def _get_members(self, group_id):
        """Collect every COMPLETE OS::Nova::Server in the (nested) stack.

        Returns member dicts in the shape the groupwatch API expects.
        """
        members = []
        for res in self.stack.iter_resources(cfg.CONF.max_nested_stack_depth):
            if (res.type() in ['OS::Nova::Server'] and
                    res.status == res.COMPLETE):
                members.append({
                    'id': res.resource_id,
                    'name': res.name,
                    'group_id': group_id
                })
        return members

    def _add_scheduler(self, group_id):
        """Register a periodic groupwatch job; return the new job id."""
        task_args = {
            'group_name': 'groupwatch',
            'job_name': group_id,
            'job_type': 'period',
            'trigger_type': 'SIMPLE_TRIGGER',
            'interval': 240,  # presumably seconds between runs -- confirm
            'cover_flag': 'true',
            'end_time': 4076884800000,  # far-future expiry, epoch ms? verify
            'meta_data': {
                'group_id': group_id,
                'project_id': self.context.tenant_id
            }
        }
        rsp = self.client('scheduler').scheduler.create(**task_args)
        return rsp.get('job_id')

    def _create_groupwatch(self):
        """Create the groupwatch group mirroring this scaling group."""
        # No-op unless the FusionSphere groupwatch integration is enabled.
        if not cfg.CONF.FusionSphere.groupwatch_enable:
            return

        group_id = self.stack.resource_by_refid(self.FnGetRefId()).resource_id
        members = self._get_members(group_id)
        job_id = self._add_scheduler(group_id)
        kwargs = {
            'id': group_id,
            'name': self.name,
            'type': 'VM',
            'data': {
                'scheduler_job_id': job_id
            },
            'members': members
        }
        self.client('groupwatch').groups.create(**kwargs)

    def _make_launch_config_resource(self, name, props):
        # Build a transient LaunchConfiguration resource (never stored in
        # the stack) so its property schema can validate/resolve `props`.
        lc_res_type = 'AWS::AutoScaling::LaunchConfiguration'
        lc_res_def = rsrc_defn.ResourceDefinition(name,
                                                  lc_res_type,
                                                  props)
        lc_res = resource.Resource(name, lc_res_def, self.stack)
        return lc_res

    def _get_conf_properties(self):
        """Return (launch-config resource, resolved instance properties).

        When InstanceId is set, a launch configuration is synthesized from
        the existing server's image, flavor, keypair and security groups;
        otherwise the parent implementation supplies it. VPCZoneIdentifier,
        if given, pins the subnet.
        """
        instance_id = self.properties.get(self.INSTANCE_ID)
        if instance_id:
            server = self.client_plugin('nova').get_server(instance_id)
            instance_props = {
                'ImageId': server.image['id'],
                'InstanceType': server.flavor['id'],
                'KeyName': server.key_name,
                'SecurityGroups': [sg['name']
                                   for sg in server.security_groups]
            }
            conf = self._make_launch_config_resource(self.name,
                                                     instance_props)
            props = function.resolve(conf.properties.data)
        else:
            conf, props = super(AutoScalingGroup, self)._get_conf_properties()

        vpc_zone_ids = self.properties.get(self.VPCZONE_IDENTIFIER)
        if vpc_zone_ids:
            # Only one subnet is supported; validate() enforces length 1.
            props['SubnetId'] = vpc_zone_ids[0]

        return conf, props

    def check_create_complete(self, task):
        """Update cooldown timestamp after create succeeds."""
        done = super(AutoScalingGroup, self).check_create_complete(task)
        if done:
            # Register with groupwatch only once all instances are up.
            self._create_groupwatch()
            self._finished_scaling(
                "%s : %s" % (sc_util.CFN_EXACT_CAPACITY,
                             grouputils.get_size(self)))
        return done

    def check_update_complete(self, cookie):
        """Update the cooldown timestamp after update succeeds."""
        done = super(AutoScalingGroup, self).check_update_complete(cookie)
        if done:
            self._finished_scaling(
                "%s : %s" % (sc_util.CFN_EXACT_CAPACITY,
                             grouputils.get_size(self)))
        return done

    def _get_new_capacity(self, capacity,
                          adjustment,
                          adjustment_type=sc_util.CFN_EXACT_CAPACITY,
                          min_adjustment_step=None):
        # Clamp the computed size to the configured [MinSize, MaxSize].
        lower = self.properties[self.MIN_SIZE]
        upper = self.properties[self.MAX_SIZE]
        return sc_util.calculate_new_capacity(capacity, adjustment,
                                              adjustment_type,
                                              min_adjustment_step,
                                              lower, upper)

    def _update_groupwatch(self):
        """Refresh the groupwatch member list after a resize."""
        if not cfg.CONF.FusionSphere.groupwatch_enable:
            return

        group_id = self.stack.resource_by_refid(self.FnGetRefId()).resource_id
        members = self._get_members(group_id)
        kwargs = {
            'id': group_id,
            'name': self.name,
            'type': 'VM',
            'members': members
        }
        self.client('groupwatch').groups.update(group_id, **kwargs)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Updates self.properties, if Properties has changed.

        If Properties has changed, update self.properties, so we get the
        new values during any subsequent adjustment.
        """
        if tmpl_diff:
            # parse update policy
            if 'UpdatePolicy' in tmpl_diff:
                up = json_snippet.update_policy(self.update_policy_schema,
                                                self.context)
                self.update_policy = up

        self.properties = json_snippet.properties(self.properties_schema,
                                                  self.context)
        if prop_diff:
            # Replace instances first if launch configuration has changed
            self._try_rolling_update(prop_diff)

        # Update will happen irrespective of whether auto-scaling
        # is in progress or not.
        capacity = grouputils.get_size(self)
        desired_capacity = self.properties[self.DESIRED_CAPACITY] or capacity
        new_capacity = self._get_new_capacity(capacity, desired_capacity)
        self.resize(new_capacity)

    def adjust(self, adjustment,
               adjustment_type=sc_util.CFN_CHANGE_IN_CAPACITY,
               min_adjustment_step=None):
        """Adjust the size of the scaling group if the cooldown permits."""
        if not self._is_scaling_allowed():
            LOG.info(_LI("%(name)s NOT performing scaling adjustment, "
                         "cooldown %(cooldown)s"),
                     {'name': self.name,
                      'cooldown': self.properties[self.COOLDOWN]})
            # NoActionRequired tells the caller the signal was ignored.
            raise exception.NoActionRequired()

        capacity = grouputils.get_size(self)
        new_capacity = self._get_new_capacity(capacity, adjustment,
                                              adjustment_type,
                                              min_adjustment_step)

        changed_size = new_capacity != capacity
        # send a notification before, on-error and on-success.
        notif = {
            'stack': self.stack,
            'adjustment': adjustment,
            'adjustment_type': adjustment_type,
            'capacity': capacity,
            'groupname': self.FnGetRefId(),
            'message': _("Start resizing the group %(group)s") % {
                'group': self.FnGetRefId()},
            'suffix': 'start',
        }
        notification.send(**notif)
        try:
            self.resize(new_capacity)
        except Exception as resize_ex:
            with excutils.save_and_reraise_exception():
                try:
                    notif.update({'suffix': 'error',
                                  'message': six.text_type(resize_ex),
                                  'capacity': grouputils.get_size(self),
                                  })
                    notification.send(**notif)
                except Exception:
                    # Never let a notification failure mask the resize error.
                    LOG.exception(_LE('Failed sending error notification'))
        else:
            notif.update({
                'suffix': 'end',
                'capacity': new_capacity,
                'message': _("End resizing the group %(group)s") % {
                    'group': notif['groupname']},
            })
            notification.send(**notif)
        finally:
            # Always refresh groupwatch and stamp the cooldown, even when
            # the resize failed.
            self._update_groupwatch()
            self._finished_scaling("%s : %s" % (adjustment_type, adjustment),
                                   changed_size=changed_size)

        return changed_size

    def _tags(self):
        """Add Identifying Tags to all servers in the group.

        This is so the Dimensions received from cfn-push-stats all include
        the groupname and stack id.
        Note: the group name must match what is returned from FnGetRefId
        """
        autoscaling_tag = [{self.TAG_KEY: 'metering.AutoScalingGroupName',
                            self.TAG_VALUE: self.FnGetRefId()}]
        return super(AutoScalingGroup, self)._tags() + autoscaling_tag

    def validate(self):
        """Check size bounds and the InstanceId/LaunchConfiguration rules."""
        # check validity of group size
        min_size = self.properties[self.MIN_SIZE]
        max_size = self.properties[self.MAX_SIZE]

        if max_size < min_size:
            msg = _("MinSize can not be greater than MaxSize")
            raise exception.StackValidationFailed(message=msg)

        if min_size < 0:
            msg = _("The size of AutoScalingGroup can not be less than zero")
            raise exception.StackValidationFailed(message=msg)

        if self.properties[self.DESIRED_CAPACITY] is not None:
            desired_capacity = self.properties[self.DESIRED_CAPACITY]
            if desired_capacity < min_size or desired_capacity > max_size:
                msg = _("DesiredCapacity must be between MinSize and MaxSize")
                raise exception.StackValidationFailed(message=msg)

        # TODO(pasquier-s): once Neutron is able to assign subnets to
        # availability zones, it will be possible to specify multiple subnets.
        # For now, only one subnet can be specified. The bug #1096017 tracks
        # this issue.
        if (self.properties.get(self.VPCZONE_IDENTIFIER) and
                len(self.properties[self.VPCZONE_IDENTIFIER]) != 1):
            raise exception.NotSupported(
                feature=_("Anything other than one "
                          "VPCZoneIdentifier"))

        # validate properties InstanceId and LaunchConfigurationName
        # for aws auto scaling group.
        # should provide just only one of
        if self.type() == 'AWS::AutoScaling::AutoScalingGroup':
            instanceId = self.properties.get(self.INSTANCE_ID)
            launch_config = self.properties.get(
                self.LAUNCH_CONFIGURATION_NAME)
            # Exactly one of the two must be set (XOR).
            if bool(instanceId) == bool(launch_config):
                msg = _("Either 'InstanceId' or 'LaunchConfigurationName' "
                        "must be provided.")
                raise exception.StackValidationFailed(message=msg)

        super(AutoScalingGroup, self).validate()

    def _resolve_attribute(self, name):
        """Resolves the resource's attributes.

        heat extension: "InstanceList" returns comma delimited list of
        server ip addresses.
        """
        if name == self.INSTANCE_LIST:
            return u','.join(inst.FnGetAtt('PublicIp')
                             for inst in grouputils.get_members(self)) or None

    def child_template(self):
        # Initial size is DesiredCapacity when set (and non-zero), else
        # MinSize.
        if self.properties[self.DESIRED_CAPACITY]:
            num_instances = self.properties[self.DESIRED_CAPACITY]
        else:
            num_instances = self.properties[self.MIN_SIZE]
        return self._create_template(num_instances)

    def _delete_groupwatch(self):
        """Best-effort teardown of the groupwatch group and its job."""
        if not cfg.CONF.FusionSphere.groupwatch_enable:
            return
        if not self.resource_id:
            return

        group = None
        try:
            group = self.client('groupwatch').groups.get(self.resource_id)
        except Exception as ex:
            # Group already gone: nothing further to clean up. Other
            # errors are re-raised by the client plugin.
            self.client_plugin('groupwatch').ignore_not_found(ex)
            return

        try:
            if (group and group.get('group') and
                    'data' in group.get('group')):
                scheduler_job_id = \
                    group.get('group').get('data').get('scheduler_job_id')
                self.client('scheduler').scheduler.delete(scheduler_job_id)
        except (AttributeError, KeyError):
            # do nothing
            pass
        except Exception as ex:
            self.client_plugin('scheduler').ignore_not_found(ex)

        try:
            self.client('groupwatch').groups.delete(self.resource_id)
        except Exception as ex:
            self.client_plugin('groupwatch').ignore_not_found(ex)
        else:
            return True

    def handle_delete(self):
        self._delete_groupwatch()
        return self.delete_nested()

    def handle_metadata_reset(self):
        # Clear a stuck scaling flag so new signals can be processed.
        metadata = self.metadata_get()
        if 'scaling_in_progress' in metadata:
            metadata['scaling_in_progress'] = False
        self.metadata_set(metadata)
class FloatingIPAssociation(neutron.NeutronResource):
    """A resource for associating floating ips and ports.

    This resource allows associating a floating IP to a port with at least
    one IP address to associate with this floating IP.
    """

    PROPERTIES = (
        FLOATINGIP_ID, PORT_ID, FIXED_IP_ADDRESS,
    ) = (
        'floatingip_id', 'port_id', 'fixed_ip_address',
    )

    properties_schema = {
        FLOATINGIP_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of the floating IP to associate.'),
            required=True,
            update_allowed=True),
        PORT_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of an existing port with at least one IP address to '
              'associate with this floating IP.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.CustomConstraint('neutron.port')]),
        FIXED_IP_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address to use if the port has multiple addresses.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('ip_addr')]),
    }

    def add_dependencies(self, deps):
        """Add implicit dependencies on matching router interfaces.

        The association is made to depend on any RouterInterface that
        connects the subnet of a port this association already depends on,
        so the interface exists before the floating IP is attached.
        """
        super(FloatingIPAssociation, self).add_dependencies(deps)

        for resource in six.itervalues(self.stack):
            if resource.has_interface('OS::Neutron::RouterInterface'):

                def port_on_subnet(resource, subnet):
                    # True when `resource` is a port whose fixed IP lives
                    # on `subnet`. NOTE(review): the unconditional return
                    # inside the loop means only the FIRST fixed IP is
                    # checked -- confirm this is intended.
                    if not resource.has_interface('OS::Neutron::Port'):
                        return False
                    fixed_ips = resource.properties.get(
                        port.Port.FIXED_IPS) or []
                    for fixed_ip in fixed_ips:
                        port_subnet = (
                            fixed_ip.get(port.Port.FIXED_IP_SUBNET)
                            or fixed_ip.get(port.Port.FIXED_IP_SUBNET_ID))
                        return subnet == port_subnet
                    return False

                interface_subnet = (
                    resource.properties.get(router.RouterInterface.SUBNET) or
                    resource.properties.get(router.RouterInterface.SUBNET_ID))
                # Scan this resource's existing dependencies for a port on
                # the interface's subnet; if found, depend on the interface.
                for d in deps.graph()[self]:
                    if port_on_subnet(d, interface_subnet):
                        deps += (self, resource)
                        break

    def handle_create(self):
        props = self.prepare_properties(self.properties, self.name)

        # The association is expressed by updating the floating IP itself;
        # floatingip_id is the URL parameter, the rest goes in the body.
        floatingip_id = props.pop(self.FLOATINGIP_ID)
        self.client().update_floatingip(floatingip_id,
                                        {'floatingip': props})
        # No Neutron-side object exists for the association, so the Heat
        # resource's own database id is used as the resource id.
        self.resource_id_set(self.id)

    def handle_delete(self):
        if not self.resource_id:
            return
        # Disassociate by clearing port_id; a vanished floating IP is fine.
        with self.client_plugin().ignore_not_found:
            self.client().update_floatingip(
                self.properties[self.FLOATINGIP_ID],
                {'floatingip': {'port_id': None}})

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            floatingip_id = self.properties[self.FLOATINGIP_ID]
            port_id = self.properties[self.PORT_ID]
            # if the floatingip_id is changed, disassociate the port which
            # associated with the old floatingip_id
            if self.FLOATINGIP_ID in prop_diff:
                with self.client_plugin().ignore_not_found:
                    self.client().update_floatingip(
                        floatingip_id,
                        {'floatingip': {'port_id': None}})

            # associate the floatingip with the new port
            floatingip_id = (prop_diff.get(self.FLOATINGIP_ID) or
                             floatingip_id)
            port_id = prop_diff.get(self.PORT_ID) or port_id

            fixed_ip_address = (prop_diff.get(self.FIXED_IP_ADDRESS) or
                                self.properties[self.FIXED_IP_ADDRESS])

            request_body = {
                'floatingip': {
                    'port_id': port_id,
                    'fixed_ip_address': fixed_ip_address}}

            self.client().update_floatingip(floatingip_id, request_body)
            self.resource_id_set(self.id)
class GlanceImage(resource.Resource):
    """A resource managing images in Glance.

    A resource provides managing images that are meant to be used with
    other services.
    """

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        NAME, IMAGE_ID, IS_PUBLIC, MIN_DISK, MIN_RAM, PROTECTED,
        DISK_FORMAT, CONTAINER_FORMAT, LOCATION
    ) = (
        'name', 'id', 'is_public', 'min_disk', 'min_ram', 'protected',
        'disk_format', 'container_format', 'location'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the image. The name of an image is not '
              'unique to a Image Service node.')
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _('The image ID. Glance will generate a UUID if not specified.')
        ),
        IS_PUBLIC: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Scope of image accessibility. Public or private. '
              'Default value is False means private.'),
            default=False,
        ),
        MIN_DISK: properties.Schema(
            properties.Schema.INTEGER,
            _('Amount of disk space (in GB) required to boot image. '
              'Default value is 0 if not specified '
              'and means no limit on the disk size.'),
            constraints=[
                constraints.Range(min=0),
            ],
            default=0
        ),
        MIN_RAM: properties.Schema(
            properties.Schema.INTEGER,
            _('Amount of ram (in MB) required to boot image. Default value '
              'is 0 if not specified and means no limit on the ram size.'),
            constraints=[
                constraints.Range(min=0),
            ],
            default=0
        ),
        PROTECTED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether the image can be deleted. If the value is True, '
              'the image is protected and cannot be deleted.'),
            default=False
        ),
        DISK_FORMAT: properties.Schema(
            properties.Schema.STRING,
            _('Disk format of image.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ami', 'ari', 'aki',
                                           'vhd', 'vmdk', 'raw',
                                           'qcow2', 'vdi', 'iso'])
            ]
        ),
        CONTAINER_FORMAT: properties.Schema(
            properties.Schema.STRING,
            _('Container format of image.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ami', 'ari', 'aki',
                                           'bare', 'ova', 'ovf'])
            ]
        ),
        LOCATION: properties.Schema(
            properties.Schema.STRING,
            _('URL where the data for this image already resides. For '
              'example, if the image data is stored in swift, you could '
              'specify "swift://example.com/container/obj".'),
            required=True,
        ),
    }

    default_client_name = 'glance'

    entity = 'images'

    def handle_create(self):
        """Create the image from the non-None properties.

        Returns the new image id, which check_create_complete polls.
        """
        args = dict((k, v) for k, v in self.properties.items()
                    if v is not None)
        image_id = self.client().images.create(**args).id
        self.resource_id_set(image_id)
        return image_id

    def check_create_complete(self, image_id):
        # Image data is fetched from LOCATION asynchronously; wait for it
        # to become active.
        image = self.client().images.get(image_id)
        return image.status == 'active'

    def _show_resource(self):
        # FIX: use self.client() (default_client_name = 'glance') instead
        # of the legacy self.glance() accessor, for consistency with every
        # other method in this class (handle_create, check_create_complete,
        # parse_live_resource_data all use self.client()).
        if self.client().version == 1.0:
            return super(GlanceImage, self)._show_resource()
        else:
            image = self.client().images.get(self.resource_id)
            return dict(image)

    def validate(self):
        """Reject disk/container format combinations Glance disallows."""
        super(GlanceImage, self).validate()
        container_format = self.properties[self.CONTAINER_FORMAT]
        if (container_format in ['ami', 'ari', 'aki'] and
                self.properties[self.DISK_FORMAT] != container_format):
            msg = _("Invalid mix of disk and container formats. When "
                    "setting a disk or container format to one of 'aki', "
                    "'ari', or 'ami', the container and disk formats must "
                    "match.")
            raise exception.StackValidationFailed(message=msg)

    def get_live_resource_data(self):
        """Fetch live image data; a deleted/killed image counts as gone."""
        image_data = super(GlanceImage, self).get_live_resource_data()
        if image_data.get('status') in ('deleted', 'killed'):
            raise exception.EntityNotFound(entity='Resource', name=self.name)
        return image_data

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Map live image data back onto this resource's properties."""
        image_reality = {}

        # NOTE(prazumovsky): At first, there's no way to get location from
        # glance; at second, location property is doubtful, because glance
        # client v2 doesn't use location, it uses locations. So, we should
        # get location property from resource properties.
        if self.client().version == 1.0:
            image_reality.update(
                {self.LOCATION: resource_properties[self.LOCATION]})

        for key in self.PROPERTIES:
            if key == self.LOCATION:
                continue
            if key == self.IMAGE_ID:
                if (resource_properties.get(self.IMAGE_ID) is not None or
                        resource_data.get(self.IMAGE_ID) != self.resource_id):
                    image_reality.update({self.IMAGE_ID: resource_data.get(
                        self.IMAGE_ID)})
                else:
                    image_reality.update({self.IMAGE_ID: None})
            else:
                image_reality.update({key: resource_data.get(key)})

        return image_reality
class Subnet(resource.Resource):
    """AWS::EC2::Subnet analogue backed by a Neutron subnet.

    Creates an IPv4 subnet on the VPC's network and, when the VPC has a
    router, plugs the subnet into it.
    """

    PROPERTIES = (
        AVAILABILITY_ZONE, CIDR_BLOCK, VPC_ID, TAGS,
    ) = (
        'AvailabilityZone', 'CidrBlock', 'VpcId', 'Tags',
    )

    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )

    ATTRIBUTES = (
        AVAILABILITY_ZONE,
    )

    properties_schema = {
        AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _('Availability zone in which you want the subnet.')
        ),
        CIDR_BLOCK: properties.Schema(
            properties.Schema.STRING,
            _('CIDR block to apply to subnet.'),
            required=True
        ),
        VPC_ID: properties.Schema(
            properties.Schema.STRING,
            _('Ref structure that contains the ID of the VPC on which you '
              'want to create the subnet.'),
            required=True
        ),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                _('List of tags to attach to this resource.'),
                schema={
                    TAG_KEY: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                    TAG_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                },
                implemented=False,
            )
        ),
    }

    attributes_schema = {
        AVAILABILITY_ZONE: attributes.Schema(
            _('Availability Zone of the subnet.'),
            type=attributes.Schema.STRING
        ),
    }

    default_client_name = 'neutron'

    def handle_create(self):
        """Create the subnet and attach it to the VPC router, if any."""
        # TODO(sbaker) Verify that this CidrBlock is within the vpc CidrBlock
        client = self.client()
        vpc_network_id = self.properties.get(self.VPC_ID)
        subnet_request = {
            'subnet': {
                'network_id': vpc_network_id,
                'cidr': self.properties.get(self.CIDR_BLOCK),
                'name': self.physical_resource_name(),
                'ip_version': 4,
            }
        }
        new_subnet = client.create_subnet(subnet_request)['subnet']
        self.resource_id_set(new_subnet['id'])

        vpc_router = vpc.VPC.router_for_vpc(client, vpc_network_id)
        if vpc_router:
            client.add_interface_router(vpc_router['id'],
                                        {'subnet_id': new_subnet['id']})

    def handle_delete(self):
        """Detach the subnet from the VPC router and delete it."""
        if self.resource_id is None:
            return

        client = self.client()
        subnet_id = self.resource_id
        vpc_network_id = self.properties.get(self.VPC_ID)

        # Both steps tolerate the entity having vanished already.
        with self.client_plugin().ignore_not_found:
            vpc_router = vpc.VPC.router_for_vpc(client, vpc_network_id)
            if vpc_router:
                client.remove_interface_router(vpc_router['id'],
                                               {'subnet_id': subnet_id})

        with self.client_plugin().ignore_not_found:
            client.delete_subnet(subnet_id)

    def _resolve_attribute(self, name):
        # Only AvailabilityZone is exposed; it simply echoes the property.
        if name != self.AVAILABILITY_ZONE:
            return None
        return self.properties.get(self.AVAILABILITY_ZONE)
class AddressScope(neutron.NeutronResource):
    """A resource for Neutron address scope.

    This resource can be associated with multiple subnet pools in a
    one-to-many relationship. The subnet pools under an address scope must
    not overlap.
    """

    required_service_extension = 'address-scope'

    support_status = support.SupportStatus(version='6.0.0')

    PROPERTIES = (
        NAME, SHARED, TENANT_ID, IP_VERSION,
    ) = (
        'name', 'shared', 'tenant_id', 'ip_version',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name for the address scope.'),
            update_allowed=True),
        SHARED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether the address scope should be shared to other '
              'tenants. Note that the default policy setting '
              'restricts usage of this attribute to administrative '
              'users only, and restricts changing of shared address scope '
              'to unshared with update.'),
            default=False,
            update_allowed=True),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('The owner tenant ID of the address scope. Only '
              'administrative users can specify a tenant ID '
              'other than their own.'),
            constraints=[constraints.CustomConstraint('keystone.project')]),
        IP_VERSION: properties.Schema(
            properties.Schema.INTEGER,
            _('Address family of the address scope, which is 4 or 6.'),
            default=4,
            constraints=[
                constraints.AllowedValues([4, 6]),
            ]),
    }

    def handle_create(self):
        """Create the address scope, named after the physical resource."""
        scope_props = self.prepare_properties(
            self.properties, self.physical_resource_name())
        created = self.client().create_address_scope(
            {'address_scope': scope_props})['address_scope']
        self.resource_id_set(created['id'])

    def handle_delete(self):
        """Delete the address scope; an already-deleted one is ignored."""
        if self.resource_id is None:
            return
        with self.client_plugin().ignore_not_found:
            self.client().delete_address_scope(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed properties to Neutron."""
        if not prop_diff:
            return
        self.prepare_update_properties(prop_diff)
        self.client().update_address_scope(
            self.resource_id, {'address_scope': prop_diff})

    def _show_resource(self):
        """Return the live address scope for attribute resolution."""
        return self.client().show_address_scope(
            self.resource_id)['address_scope']
class LoadBalancer(neutron.NeutronResource):
    """A resource for creating LBaaS v2 Load Balancers.

    This resource creates and manages Neutron LBaaS v2 Load Balancers,
    which allows traffic to be directed between servers.
    """

    support_status = support.SupportStatus(version='6.0.0')

    required_service_extension = 'lbaasv2'

    PROPERTIES = (
        DESCRIPTION, NAME, PROVIDER, VIP_ADDRESS, VIP_SUBNET,
        ADMIN_STATE_UP, TENANT_ID
    ) = (
        'description', 'name', 'provider', 'vip_address', 'vip_subnet',
        'admin_state_up', 'tenant_id'
    )

    ATTRIBUTES = (
        VIP_ADDRESS_ATTR, VIP_PORT_ATTR, VIP_SUBNET_ATTR
    ) = (
        'vip_address', 'vip_port_id', 'vip_subnet_id'
    )

    properties_schema = {
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of this Load Balancer.'),
            update_allowed=True,
            default=''
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of this Load Balancer.'),
            update_allowed=True
        ),
        PROVIDER: properties.Schema(
            properties.Schema.STRING,
            _('Provider for this Load Balancer.'),
            # Only the 'vlb' provider is accepted in this distribution.
            constraints=[constraints.AllowedValues(['vlb'])]
        ),
        VIP_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address for the VIP.'),
            constraints=[
                constraints.CustomConstraint('ip_addr')
            ],
        ),
        VIP_SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('The name or ID of the subnet on which to allocate the VIP '
              'address.'),
            constraints=[
                constraints.CustomConstraint('neutron.subnet')
            ],
            required=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of this Load Balancer.'),
            default=True,
            update_allowed=True,
            # NOTE(review): AllowedValues(['True']) on a BOOLEAN property
            # compares against the string 'True' -- verify this constraint
            # behaves as intended.
            constraints=[constraints.AllowedValues(['True'])]
        ),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the tenant who owns the Load Balancer. Only '
              'administrative users can specify a tenant ID other than '
              'their own.'),
            constraints=[
                constraints.CustomConstraint('keystone.project')
            ],
        )
    }

    attributes_schema = {
        VIP_ADDRESS_ATTR: attributes.Schema(
            _('The VIP address of the LoadBalancer.'),
            type=attributes.Schema.STRING
        ),
        VIP_PORT_ATTR: attributes.Schema(
            _('The VIP port of the LoadBalancer.'),
            type=attributes.Schema.STRING
        ),
        VIP_SUBNET_ATTR: attributes.Schema(
            _('The VIP subnet of the LoadBalancer.'),
            type=attributes.Schema.STRING
        )
    }

    def handle_create(self):
        # Resolve the user-supplied subnet name/ID into the vip_subnet_id
        # field that the LBaaS v2 API expects.
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name()
        )
        self.client_plugin().resolve_subnet(
            properties, self.VIP_SUBNET, 'vip_subnet_id')
        lb = self.client().create_loadbalancer(
            {'loadbalancer': properties})['loadbalancer']
        self.resource_id_set(lb['id'])

    def check_create_complete(self, data):
        # Poll until the LB's provisioning status settles.
        return self.client_plugin().check_lb_status(self.resource_id)

    def _show_resource(self):
        return self.client().show_loadbalancer(
            self.resource_id)['loadbalancer']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            self.client().update_loadbalancer(
                self.resource_id, {'loadbalancer': prop_diff})
        # prop_diff is passed through as the cookie for
        # check_update_complete.
        return prop_diff

    def check_update_complete(self, prop_diff):
        if prop_diff:
            return self.client_plugin().check_lb_status(self.resource_id)
        return True

    def handle_delete(self):
        # Deletion is driven entirely from check_delete_complete so the
        # LB status can be re-examined between polling attempts.
        pass

    def check_delete_complete(self, data):
        if self.resource_id is None:
            return True
        try:
            try:
                # Issue the delete once the LB has settled; an LB stuck in
                # ERROR raises ResourceInError and is deleted regardless.
                if self.client_plugin().check_lb_status(self.resource_id):
                    self.client().delete_loadbalancer(self.resource_id)
            except exception.ResourceInError:
                # Still try to delete loadbalancer in error state
                self.client().delete_loadbalancer(self.resource_id)
        except exceptions.NotFound:
            # Resource is gone
            return True
        # Not gone yet; poll again.
        return False
class NetworkGateway(neutron.NeutronResource):
    """Network Gateway resource in Neutron Network Gateway.

    Resource for connecting internal networks with specified devices.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        NAME, DEVICES, CONNECTIONS,
    ) = (
        'name', 'devices', 'connections',
    )

    ATTRIBUTES = (DEFAULT, ) = ('default', )

    _DEVICES_KEYS = (
        ID, INTERFACE_NAME,
    ) = (
        'id', 'interface_name',
    )

    _CONNECTIONS_KEYS = (
        NETWORK_ID, NETWORK, SEGMENTATION_TYPE, SEGMENTATION_ID,
    ) = (
        'network_id', 'network', 'segmentation_type', 'segmentation_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            description=_('The name of the network gateway.'),
            update_allowed=True),
        DEVICES: properties.Schema(
            properties.Schema.LIST,
            description=_('Device info for this network gateway.'),
            required=True,
            constraints=[constraints.Length(min=1)],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ID: properties.Schema(
                        properties.Schema.STRING,
                        description=_('The device id for the network '
                                      'gateway.'),
                        required=True),
                    INTERFACE_NAME: properties.Schema(
                        properties.Schema.STRING,
                        description=_('The interface name for the '
                                      'network gateway.'),
                        required=True)
                })),
        CONNECTIONS: properties.Schema(
            properties.Schema.LIST,
            description=_('Connection info for this network gateway.'),
            # NOTE(review): default is {} on a LIST property; preserved
            # as-is -- confirm against the property validator.
            default={},
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NETWORK_ID: properties.Schema(
                        properties.Schema.STRING,
                        support_status=support.SupportStatus(
                            status=support.HIDDEN,
                            message=_('Use property %s.') % NETWORK,
                            version='5.0.0',
                            previous_status=support.SupportStatus(
                                status=support.DEPRECATED,
                                version='2014.2')),
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ],
                    ),
                    NETWORK: properties.Schema(
                        properties.Schema.STRING,
                        description=_('The internal network to connect on '
                                      'the network gateway.'),
                        support_status=support.SupportStatus(
                            version='2014.2'),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ],
                    ),
                    SEGMENTATION_TYPE: properties.Schema(
                        properties.Schema.STRING,
                        description=_(
                            'L2 segmentation strategy on the external '
                            'side of the network gateway.'),
                        default='flat',
                        constraints=[
                            constraints.AllowedValues(('flat', 'vlan'))
                        ]),
                    SEGMENTATION_ID: properties.Schema(
                        properties.Schema.INTEGER,
                        description=_(
                            'The id for L2 segment on the external side '
                            'of the network gateway. Must be specified '
                            'when using vlan.'),
                        constraints=[constraints.Range(0, 4094)])
                }))
    }

    attributes_schema = {
        DEFAULT: attributes.Schema(
            _("A boolean value of default flag."),
            type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        # Migrate the hidden network_id key onto network, then resolve
        # network names to IDs via Neutron.
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.CONNECTIONS, self.NETWORK],
                value_name=self.NETWORK_ID),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.CONNECTIONS, self.NETWORK],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='network')
        ]

    def _show_resource(self):
        return self.client().show_network_gateway(
            self.resource_id)['network_gateway']

    def validate(self):
        """Validate any of the provided params."""
        super(NetworkGateway, self).validate()
        connections = self.properties[self.CONNECTIONS]

        for connection in connections:
            # vlan requires a segmentation id; flat forbids a non-zero one.
            segmentation_type = connection[self.SEGMENTATION_TYPE]
            segmentation_id = connection.get(self.SEGMENTATION_ID)

            if segmentation_type == 'vlan' and segmentation_id is None:
                msg = _("segmentation_id must be specified for using vlan")
                raise exception.StackValidationFailed(message=msg)

            if segmentation_type == 'flat' and segmentation_id:
                msg = _("segmentation_id cannot be specified except 0 for "
                        "using flat")
                raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())

        # Connections are wired up one-by-one after the gateway exists.
        connections = props.pop(self.CONNECTIONS)
        ret = self.client().create_network_gateway(
            {'network_gateway': props})['network_gateway']
        self.resource_id_set(ret['id'])

        for connection in connections:
            if self.NETWORK in connection:
                connection['network_id'] = connection.pop(self.NETWORK)
            self.client().connect_network_gateway(ret['id'], connection)

    def handle_delete(self):
        if not self.resource_id:
            return

        # Disconnect each connection first; gone entities are ignored.
        connections = self.properties[self.CONNECTIONS]
        for connection in connections:
            with self.client_plugin().ignore_not_found:
                if self.NETWORK in connection:
                    connection['network_id'] = connection.pop(self.NETWORK)
                self.client().disconnect_network_gateway(
                    self.resource_id, connection)

        try:
            self.client().delete_network_gateway(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            # True signals the engine to wait for deletion to complete.
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        connections = None
        if self.CONNECTIONS in prop_diff:
            connections = prop_diff.pop(self.CONNECTIONS)

        if self.DEVICES in prop_diff:
            # A device change cannot be applied in place: tear the gateway
            # down and recreate it with the merged properties.
            self.handle_delete()
            self.properties.data.update(prop_diff)
            self.handle_create()
            return
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_network_gateway(
                self.resource_id, {'network_gateway': prop_diff})

        if connections:
            # Re-wire connections: drop every existing one (ignoring those
            # already gone), then connect the new set.
            for connection in self.properties[self.CONNECTIONS]:
                with self.client_plugin().ignore_not_found:
                    if self.NETWORK in connection:
                        connection['network_id'] = connection.pop(
                            self.NETWORK)
                    self.client().disconnect_network_gateway(
                        self.resource_id, connection)
            for connection in connections:
                if self.NETWORK in connection:
                    connection['network_id'] = connection.pop(self.NETWORK)
                self.client().connect_network_gateway(self.resource_id,
                                                      connection)
class ElasticIp(resource.Resource):
    """AWS-compatible Elastic IP resource.

    Allocates a floating IP either through Neutron (when the ``Domain``
    property is set, i.e. VPC mode) or through the Nova floating-ip API
    (classic mode), and optionally associates it with a server.
    """

    PROPERTIES = (
        DOMAIN, INSTANCE_ID,
    ) = (
        'Domain', 'InstanceId',
    )

    ATTRIBUTES = (ALLOCATION_ID, ) = ('AllocationId', )

    properties_schema = {
        DOMAIN: properties.Schema(
            properties.Schema.STRING,
            _('Set to "vpc" to have IP address allocation associated to your '
              'VPC.'),
            constraints=[
                constraints.AllowedValues(['vpc']),
            ]),
        INSTANCE_ID: properties.Schema(
            properties.Schema.STRING,
            _('Instance ID to associate with EIP.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('nova.server')]),
    }

    attributes_schema = {
        ALLOCATION_ID: attributes.Schema(
            _('ID that AWS assigns to represent the allocation of the '
              'address for use with Amazon VPC. Returned only for VPC '
              'elastic IP addresses.'),
            type=attributes.Schema.STRING),
    }

    default_client_name = 'nova'

    def __init__(self, name, json_snippet, stack):
        super(ElasticIp, self).__init__(name, json_snippet, stack)
        # Cache of the floating IP address string; lazily filled by
        # _ipaddress() from either Neutron or Nova.
        self.ipaddress = None

    def _ipaddress(self):
        """Return the floating IP address, looking it up lazily.

        Uses Neutron when DOMAIN is set (VPC mode), otherwise Nova.
        Lookup failures due to a missing floating IP are swallowed and
        an empty string is returned.
        """
        if self.ipaddress is None and self.resource_id is not None:
            if self.properties[self.DOMAIN]:
                try:
                    ips = self.neutron().show_floatingip(self.resource_id)
                except Exception as ex:
                    # Re-raises anything that is not a not-found error.
                    self.client_plugin('neutron').ignore_not_found(ex)
                else:
                    self.ipaddress = ips['floatingip']['floating_ip_address']
            else:
                try:
                    ips = self.client().floating_ips.get(self.resource_id)
                except Exception as e:
                    self.client_plugin('nova').ignore_not_found(e)
                else:
                    self.ipaddress = ips.ip
        return self.ipaddress or ''

    def handle_create(self):
        """Allocate a floating IP for the current tenant."""
        ips = None
        if self.properties[self.DOMAIN]:
            # VPC mode: allocate via Neutron on the external network.
            ext_net = internet_gateway.InternetGateway.get_external_network_id(
                self.neutron())
            props = {'floating_network_id': ext_net}
            ips = self.neutron().create_floatingip({
                'floatingip': props})['floatingip']
            self.ipaddress = ips['floating_ip_address']
            self.resource_id_set(ips['id'])
            LOG.info(_LI('ElasticIp create %s'), str(ips))
        else:
            # Classic mode: allocate via the Nova floating-ip API.
            try:
                ips = self.client().floating_ips.create()
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    if self.client_plugin('nova').is_not_found(e):
                        LOG.error(_LE("No default floating IP pool configured."
                                      " Set 'default_floating_pool' in "
                                      "nova.conf."))
            if ips:
                self.ipaddress = ips.ip
                self.resource_id_set(ips.id)
                LOG.info(_LI('ElasticIp create %s'), str(ips))

        instance_id = self.properties[self.INSTANCE_ID]
        if instance_id:
            # Optionally attach the new IP to the requested server.
            server = self.client().servers.get(instance_id)
            server.add_floating_ip(self._ipaddress())

    def handle_delete(self):
        """Disassociate (best effort) and deallocate the floating IP."""
        if self.resource_id is None:
            return
        # The association may never have been made (bare EIP, or the
        # association failed during creation).  Disassociating then raises,
        # so catch and ignore the expected errors before deallocating.
        instance_id = self.properties[self.INSTANCE_ID]
        if instance_id:
            try:
                server = self.client().servers.get(instance_id)
                if server:
                    server.remove_floating_ip(self._ipaddress())
            except Exception as e:
                is_not_found = self.client_plugin('nova').is_not_found(e)
                is_unprocessable_entity = self.client_plugin(
                    'nova').is_unprocessable_entity(e)
                if (not is_not_found and not is_unprocessable_entity):
                    raise
        # deallocate the eip via whichever service allocated it
        if self.properties[self.DOMAIN]:
            with self.client_plugin('neutron').ignore_not_found:
                self.neutron().delete_floatingip(self.resource_id)
        else:
            with self.client_plugin('nova').ignore_not_found:
                self.client().floating_ips.delete(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-point the floating IP when InstanceId changes."""
        if prop_diff:
            if self.INSTANCE_ID in prop_diff:
                instance_id = prop_diff.get(self.INSTANCE_ID)
                if instance_id:
                    # no need to remove the floating ip from the old instance,
                    # nova does this automatically when calling
                    # add_floating_ip().
                    server = self.client().servers.get(instance_id)
                    server.add_floating_ip(self._ipaddress())
                else:
                    # InstanceId was removed: detach the floating IP from
                    # the previously associated instance.
                    instance_id_old = self.properties[self.INSTANCE_ID]
                    if instance_id_old:
                        server = self.client().servers.get(instance_id_old)
                        server.remove_floating_ip(self._ipaddress())

    def get_reference_id(self):
        # References resolve to the IP address when known, else the name.
        eip = self._ipaddress()
        if eip:
            return six.text_type(eip)
        else:
            return six.text_type(self.name)

    def _resolve_attribute(self, name):
        # NOTE(review): returns the string 'None' if resource_id is unset —
        # presumably callers only ask after creation; confirm if relied upon.
        if name == self.ALLOCATION_ID:
            return six.text_type(self.resource_id)
class KeystoneEndpoint(resource.Resource):
    """Heat Template Resource for Keystone Service Endpoint.

    Keystone endpoint is just the URL that can be used for accessing a service
    within OpenStack. Endpoint can be accessed by admin, by services or
    public, i.e. everyone can use this endpoint.
    """

    support_status = support.SupportStatus(
        version='5.0.0',
        message=_('Supported versions: keystone v3'))

    default_client_name = 'keystone'

    entity = 'endpoints'

    PROPERTIES = (
        NAME, REGION, SERVICE, INTERFACE, SERVICE_URL, ENABLED,
    ) = (
        'name', 'region', 'service', 'interface', 'url', 'enabled',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of keystone endpoint.'),
            update_allowed=True),
        REGION: properties.Schema(
            properties.Schema.STRING,
            _('Name or Id of keystone region.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('keystone.region')]),
        SERVICE: properties.Schema(
            properties.Schema.STRING,
            _('Name or Id of keystone service.'),
            update_allowed=True,
            required=True,
            constraints=[constraints.CustomConstraint('keystone.service')]),
        INTERFACE: properties.Schema(
            properties.Schema.STRING,
            _('Interface type of keystone service endpoint.'),
            update_allowed=True,
            required=True,
            constraints=[constraints.AllowedValues(
                ['public', 'internal', 'admin'])]),
        SERVICE_URL: properties.Schema(
            properties.Schema.STRING,
            _('URL of keystone service endpoint.'),
            update_allowed=True,
            required=True),
        ENABLED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('This endpoint is enabled or disabled.'),
            default=True,
            update_allowed=True,
            support_status=support.SupportStatus(version='6.0.0'))
    }

    def client(self):
        # Unwrap the raw keystoneclient object from the Heat client plugin.
        return super(KeystoneEndpoint, self).client().client

    def handle_create(self):
        """Create the keystone endpoint and record its id."""
        # An omitted name falls back to the Heat-generated physical name.
        endpoint_name = (self.properties[self.NAME] or
                         self.physical_resource_name())
        endpoint = self.client().endpoints.create(
            region=self.properties[self.REGION],
            service=self.properties[self.SERVICE],
            interface=self.properties[self.INTERFACE],
            url=self.properties[self.SERVICE_URL],
            name=endpoint_name,
            enabled=self.properties[self.ENABLED])

        self.resource_id_set(endpoint.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed properties to keystone."""
        if not prop_diff:
            return

        # Don't update the name if no change
        endpoint_name = None
        if self.NAME in prop_diff:
            endpoint_name = (prop_diff[self.NAME] or
                             self.physical_resource_name())

        self.client().endpoints.update(
            endpoint=self.resource_id,
            region=prop_diff.get(self.REGION),
            service=prop_diff.get(self.SERVICE),
            interface=prop_diff.get(self.INTERFACE),
            url=prop_diff.get(self.SERVICE_URL),
            name=endpoint_name,
            enabled=prop_diff.get(self.ENABLED))
class SwiftSignal(resource.Resource):
    """Resource for handling signals received by SwiftSignalHandle.

    This resource handles signals received by SwiftSignalHandle and is same
    as WaitCondition resource.
    """

    support_status = support.SupportStatus(version='2014.2')

    default_client_name = "swift"

    PROPERTIES = (HANDLE, TIMEOUT, COUNT,) = ('handle', 'timeout', 'count',)

    properties_schema = {
        HANDLE: properties.Schema(
            properties.Schema.STRING,
            required=True,
            description=_('URL of TempURL where resource will signal '
                          'completion and optionally upload data.')),
        TIMEOUT: properties.Schema(
            properties.Schema.NUMBER,
            description=_('The maximum number of seconds to wait for the '
                          'resource to signal completion. Once the timeout '
                          'is reached, creation of the signal resource will '
                          'fail.'),
            required=True,
            constraints=[
                constraints.Range(1, 43200),
            ]),
        COUNT: properties.Schema(
            properties.Schema.INTEGER,
            description=_('The number of success signals that must be '
                          'received before the stack creation process '
                          'continues.'),
            default=1,
            constraints=[
                constraints.Range(1, 1000),
            ])
    }

    # NOTE: (DATA) is not a tuple — ATTRIBUTES is simply the string 'data'.
    ATTRIBUTES = (DATA) = 'data'

    attributes_schema = {
        DATA: attributes.Schema(
            _('JSON data that was uploaded via the SwiftSignalHandle.'),
            type=attributes.Schema.STRING)
    }

    WAIT_STATUSES = (
        STATUS_FAILURE,
        STATUS_SUCCESS,
    ) = (
        'FAILURE',
        'SUCCESS',
    )

    # Keys that may appear in an uploaded signal body.
    METADATA_KEYS = (
        DATA, REASON, STATUS, UNIQUE_ID
    ) = (
        'data', 'reason', 'status', 'id'
    )

    def __init__(self, name, json_snippet, stack):
        super(SwiftSignal, self).__init__(name, json_snippet, stack)
        # Lazily computed from the HANDLE property.
        self._obj_name = None
        self._url = None

    @property
    def url(self):
        """Parsed TempURL from the HANDLE property."""
        if not self._url:
            self._url = parse.urlparse(self.properties[self.HANDLE])
        return self._url

    @property
    def obj_name(self):
        """Swift object name component of the TempURL path."""
        if not self._obj_name:
            self._obj_name = self.url.path.split('/')[4]
        return self._obj_name

    def _validate_handle_url(self):
        """Reject handles that are not TempURLs inside this stack's container.

        Raises ValueError when the path is not a valid Swift TempURL or the
        container segment does not match this stack's id.
        """
        parts = self.url.path.split('/')
        msg = _('"%(url)s" is not a valid SwiftSignalHandle. The %(part)s '
                'is invalid')
        cplugin = self.client_plugin()
        if not cplugin.is_valid_temp_url_path(self.url.path):
            raise ValueError(msg % {'url': self.url.path,
                                    'part': 'Swift TempURL path'})
        if not parts[3] == self.stack.id:
            raise ValueError(msg % {'url': self.url.path,
                                    'part': 'container name'})

    def handle_create(self):
        self._validate_handle_url()
        started_at = timeutils.utcnow()
        # (start time, timeout) is passed to check_create_complete().
        return started_at, float(self.properties[self.TIMEOUT])

    def get_signals(self):
        """Fetch, parse and normalize all signal objects for this handle.

        Returns a list of signal dicts with DATA/REASON/STATUS/UNIQUE_ID
        defaults filled in; a later signal with the same id replaces an
        earlier one.
        """
        try:
            container = self.client().get_container(self.stack.id)
        except Exception as exc:
            self.client_plugin().ignore_not_found(exc)
            LOG.debug("Swift container %s was not found", self.stack.id)
            return []

        index = container[1]
        if not index:
            LOG.debug("Swift objects in container %s were not found",
                      self.stack.id)
            return []

        # Remove objects that are for other handle resources, since
        # multiple SwiftSignalHandle resources in the same stack share
        # a container
        filtered = [obj for obj in index if self.obj_name in obj['name']]

        # Fetch objects from Swift and filter results
        obj_bodies = []
        for obj in filtered:
            try:
                signal = self.client().get_object(self.stack.id, obj['name'])
            except Exception as exc:
                self.client_plugin().ignore_not_found(exc)
                continue

            body = signal[1]
            if body == swift.IN_PROGRESS:
                # Ignore the initial placeholder object
                continue
            if body == "":
                obj_bodies.append({})
                continue
            try:
                obj_bodies.append(jsonutils.loads(body))
            except ValueError:
                raise exception.Error(_("Failed to parse JSON data: %s") %
                                      body)

        # Set default values on each signal
        signals = []
        signal_num = 1
        for signal in obj_bodies:
            # Remove previous signals with the same ID
            sig_id = self.UNIQUE_ID
            ids = [s.get(sig_id) for s in signals if sig_id in s]
            if ids and sig_id in signal and ids.count(signal[sig_id]) > 0:
                # Rebuild the list instead of calling signals.remove()
                # inside a comprehension over the same list: mutating a
                # list while iterating it can skip elements.
                signals = [s for s in signals
                           if s.get(sig_id) != signal[sig_id]]

            # Make sure all fields are set, since all are optional
            signal.setdefault(self.DATA, None)
            unique_id = signal.setdefault(sig_id, signal_num)
            reason = 'Signal %s received' % unique_id
            signal.setdefault(self.REASON, reason)
            signal.setdefault(self.STATUS, self.STATUS_SUCCESS)

            signals.append(signal)
            signal_num += 1

        return signals

    def get_status(self):
        """Return the STATUS of every received signal."""
        return [s[self.STATUS] for s in self.get_signals()]

    def get_status_reason(self, status):
        """Return the REASON strings for signals in the given status."""
        return [s[self.REASON]
                for s in self.get_signals()
                if s[self.STATUS] == status]

    def get_data(self):
        """Return a dict mapping signal id to its uploaded data, or None."""
        signals = self.get_signals()
        if not signals:
            return None
        data = {}
        for signal in signals:
            data[signal[self.UNIQUE_ID]] = signal[self.DATA]
        return data

    def check_create_complete(self, create_data):
        """Poll for success signals; raise on timeout or failure signal."""
        if timeutils.is_older_than(*create_data):
            raise SwiftSignalTimeout(self)

        statuses = self.get_status()
        if not statuses:
            return False

        for status in statuses:
            if status == self.STATUS_FAILURE:
                failure = SwiftSignalFailure(self)
                LOG.info(_LI('%(name)s Failed (%(failure)s)'),
                         {'name': str(self), 'failure': str(failure)})
                raise failure
            elif status != self.STATUS_SUCCESS:
                raise exception.Error(_("Unknown status: %s") % status)

        if len(statuses) >= self.properties[self.COUNT]:
            LOG.info(_LI("%s Succeeded"), str(self))
            return True
        return False

    def _resolve_attribute(self, key):
        if key == self.DATA:
            return six.text_type(jsonutils.dumps(self.get_data()))
class AutoScalingPolicy(signal_responder.SignalResponder,
                        cooldown.CooldownMixin):
    """A resource to manage scaling of `OS::Heat::AutoScalingGroup`.

    **Note** while it may incidentally support
    `AWS::AutoScaling::AutoScalingGroup` for now, please don't use it for that
    purpose and use `AWS::AutoScaling::ScalingPolicy` instead.

    Resource to manage scaling for `OS::Heat::AutoScalingGroup`, i.e. define
    which metric should be scaled and scaling adjustment, set cooldown etc.
    """

    PROPERTIES = (
        AUTO_SCALING_GROUP_NAME, SCALING_ADJUSTMENT, ADJUSTMENT_TYPE,
        COOLDOWN, MIN_ADJUSTMENT_STEP,
    ) = (
        'auto_scaling_group_id', 'scaling_adjustment', 'adjustment_type',
        'cooldown', 'min_adjustment_step',
    )

    ATTRIBUTES = (ALARM_URL, SIGNAL_URL) = ('alarm_url', 'signal_url')

    properties_schema = {
        # TODO(Qiming): property name should be AUTO_SCALING_GROUP_ID
        AUTO_SCALING_GROUP_NAME: properties.Schema(
            properties.Schema.STRING,
            _('AutoScaling group ID to apply policy to.'),
            required=True),
        SCALING_ADJUSTMENT: properties.Schema(
            properties.Schema.NUMBER,
            _('Size of adjustment.'),
            required=True,
            update_allowed=True),
        ADJUSTMENT_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Type of adjustment (absolute or percentage).'),
            required=True,
            constraints=[
                constraints.AllowedValues(
                    [sc_util.CHANGE_IN_CAPACITY,
                     sc_util.EXACT_CAPACITY,
                     sc_util.PERCENT_CHANGE_IN_CAPACITY]),
            ],
            update_allowed=True),
        COOLDOWN: properties.Schema(
            properties.Schema.NUMBER,
            _('Cooldown period, in seconds.'),
            update_allowed=True),
        MIN_ADJUSTMENT_STEP: properties.Schema(
            properties.Schema.INTEGER,
            _('Minimum number of resources that are added or removed '
              'when the AutoScaling group scales up or down. This can '
              'be used only when specifying percent_change_in_capacity '
              'for the adjustment_type property.'),
            constraints=[
                constraints.Range(
                    min=0,
                ),
            ],
            update_allowed=True),
    }

    attributes_schema = {
        ALARM_URL: attributes.Schema(
            _("A signed url to handle the alarm."),
            type=attributes.Schema.STRING),
        SIGNAL_URL: attributes.Schema(
            _("A url to handle the alarm using native API."),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
    }

    def validate(self):
        """Add validation for min_adjustment_step."""
        super(AutoScalingPolicy, self).validate()
        self._validate_min_adjustment_step()

    def _validate_min_adjustment_step(self):
        # min_adjustment_step only makes sense with a percentage adjustment.
        adjustment_type = self.properties.get(self.ADJUSTMENT_TYPE)
        adjustment_step = self.properties.get(self.MIN_ADJUSTMENT_STEP)
        if (adjustment_type != sc_util.PERCENT_CHANGE_IN_CAPACITY
                and adjustment_step is not None):
            raise exception.ResourcePropertyValueDependency(
                prop1=self.MIN_ADJUSTMENT_STEP,
                prop2=self.ADJUSTMENT_TYPE,
                value=sc_util.PERCENT_CHANGE_IN_CAPACITY)

    def handle_metadata_reset(self):
        # Clear a stale in-progress flag so a new scaling action can start.
        metadata = self.metadata_get()
        if 'scaling_in_progress' in metadata:
            metadata['scaling_in_progress'] = False
            self.metadata_set(metadata)

    def handle_create(self):
        super(AutoScalingPolicy, self).handle_create()
        self.resource_id_set(self._get_user_id())

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Updates self.properties, if Properties has changed.

        If Properties has changed, update self.properties, so we get the new
        values during any subsequent adjustment.
        """
        if prop_diff:
            self.properties = json_snippet.properties(self.properties_schema,
                                                      self.context)

    def handle_signal(self, details=None):
        """React to an alarm signal by adjusting the scaling group.

        Raises NoActionRequired when the alarm state is not 'alarm' or the
        cooldown window has not elapsed.
        """
        # ceilometer sends details like this:
        # {u'alarm_id': ID, u'previous': u'ok', u'current': u'alarm',
        #  u'reason': u'...'})
        # in this policy we currently assume that this gets called
        # only when there is an alarm. But the template writer can
        # put the policy in all the alarm notifiers (nodata, and ok).
        #
        # our watchrule has upper case states so lower() them all.
        if details is None:
            alarm_state = 'alarm'
        else:
            alarm_state = details.get('current',
                                      details.get('state', 'alarm')).lower()

        LOG.info(_LI('Alarm %(name)s, new state %(state)s'),
                 {'name': self.name, 'state': alarm_state})

        if alarm_state != 'alarm':
            raise exception.NoActionRequired()
        if not self._is_scaling_allowed():
            LOG.info(_LI("%(name)s NOT performing scaling action, "
                         "cooldown %(cooldown)s"),
                     {'name': self.name,
                      'cooldown': self.properties[self.COOLDOWN]})
            raise exception.NoActionRequired()

        asgn_id = self.properties[self.AUTO_SCALING_GROUP_NAME]
        group = self.stack.resource_by_refid(asgn_id)
        changed_size = False
        # The finally clause guarantees the cooldown/in-progress metadata is
        # cleared even when the adjustment (or group lookup) fails.
        try:
            if group is None:
                raise exception.NotFound(
                    _('Alarm %(alarm)s could not find '
                      'scaling group named "%(group)s"')
                    % {'alarm': self.name, 'group': asgn_id})

            LOG.info(_LI('%(name)s Alarm, adjusting Group %(group)s with id '
                         '%(asgn_id)s by %(filter)s'),
                     {'name': self.name, 'group': group.name,
                      'asgn_id': asgn_id,
                      'filter': self.properties[self.SCALING_ADJUSTMENT]})
            changed_size = group.adjust(
                self.properties[self.SCALING_ADJUSTMENT],
                self.properties[self.ADJUSTMENT_TYPE],
                self.properties[self.MIN_ADJUSTMENT_STEP])
        finally:
            self._finished_scaling(
                "%s : %s" % (self.properties[self.ADJUSTMENT_TYPE],
                             self.properties[self.SCALING_ADJUSTMENT]),
                changed_size=changed_size)

    def _resolve_attribute(self, name):
        # Both URLs require a created resource (signed-url credentials).
        if self.resource_id is None:
            return
        if name == self.ALARM_URL:
            return six.text_type(self._get_ec2_signed_url())
        elif name == self.SIGNAL_URL:
            return six.text_type(self._get_heat_signal_url())

    def get_reference_id(self):
        # Skip SignalResponder's override; use the plain Resource behavior.
        return resource.Resource.get_reference_id(self)
class Pool(neutron.NeutronResource):
    """A resource for managing LBaaS v2 Pools.

    This resources manages Neutron-LBaaS v2 Pools, which represent a group
    of nodes. Pools define the subnet where nodes reside, balancing algorithm,
    and the nodes themselves.
    """

    support_status = support.SupportStatus(version='6.0.0')

    required_service_extension = 'lbaasv2'

    PROPERTIES = (
        ADMIN_STATE_UP, DESCRIPTION, SESSION_PERSISTENCE, NAME,
        LB_ALGORITHM, LISTENER, PROTOCOL, SESSION_PERSISTENCE_TYPE,
        SESSION_PERSISTENCE_COOKIE_NAME,
    ) = (
        'admin_state_up', 'description', 'session_persistence', 'name',
        'lb_algorithm', 'listener', 'protocol', 'type',
        'cookie_name'
    )

    SESSION_PERSISTENCE_TYPES = (
        SOURCE_IP, HTTP_COOKIE, APP_COOKIE
    ) = (
        'SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE'
    )

    ATTRIBUTES = (
        HEALTHMONITOR_ID_ATTR, LISTENERS_ATTR, MEMBERS_ATTR
    ) = (
        'healthmonitor_id', 'listeners', 'members'
    )

    properties_schema = {
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of this pool.'),
            default=True,
            update_allowed=True,
            # NOTE(review): only 'True' is allowed even though the type is
            # BOOLEAN — presumably because admin-down is unsupported by the
            # backend; confirm before relaxing.
            constraints=[constraints.AllowedValues(['True'])]),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of this pool.'),
            update_allowed=True,
            default=''),
        SESSION_PERSISTENCE: properties.Schema(
            properties.Schema.MAP,
            _('Configuration of session persistence.'),
            schema={
                SESSION_PERSISTENCE_TYPE: properties.Schema(
                    properties.Schema.STRING,
                    _('Method of implementation of session '
                      'persistence feature.'),
                    required=True,
                    constraints=[constraints.AllowedValues(
                        SESSION_PERSISTENCE_TYPES)]),
                SESSION_PERSISTENCE_COOKIE_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of the cookie, '
                      'required if type is APP_COOKIE.'))
            },
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of this pool.'),
            update_allowed=True),
        LB_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('The algorithm used to distribute load between the members of '
              'the pool.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ROUND_ROBIN',
                                           'LEAST_CONNECTIONS', 'SOURCE_IP']),
            ],
            update_allowed=True,
        ),
        LISTENER: properties.Schema(
            properties.Schema.STRING,
            # Fixed typo in the user-visible description: 'Listner'.
            _('Listener name or ID to be associated with this pool.'),
            required=True),
        PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            _('Protocol of the pool.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['TCP', 'HTTP']),
            ]),
    }

    attributes_schema = {
        HEALTHMONITOR_ID_ATTR: attributes.Schema(
            _('ID of the health monitor associated with this pool.'),
            type=attributes.Schema.STRING),
        LISTENERS_ATTR: attributes.Schema(
            _('Listener associated with this pool.'),
            type=attributes.Schema.STRING),
        MEMBERS_ATTR: attributes.Schema(
            _('Members associated with this pool.'),
            type=attributes.Schema.LIST),
    }

    def __init__(self, name, definition, stack):
        super(Pool, self).__init__(name, definition, stack)
        # Cached load balancer id resolved from the listener.
        self._lb_id = None

    @property
    def lb_id(self):
        """Id of the load balancer this pool's listener belongs to."""
        if self._lb_id is None:
            listener_id = self.client_plugin().find_resourceid_by_name_or_id(
                'listener', self.properties[self.LISTENER])
            listener = self.client().show_listener(listener_id)['listener']
            self._lb_id = listener['loadbalancers'][0]['id']
        return self._lb_id

    def validate(self):
        """Validate session-persistence settings against their type."""
        res = super(Pool, self).validate()
        if res:
            return res

        if self.properties[self.SESSION_PERSISTENCE] is not None:
            session_p = self.properties[self.SESSION_PERSISTENCE]
            persistence_type = session_p[self.SESSION_PERSISTENCE_TYPE]
            if persistence_type == self.APP_COOKIE:
                # APP_COOKIE requires a cookie name...
                if not session_p.get(self.SESSION_PERSISTENCE_COOKIE_NAME):
                    msg = (_('Property %(cookie)s is required when %(sp)s '
                             'type is set to %(app)s.') %
                           {'cookie': self.SESSION_PERSISTENCE_COOKIE_NAME,
                            'sp': self.SESSION_PERSISTENCE,
                            'app': self.APP_COOKIE})
                    raise exception.StackValidationFailed(message=msg)
            elif persistence_type == self.SOURCE_IP:
                # ...while SOURCE_IP forbids one.
                if session_p.get(self.SESSION_PERSISTENCE_COOKIE_NAME):
                    msg = (_('Property %(cookie)s must NOT be specified when '
                             '%(sp)s type is set to %(ip)s.') %
                           {'cookie': self.SESSION_PERSISTENCE_COOKIE_NAME,
                            'sp': self.SESSION_PERSISTENCE,
                            'ip': self.SOURCE_IP})
                    raise exception.StackValidationFailed(message=msg)

    def _check_lb_status(self):
        return self.client_plugin().check_lb_status(self.lb_id)

    def handle_create(self):
        """Prepare the creation request body.

        The actual API call is deferred to check_create_complete() so it can
        be retried while the parent load balancer is still busy.
        """
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        self.client_plugin().resolve_listener(
            properties, self.LISTENER, 'listener_id')
        session_p = properties.get(self.SESSION_PERSISTENCE)
        if session_p is not None:
            session_props = self.prepare_properties(session_p, None)
            properties[self.SESSION_PERSISTENCE] = session_props
        return properties

    def check_create_complete(self, properties):
        if self.resource_id is None:
            try:
                pool = self.client().create_lbaas_pool(
                    {'pool': properties})['pool']
                self.resource_id_set(pool['id'])
            except Exception as ex:
                # 'invalid' means the LB is busy; retry on the next poll.
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def _show_resource(self):
        return self.client().show_lbaas_pool(self.resource_id)['pool']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        self._update_called = False
        return prop_diff

    def check_update_complete(self, prop_diff):
        if not prop_diff:
            return True

        if not self._update_called:
            try:
                self.client().update_lbaas_pool(self.resource_id,
                                                {'pool': prop_diff})
                self._update_called = True
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def handle_delete(self):
        self._delete_called = False

    def check_delete_complete(self, data):
        if self.resource_id is None:
            return True

        if not self._delete_called:
            try:
                self.client().delete_lbaas_pool(self.resource_id)
                self._delete_called = True
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                elif self.client_plugin().is_not_found(ex):
                    return True
                raise

        return self._check_lb_status()
class Volume(vb.BaseVolume):
    """AWS-compatible EBS volume resource backed by a Cinder volume."""

    PROPERTIES = (
        AVAILABILITY_ZONE, SIZE, BACKUP_ID, TAGS,
    ) = (
        'AvailabilityZone', 'Size', 'SnapshotId', 'Tags',
    )

    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )

    properties_schema = {
        AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _('The availability zone in which the volume will be created.'),
            required=True,
            immutable=True),
        SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('The size of the volume in GB.'),
            immutable=True,
            constraints=[
                constraints.Range(min=1),
            ]),
        BACKUP_ID: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the backup used as the source to create the '
              'volume.'),
            immutable=True,
            constraints=[constraints.CustomConstraint('cinder.backup')]),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            _('The list of tags to associate with the volume.'),
            immutable=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    TAG_KEY: properties.Schema(
                        properties.Schema.STRING,
                        required=True),
                    TAG_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        required=True),
                },
            )),
    }

    # Cinder statuses that still count as "in progress" for creation.
    _volume_creating_status = ['creating', 'restoring-backup']

    def _create_arguments(self):
        """Build keyword arguments for the Cinder volume-create call."""
        tag_list = self.properties[self.TAGS]
        if tag_list:
            # Collapse the AWS-style [{'Key': k, 'Value': v}, ...] list
            # into a flat metadata dict.
            metadata = {tm[self.TAG_KEY]: tm[self.TAG_VALUE]
                        for tm in tag_list}
        else:
            metadata = None

        zone = self.properties[self.AVAILABILITY_ZONE] or None
        return {
            'size': self.properties[self.SIZE],
            'availability_zone': zone,
            'metadata': metadata,
        }
class QoSPolicy(neutron.NeutronResource):
    """A resource for Neutron QoS Policy.

    This QoS policy can be associated with neutron resources,
    such as port and network, to provide QoS capabilities.

    The default policy usage of this resource is limited to
    administrators only.
    """

    required_service_extension = 'qos'

    support_status = support.SupportStatus(version='6.0.0')

    PROPERTIES = (
        NAME, DESCRIPTION, SHARED, TENANT_ID,
    ) = (
        'name', 'description', 'shared', 'tenant_id',
    )

    ATTRIBUTES = (
        RULES_ATTR,
    ) = (
        'rules',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name for the QoS policy.'),
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('The description for the QoS policy.'),
            update_allowed=True
        ),
        SHARED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether this QoS policy should be shared to other tenants.'),
            default=False,
            update_allowed=True
        ),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('The owner tenant ID of this QoS policy.')
        ),
    }

    attributes_schema = {
        RULES_ATTR: attributes.Schema(
            _("A list of all rules for the QoS policy."),
            type=attributes.Schema.LIST
        )
    }

    def handle_create(self):
        """Create the QoS policy in Neutron and record its id."""
        body = {'policy': self.prepare_properties(
            self.properties, self.physical_resource_name())}
        created = self.client().create_qos_policy(body)['policy']
        self.resource_id_set(created['id'])

    def handle_delete(self):
        """Delete the QoS policy, tolerating an already-gone policy."""
        if self.resource_id is not None:
            with self.client_plugin().ignore_not_found:
                self.client().delete_qos_policy(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed properties to Neutron."""
        if not prop_diff:
            return
        self.prepare_update_properties(prop_diff)
        self.client().update_qos_policy(self.resource_id,
                                        {'policy': prop_diff})

    def _show_resource(self):
        shown = self.client().show_qos_policy(self.resource_id)
        return shown['policy']
class FloatingIP(neutron.NeutronResource):
    """A resource for managing Neutron floating ips.

    Floating IP addresses can change their association between routers by
    action of the user. One of the most common use cases for floating IPs is
    to provide public IP addresses to a private cloud, where there are a
    limited number of IP addresses available. Another is for a public cloud
    user to have a "static" IP address that can be reassigned when an
    instance is upgraded or moved.
    """

    PROPERTIES = (
        FLOATING_NETWORK_ID, FLOATING_NETWORK, VALUE_SPECS,
        PORT_ID, FIXED_IP_ADDRESS, FLOATING_IP_ADDRESS,
    ) = (
        'floating_network_id', 'floating_network', 'value_specs',
        'port_id', 'fixed_ip_address', 'floating_ip_address',
    )

    ATTRIBUTES = (
        ROUTER_ID, TENANT_ID, FLOATING_NETWORK_ID_ATTR,
        FIXED_IP_ADDRESS_ATTR, FLOATING_IP_ADDRESS_ATTR, PORT_ID_ATTR,
    ) = (
        'router_id', 'tenant_id', 'floating_network_id',
        'fixed_ip_address', 'floating_ip_address', 'port_id',
    )

    properties_schema = {
        # NOTE(review): hidden legacy alias of FLOATING_NETWORK; it carries
        # no description string — presumably intentional since it is hidden.
        FLOATING_NETWORK_ID: properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                message=_('Use property %s.') % FLOATING_NETWORK,
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        FLOATING_NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('Network to allocate floating IP from.'),
            support_status=support.SupportStatus(version='2014.2'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        VALUE_SPECS: properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the "floatingip" object in the '
              'creation request. Parameters are often specific to installed '
              'hardware or extensions.'),
            default={}),
        PORT_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of an existing port with at least one IP address to '
              'associate with this floating IP.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('neutron.port')]),
        FIXED_IP_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address to use if the port has multiple addresses.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('ip_addr')]),
        FLOATING_IP_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address of the floating IP. NOTE: The default policy '
              'setting in Neutron restricts usage of this property to '
              'administrative users only.'),
            constraints=[constraints.CustomConstraint('ip_addr')],
            support_status=support.SupportStatus(version='5.0.0'),
        ),
    }

    attributes_schema = {
        ROUTER_ID: attributes.Schema(
            _('ID of the router used as gateway, set when associated with a '
              'port.'),
            type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(
            _('The tenant owning this floating IP.'),
            type=attributes.Schema.STRING),
        FLOATING_NETWORK_ID_ATTR: attributes.Schema(
            _('ID of the network in which this IP is allocated.'),
            type=attributes.Schema.STRING),
        FIXED_IP_ADDRESS_ATTR: attributes.Schema(
            _('IP address of the associated port, if specified.'),
            type=attributes.Schema.STRING),
        FLOATING_IP_ADDRESS_ATTR: attributes.Schema(
            _('The allocated address of this IP.'),
            type=attributes.Schema.STRING),
        PORT_ID_ATTR: attributes.Schema(
            _('ID of the port associated with this IP.'),
            type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        # Map the hidden legacy floating_network_id onto floating_network,
        # then resolve the name to an id via the Neutron client plugin.
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.FLOATING_NETWORK],
                value_path=[self.FLOATING_NETWORK_ID]),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.FLOATING_NETWORK],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='network')
        ]

    def add_dependencies(self, deps):
        """Add implicit ordering against router-related resources.

        Ensures the floating IP is created after (and deleted before) any
        router gateway/interface that provides its external connectivity.
        """
        super(FloatingIP, self).add_dependencies(deps)

        for resource in six.itervalues(self.stack):
            # depend on any RouterGateway in this template with the same
            # network_id as this floating_network_id
            if resource.has_interface('OS::Neutron::RouterGateway'):
                gateway_network = resource.properties.get(
                    router.RouterGateway.NETWORK) or resource.properties.get(
                        router.RouterGateway.NETWORK_ID)
                floating_network = self.properties[self.FLOATING_NETWORK]
                if gateway_network == floating_network:
                    deps += (self, resource)

            # depend on any RouterInterface in this template which interfaces
            # with the same subnet that this floating IP's port is assigned
            # to
            elif resource.has_interface('OS::Neutron::RouterInterface'):

                def port_on_subnet(resource, subnet):
                    # True when `resource` is a Port attached to `subnet`.
                    if not resource.has_interface('OS::Neutron::Port'):
                        return False
                    fixed_ips = resource.properties.get(
                        port.Port.FIXED_IPS)
                    if not fixed_ips:
                        # No explicit fixed IPs: fall back to checking the
                        # port's network for the subnet.
                        p_net = (resource.properties.get(port.Port.NETWORK) or
                                 resource.properties.get(port.Port.NETWORK_ID))
                        if p_net:
                            subnets = self.client().show_network(
                                p_net)['network']['subnets']
                            return subnet in subnets
                    else:
                        # NOTE(review): returns on the first fixed_ip, so
                        # only the first entry is ever examined — confirm
                        # whether multi-fixed-ip ports should match too.
                        for fixed_ip in resource.properties.get(
                                port.Port.FIXED_IPS):
                            port_subnet = (
                                fixed_ip.get(port.Port.FIXED_IP_SUBNET)
                                or fixed_ip.get(port.Port.FIXED_IP_SUBNET_ID))
                            return subnet == port_subnet
                    return False

                interface_subnet = (
                    resource.properties.get(router.RouterInterface.SUBNET) or
                    resource.properties.get(router.RouterInterface.SUBNET_ID))
                # during create we have only unresolved value for functions,
                # so can not use None value for building correct dependencies
                if interface_subnet:
                    for d in deps.graph()[self]:
                        if port_on_subnet(d, interface_subnet):
                            deps += (self, resource)
                            break

            # depend on Router with EXTERNAL_GATEWAY_NETWORK property
            # this template with the same network_id as this
            # floating_network_id
            elif resource.has_interface('OS::Neutron::Router'):
                gateway = resource.properties.get(
                    router.Router.EXTERNAL_GATEWAY)
                if gateway:
                    gateway_network = gateway.get(
                        router.Router.EXTERNAL_GATEWAY_NETWORK)
                    floating_network = self.properties[self.FLOATING_NETWORK]
                    if gateway_network == floating_network:
                        deps += (self, resource)

    def validate(self):
        super(FloatingIP, self).validate()
        # fixed_ip_address cannot be specified without a port_id
        if self.properties[self.PORT_ID] is None and self.properties[
                self.FIXED_IP_ADDRESS] is not None:
            raise exception.ResourcePropertyDependency(
                prop1=self.FIXED_IP_ADDRESS, prop2=self.PORT_ID)

    def handle_create(self):
        """Create the floating IP in Neutron and record its id."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        # Rename the translated property to the API field name.
        props['floating_network_id'] = props.pop(self.FLOATING_NETWORK)
        fip = self.client().create_floatingip({
            'floatingip': props})['floatingip']
        self.resource_id_set(fip['id'])

    def _show_resource(self):
        return self.client().show_floatingip(self.resource_id)['floatingip']

    def handle_delete(self):
        if not self.resource_id:
            return
        with self.client_plugin().ignore_not_found:
            self.client().delete_floatingip(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-associate the floating IP when port/fixed-ip change.

        Unchanged values are re-sent from the current properties so the
        request always carries both fields.
        """
        if prop_diff:
            port_id = prop_diff.get(self.PORT_ID,
                                    self.properties[self.PORT_ID])
            fixed_ip_address = prop_diff.get(
                self.FIXED_IP_ADDRESS,
                self.properties[self.FIXED_IP_ADDRESS])
            request_body = {
                'floatingip': {
                    'port_id': port_id,
                    'fixed_ip_address': fixed_ip_address
                }
            }
            self.client().update_floatingip(self.resource_id, request_body)
class RouterInterface(neutron.NeutronResource):
    """A resource for managing Neutron router interfaces.

    Router interfaces associate routers with existing subnets or ports.
    """

    required_service_extension = 'router'

    PROPERTIES = (
        ROUTER, ROUTER_ID, SUBNET_ID, SUBNET, PORT_ID, PORT
    ) = (
        'router', 'router_id', 'subnet_id', 'subnet', 'port_id', 'port'
    )

    properties_schema = {
        ROUTER: properties.Schema(
            properties.Schema.STRING,
            _('The router.'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.router')],
        ),
        ROUTER_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of the router.'),
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_('Use property %s.') % ROUTER,
                    version='2015.1',
                    previous_status=support.SupportStatus(version='2013.1'))),
            constraints=[constraints.CustomConstraint('neutron.router')],
        ),
        SUBNET_ID: properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                message=_('Use property %s.') % SUBNET,
                version='5.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.subnet')]),
        SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('The subnet, either subnet or port should be '
              'specified.'),
            constraints=[constraints.CustomConstraint('neutron.subnet')]),
        PORT_ID: properties.Schema(
            properties.Schema.STRING,
            _('The port id, either subnet or port_id should be specified.'),
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_('Use property %s.') % PORT,
                    version='2015.1',
                    previous_status=support.SupportStatus(version='2014.1'))),
            constraints=[constraints.CustomConstraint('neutron.port')]),
        PORT: properties.Schema(
            properties.Schema.STRING,
            _('The port, either subnet or port should be specified.'),
            support_status=support.SupportStatus(version='2015.1'),
            constraints=[constraints.CustomConstraint('neutron.port')])
    }

    def translation_rules(self, props):
        """Map deprecated *_id properties onto their replacements, then
        resolve names to Neutron UUIDs.
        """
        rules = []
        # Old-style *_id values are folded into the current property names.
        for current, legacy in ((self.PORT, self.PORT_ID),
                                (self.ROUTER, self.ROUTER_ID),
                                (self.SUBNET, self.SUBNET_ID)):
            rules.append(translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [current],
                value_path=[legacy]))
        # Names (or ids) are then resolved to canonical resource ids.
        for current, entity in ((self.PORT, 'port'),
                                (self.ROUTER, 'router'),
                                (self.SUBNET, 'subnet')):
            rules.append(translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [current],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity=entity))
        return rules

    def validate(self):
        """Check that exactly one of subnet/port is supplied."""
        super(RouterInterface, self).validate()
        has_subnet = self.properties.get(self.SUBNET) is not None
        has_port = self.properties.get(self.PORT) is not None
        if has_subnet and has_port:
            raise exception.ResourcePropertyConflict(self.SUBNET, self.PORT)
        if not (has_subnet or has_port):
            raise exception.PropertyUnspecifiedError(self.SUBNET, self.PORT)

    def handle_create(self):
        """Attach the subnet (or port) interface to the router.

        The resource id encodes router, interface kind and value so that
        delete can replay the exact call.
        """
        router_id = dict(self.properties).get(self.ROUTER)
        value = dict(self.properties).get(self.SUBNET)
        key = 'subnet_id'
        if not value:
            value = dict(self.properties).get(self.PORT)
            key = 'port_id'
        self.client().add_interface_router(router_id, {key: value})
        self.resource_id_set('%s:%s=%s' % (router_id, key, value))

    def handle_delete(self):
        """Detach the interface; missing router/interface is not an error."""
        if not self.resource_id:
            return

        parts = self.resource_id.replace('=', ':').split(':')
        if len(parts) == 2:
            # Legacy resource ids lacked the key segment; they always
            # referred to a subnet interface.
            parts.insert(1, 'subnet_id')
        router_id, key, value = parts
        with self.client_plugin().ignore_not_found:
            self.client().remove_interface_router(router_id, {key: value})
class VPC(resource.Resource):
    # An AWS::EC2::VPC analogue: a Neutron network paired with a router
    # that shares its name.

    PROPERTIES = (
        CIDR_BLOCK, INSTANCE_TENANCY, TAGS,
    ) = (
        'CidrBlock', 'InstanceTenancy', 'Tags',
    )

    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )

    properties_schema = {
        CIDR_BLOCK: properties.Schema(
            properties.Schema.STRING,
            _('CIDR block to apply to the VPC.')),
        INSTANCE_TENANCY: properties.Schema(
            properties.Schema.STRING,
            _('Allowed tenancy of instances launched in the VPC. default - '
              'any tenancy; dedicated - instance will be dedicated, '
              'regardless of the tenancy option specified at instance '
              'launch.'),
            default='default',
            constraints=[
                constraints.AllowedValues(['default', 'dedicated']),
            ],
            implemented=False),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                _('List of tags to attach to the instance.'),
                schema={
                    TAG_KEY: properties.Schema(
                        properties.Schema.STRING,
                        required=True),
                    TAG_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        required=True),
                },
                implemented=False,
            )),
    }

    default_client_name = 'neutron'

    def handle_create(self):
        """Create the backing network and its router.

        The VPC's net and router are associated by having identical names.
        """
        name = self.physical_resource_name()
        created = self.client().create_network(
            {'network': {'name': name}})['network']
        self.resource_id_set(created['id'])
        self.client().create_router({'router': {'name': name}})['router']

    @staticmethod
    def network_for_vpc(client, network_id):
        """Fetch the Neutron network backing the VPC."""
        return client.show_network(network_id)['network']

    @staticmethod
    def router_for_vpc(client, network_id):
        """Return the router paired with the VPC's network, or None."""
        # The paired router carries the same name as the network.
        net = VPC.network_for_vpc(client, network_id)
        matches = client.list_routers(name=net['name'])['routers']
        if not matches:
            # There may be no router if the net was created manually
            # instead of in another stack.
            return None
        if len(matches) > 1:
            raise exception.Error(
                _('Multiple routers found with name %s') % net['name'])
        return matches[0]

    def check_create_complete(self, *args):
        """Create is done once both the network and its router are built."""
        client = self.client()
        net = self.network_for_vpc(client, self.resource_id)
        if not neutron.NeutronResource.is_built(net):
            return False
        vpc_router = self.router_for_vpc(client, self.resource_id)
        return neutron.NeutronResource.is_built(vpc_router)

    def handle_delete(self):
        """Remove the router, then the network; tolerate either being gone."""
        if self.resource_id is None:
            return

        with self.client_plugin().ignore_not_found:
            vpc_router = self.router_for_vpc(self.client(), self.resource_id)
            if vpc_router:
                self.client().delete_router(vpc_router['id'])

        with self.client_plugin().ignore_not_found:
            self.client().delete_network(self.resource_id)