class IPsecSiteConnection(neutron.NeutronResource):
    """A resource for IPsec site connection in Neutron.

    This resource has details for the site-to-site IPsec connection,
    including the peer CIDRs, MTU, peer address, DPD settings and status.
    """

    # The target Neutron deployment must expose the VPNaaS API extension.
    required_service_extension = 'vpnaas'

    # Heat property names (left tuple) paired with the Neutron API field
    # names (right tuple); here the two sets coincide.
    PROPERTIES = (
        NAME, DESCRIPTION, PEER_ADDRESS, PEER_ID, PEER_CIDRS, MTU, DPD,
        PSK, INITIATOR, ADMIN_STATE_UP, IKEPOLICY_ID, IPSECPOLICY_ID,
        VPNSERVICE_ID,
    ) = (
        'name', 'description', 'peer_address', 'peer_id', 'peer_cidrs',
        'mtu', 'dpd', 'psk', 'initiator', 'admin_state_up',
        'ikepolicy_id', 'ipsecpolicy_id', 'vpnservice_id',
    )

    # Keys of the nested Dead Peer Detection (DPD) map property.
    _DPD_KEYS = (
        DPD_ACTIONS, DPD_INTERVAL, DPD_TIMEOUT,
    ) = (
        'actions', 'interval', 'timeout',
    )

    # Attribute names resolvable with get_attr; they mirror the fields of
    # the ipsec_site_connection object returned by Neutron.
    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, AUTH_MODE, DESCRIPTION_ATTR, DPD_ATTR,
        IKEPOLICY_ID_ATTR, INITIATOR_ATTR, IPSECPOLICY_ID_ATTR, MTU_ATTR,
        NAME_ATTR, PEER_ADDRESS_ATTR, PEER_CIDRS_ATTR, PEER_ID_ATTR,
        PSK_ATTR, ROUTE_MODE, STATUS, TENANT_ID, VPNSERVICE_ID_ATTR,
    ) = (
        'admin_state_up', 'auth_mode', 'description', 'dpd',
        'ikepolicy_id', 'initiator', 'ipsecpolicy_id', 'mtu',
        'name', 'peer_address', 'peer_cidrs', 'peer_id',
        'psk', 'route_mode', 'status', 'tenant_id', 'vpnservice_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the ipsec site connection.'),
            update_allowed=True),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the ipsec site connection.'),
            update_allowed=True),
        PEER_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('Remote branch router public IPv4 address or IPv6 address or '
              'FQDN.'),
            required=True),
        PEER_ID: properties.Schema(
            properties.Schema.STRING,
            _('Remote branch router identity.'),
            required=True),
        PEER_CIDRS: properties.Schema(
            properties.Schema.LIST,
            _('Remote subnet(s) in CIDR format.'),
            required=True,
            schema=properties.Schema(
                properties.Schema.STRING,
                constraints=[constraints.CustomConstraint('net_cidr')])),
        MTU: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum transmission unit size (in bytes) for the ipsec site '
              'connection.'),
            default=1500),
        DPD: properties.Schema(
            properties.Schema.MAP,
            _('Dead Peer Detection protocol configuration for the ipsec site '
              'connection.'),
            schema={
                DPD_ACTIONS: properties.Schema(
                    properties.Schema.STRING,
                    _('Controls DPD protocol mode.'),
                    default='hold',
                    constraints=[
                        constraints.AllowedValues([
                            'clear', 'disabled', 'hold', 'restart',
                            'restart-by-peer'
                        ]),
                    ]),
                DPD_INTERVAL: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Number of seconds for the DPD delay.'),
                    default=30),
                DPD_TIMEOUT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Number of seconds for the DPD timeout.'),
                    default=120),
            }),
        PSK: properties.Schema(
            properties.Schema.STRING,
            _('Pre-shared key string for the ipsec site connection.'),
            required=True),
        INITIATOR: properties.Schema(
            properties.Schema.STRING,
            _('Initiator state in lowercase for the ipsec site connection.'),
            default='bi-directional',
            constraints=[
                constraints.AllowedValues(['bi-directional', 'response-only']),
            ]),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Administrative state for the ipsec site connection.'),
            default=True,
            update_allowed=True),
        IKEPOLICY_ID: properties.Schema(
            properties.Schema.STRING,
            _('Unique identifier for the ike policy associated with the '
              'ipsec site connection.'),
            required=True),
        IPSECPOLICY_ID: properties.Schema(
            properties.Schema.STRING,
            _('Unique identifier for the ipsec policy associated with the '
              'ipsec site connection.'),
            required=True),
        VPNSERVICE_ID: properties.Schema(
            properties.Schema.STRING,
            _('Unique identifier for the vpn service associated with the '
              'ipsec site connection.'),
            required=True),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _('The administrative state of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        AUTH_MODE: attributes.Schema(
            _('The authentication mode of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        DESCRIPTION_ATTR: attributes.Schema(
            _('The description of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        DPD_ATTR: attributes.Schema(
            _('The dead peer detection protocol configuration of the ipsec '
              'site connection.'),
            type=attributes.Schema.MAP),
        IKEPOLICY_ID_ATTR: attributes.Schema(
            _('The unique identifier of ike policy associated with the ipsec '
              'site connection.'),
            type=attributes.Schema.STRING),
        INITIATOR_ATTR: attributes.Schema(
            _('The initiator of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        IPSECPOLICY_ID_ATTR: attributes.Schema(
            _('The unique identifier of ipsec policy associated with the '
              'ipsec site connection.'),
            type=attributes.Schema.STRING),
        MTU_ATTR: attributes.Schema(
            _('The maximum transmission unit size (in bytes) of the ipsec '
              'site connection.'),
            type=attributes.Schema.STRING),
        NAME_ATTR: attributes.Schema(
            _('The name of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        PEER_ADDRESS_ATTR: attributes.Schema(
            _('The remote branch router public IPv4 address or IPv6 address '
              'or FQDN.'),
            type=attributes.Schema.STRING),
        PEER_CIDRS_ATTR: attributes.Schema(
            _('The remote subnet(s) in CIDR format of the ipsec site '
              'connection.'),
            type=attributes.Schema.LIST),
        PEER_ID_ATTR: attributes.Schema(
            _('The remote branch router identity of the ipsec site '
              'connection.'),
            type=attributes.Schema.STRING),
        PSK_ATTR: attributes.Schema(
            _('The pre-shared key string of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        ROUTE_MODE: attributes.Schema(
            _('The route mode of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        STATUS: attributes.Schema(
            _('The status of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(
            _('The unique identifier of the tenant owning the ipsec site '
              'connection.'),
            type=attributes.Schema.STRING),
        VPNSERVICE_ID_ATTR: attributes.Schema(
            _('The unique identifier of vpn service associated with the ipsec '
              'site connection.'),
            type=attributes.Schema.STRING),
    }

    def _show_resource(self):
        """Fetch the live ipsec_site_connection dict from Neutron.

        Used by the base class for attribute resolution and state checks.
        """
        return self.client().show_ipsec_site_connection(
            self.resource_id)['ipsec_site_connection']

    def handle_create(self):
        """Create the connection in Neutron and record its ID."""
        # prepare_properties drops unset values and fills in a generated
        # name when the template did not supply one.
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        ipsec_site_connection = self.client().create_ipsec_site_connection(
            {'ipsec_site_connection': props})['ipsec_site_connection']
        self.resource_id_set(ipsec_site_connection['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed (update_allowed) properties to Neutron."""
        if prop_diff:
            self.client().update_ipsec_site_connection(
                self.resource_id, {'ipsec_site_connection': prop_diff})

    def handle_delete(self):
        """Delete the connection, treating an already-gone one as success."""
        try:
            self.client().delete_ipsec_site_connection(self.resource_id)
        except Exception as ex:
            # Re-raises anything that is not a NotFound.
            self.client_plugin().ignore_not_found(ex)
        else:
            # Truthy return signals the delete call was actually issued.
            return True
class NetworkInterface(resource.Resource):
    """An AWS-compatible network interface backed by a Neutron port.

    Creating this resource creates a Neutron port on the network that owns
    the given subnet, optionally with a fixed private IP address and a set
    of security groups.
    """

    PROPERTIES = (
        DESCRIPTION, GROUP_SET, PRIVATE_IP_ADDRESS, SOURCE_DEST_CHECK,
        SUBNET_ID, TAGS,
    ) = (
        'Description', 'GroupSet', 'PrivateIpAddress', 'SourceDestCheck',
        'SubnetId', 'Tags',
    )

    # Keys of each entry in the Tags list property.
    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )

    ATTRIBUTES = (PRIVATE_IP_ADDRESS_ATTR, ) = ('PrivateIpAddress', )

    properties_schema = {
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for this interface.')),
        GROUP_SET: properties.Schema(
            properties.Schema.LIST,
            _('List of security group IDs associated with this interface.'),
            update_allowed=True),
        PRIVATE_IP_ADDRESS: properties.Schema(
            properties.Schema.STRING),
        SOURCE_DEST_CHECK: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Flag indicating if traffic to or from instance is validated.'),
            implemented=False),
        SUBNET_ID: properties.Schema(
            properties.Schema.STRING,
            _('Subnet ID to associate with this interface.'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.subnet')]),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                _('List of tags associated with this interface.'),
                schema={
                    TAG_KEY: properties.Schema(properties.Schema.STRING,
                                               required=True),
                    TAG_VALUE: properties.Schema(properties.Schema.STRING,
                                                 required=True),
                },
                implemented=False,
            )),
    }

    attributes_schema = {
        # Keyed by the *_ATTR constant for consistency with ATTRIBUTES.
        # The value is identical to PRIVATE_IP_ADDRESS ('PrivateIpAddress'),
        # so existing templates are unaffected.
        PRIVATE_IP_ADDRESS_ATTR: attributes.Schema(
            _('Private IP address of the network interface.'),
            type=attributes.Schema.STRING),
    }

    default_client_name = 'neutron'

    @staticmethod
    def network_id_from_subnet_id(neutronclient, subnet_id):
        """Return the ID of the network that contains ``subnet_id``."""
        subnet_info = neutronclient.show_subnet(subnet_id)
        return subnet_info['subnet']['network_id']

    def __init__(self, name, json_snippet, stack):
        super(NetworkInterface, self).__init__(name, json_snippet, stack)
        # Lazily resolved first fixed IP of the port; see
        # _get_fixed_ip_address().
        self.fixed_ip_address = None

    def handle_create(self):
        """Create the backing Neutron port and record its ID."""
        subnet_id = self.properties[self.SUBNET_ID]
        network_id = self.client_plugin().network_id_from_subnet_id(subnet_id)

        fixed_ip = {'subnet_id': subnet_id}
        if self.properties[self.PRIVATE_IP_ADDRESS]:
            fixed_ip['ip_address'] = self.properties[self.PRIVATE_IP_ADDRESS]

        props = {
            'name': self.physical_resource_name(),
            'admin_state_up': True,
            'network_id': network_id,
            'fixed_ips': [fixed_ip]
        }
        # If GroupSet is omitted, leave 'security_groups' unset so Neutron
        # applies its 'default' security group. If GroupSet is present but
        # empty ([]), pass it through so the port gets *no* security group
        # (matching Neutron's own behaviour).
        if self.properties[self.GROUP_SET] is not None:
            sgs = self.client_plugin().get_secgroup_uuids(
                self.properties.get(self.GROUP_SET))
            props['security_groups'] = sgs
        port = self.client().create_port({'port': props})['port']
        self.resource_id_set(port['id'])

    def handle_delete(self):
        """Delete the port, ignoring the case where it is already gone."""
        if self.resource_id is None:
            return

        with self.client_plugin().ignore_not_found:
            self.client().delete_port(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply GroupSet changes to the existing port.

        Mirrors the creation semantics: removing GroupSet from the template
        reverts the port to the 'default' security group.
        """
        if prop_diff:
            update_props = {}
            if self.GROUP_SET in prop_diff:
                group_set = prop_diff.get(self.GROUP_SET)
                if group_set is not None:
                    sgs = self.client_plugin().get_secgroup_uuids(group_set)
                else:
                    sgs = self.client_plugin().get_secgroup_uuids(['default'])
                update_props['security_groups'] = sgs

            self.client().update_port(self.resource_id,
                                      {'port': update_props})

    def _get_fixed_ip_address(self):
        """Return (and cache) the first fixed IP assigned to the port."""
        if self.fixed_ip_address is None:
            port = self.client().show_port(self.resource_id)['port']
            if port['fixed_ips'] and len(port['fixed_ips']) > 0:
                self.fixed_ip_address = port['fixed_ips'][0]['ip_address']
        return self.fixed_ip_address

    def _resolve_attribute(self, name):
        if name == self.PRIVATE_IP_ADDRESS_ATTR:
            return self._get_fixed_ip_address()
class HealthMonitor(neutron.NeutronResource):
    """A resource for managing health monitors for loadbalancers in Neutron.

    A health monitor is used to determine whether or not back-end members of
    the VIP's pool are usable for processing a request. A pool can have
    several health monitors associated with it. There are different types of
    health monitors supported by the OpenStack LBaaS service:

    - PING: used to ping the members using ICMP.
    - TCP: used to connect to the members using TCP.
    - HTTP: used to send an HTTP request to the member.
    - HTTPS: used to send a secure HTTP request to the member.
    """

    # Requires the Neutron LBaaS (v1) API extension.
    required_service_extension = 'lbaas'

    PROPERTIES = (
        DELAY, TYPE, MAX_RETRIES, TIMEOUT, ADMIN_STATE_UP, HTTP_METHOD,
        EXPECTED_CODES, URL_PATH,
    ) = (
        'delay', 'type', 'max_retries', 'timeout', 'admin_state_up',
        'http_method', 'expected_codes', 'url_path',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, DELAY_ATTR, EXPECTED_CODES_ATTR,
        HTTP_METHOD_ATTR, MAX_RETRIES_ATTR, TIMEOUT_ATTR, TYPE_ATTR,
        URL_PATH_ATTR, TENANT_ID,
    ) = (
        'admin_state_up', 'delay', 'expected_codes',
        'http_method', 'max_retries', 'timeout', 'type',
        'url_path', 'tenant_id',
    )

    properties_schema = {
        DELAY: properties.Schema(
            properties.Schema.INTEGER,
            _('The minimum time in seconds between regular connections of '
              'the member.'),
            required=True,
            update_allowed=True
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('One of predefined health monitor types.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['PING', 'TCP', 'HTTP', 'HTTPS']),
            ]
        ),
        MAX_RETRIES: properties.Schema(
            properties.Schema.INTEGER,
            _('Number of permissible connection failures before changing the '
              'member status to INACTIVE.'),
            required=True,
            update_allowed=True
        ),
        TIMEOUT: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of seconds for a monitor to wait for a '
              'connection to be established before it times out.'),
            required=True,
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the health monitor.'),
            default=True,
            update_allowed=True
        ),
        HTTP_METHOD: properties.Schema(
            properties.Schema.STRING,
            _('The HTTP method used for requests by the monitor of type '
              'HTTP.'),
            update_allowed=True
        ),
        EXPECTED_CODES: properties.Schema(
            properties.Schema.STRING,
            _('The list of HTTP status codes expected in response from the '
              'member to declare it healthy.'),
            update_allowed=True
        ),
        URL_PATH: properties.Schema(
            properties.Schema.STRING,
            _('The HTTP path used in the HTTP request used by the monitor to '
              'test a member health.'),
            update_allowed=True
        ),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _('The administrative state of this health monitor.'),
            type=attributes.Schema.STRING
        ),
        DELAY_ATTR: attributes.Schema(
            _('The minimum time in seconds between regular connections '
              'of the member.'),
            type=attributes.Schema.STRING
        ),
        EXPECTED_CODES_ATTR: attributes.Schema(
            _('The list of HTTP status codes expected in response '
              'from the member to declare it healthy.'),
            type=attributes.Schema.LIST
        ),
        HTTP_METHOD_ATTR: attributes.Schema(
            _('The HTTP method used for requests by the monitor of '
              'type HTTP.'),
            type=attributes.Schema.STRING
        ),
        MAX_RETRIES_ATTR: attributes.Schema(
            _('Number of permissible connection failures before changing '
              'the member status to INACTIVE.'),
            type=attributes.Schema.STRING
        ),
        TIMEOUT_ATTR: attributes.Schema(
            _('Maximum number of seconds for a monitor to wait for a '
              'connection to be established before it times out.'),
            type=attributes.Schema.STRING
        ),
        TYPE_ATTR: attributes.Schema(
            _('One of predefined health monitor types.'),
            type=attributes.Schema.STRING
        ),
        URL_PATH_ATTR: attributes.Schema(
            _('The HTTP path used in the HTTP request used by the monitor '
              'to test a member health.'),
            type=attributes.Schema.STRING
        ),
        TENANT_ID: attributes.Schema(
            _('Tenant owning the health monitor.'),
            type=attributes.Schema.STRING
        ),
    }

    def handle_create(self):
        """Create the health monitor in Neutron and record its ID."""
        # Named 'props' (not 'properties') so the module-level 'properties'
        # used by the schemas above is not shadowed; this also matches the
        # sibling resources in this file.
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        health_monitor = self.client().create_health_monitor(
            {'health_monitor': props})['health_monitor']
        self.resource_id_set(health_monitor['id'])

    def _show_resource(self):
        """Fetch the live health_monitor dict from Neutron."""
        return self.client().show_health_monitor(
            self.resource_id)['health_monitor']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed (update_allowed) properties to Neutron."""
        if prop_diff:
            self.client().update_health_monitor(
                self.resource_id, {'health_monitor': prop_diff})

    def handle_delete(self):
        """Delete the monitor, treating an already-gone one as success."""
        if not self.resource_id:
            return
        try:
            self.client().delete_health_monitor(self.resource_id)
        except Exception as ex:
            # Re-raises anything that is not a NotFound.
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
class SoftwareDeploymentGroup(resource_group.ResourceGroup):
    """This resource associates a group of servers with some configuration.

    The configuration is to be deployed to all servers in the group.

    The properties work in a similar way to OS::Heat::SoftwareDeployment,
    and in addition to the attributes documented, you may pass any
    attribute supported by OS::Heat::SoftwareDeployment, including those
    exposing arbitrary outputs, and return a map of deployment names to
    the specified attribute.
    """

    support_status = support.SupportStatus(version='5.0.0')

    PROPERTIES = (
        SERVERS,
        CONFIG,
        INPUT_VALUES,
        DEPLOY_ACTIONS,
        NAME,
        SIGNAL_TRANSPORT,
    ) = (
        'servers',
        SoftwareDeployment.CONFIG,
        SoftwareDeployment.INPUT_VALUES,
        SoftwareDeployment.DEPLOY_ACTIONS,
        SoftwareDeployment.NAME,
        SoftwareDeployment.SIGNAL_TRANSPORT,
    )

    ATTRIBUTES = (
        STDOUTS,
        STDERRS,
        STATUS_CODES,
    ) = (
        'deploy_stdouts',
        'deploy_stderrs',
        'deploy_status_codes',
    )

    # Borrow the per-deployment property schemas straight from
    # SoftwareDeployment so the two resources cannot drift apart.
    _sd_ps = SoftwareDeployment.properties_schema
    _rg_ps = resource_group.ResourceGroup.properties_schema

    properties_schema = {
        SERVERS: properties.Schema(
            properties.Schema.MAP,
            _('A map of Nova names and IDs to apply configuration to.'),
            update_allowed=True),
        CONFIG: _sd_ps[CONFIG],
        INPUT_VALUES: _sd_ps[INPUT_VALUES],
        DEPLOY_ACTIONS: _sd_ps[DEPLOY_ACTIONS],
        NAME: _sd_ps[NAME],
        SIGNAL_TRANSPORT: _sd_ps[SIGNAL_TRANSPORT]
    }

    attributes_schema = {
        STDOUTS: attributes.Schema(
            _("A map of Nova names and captured stdouts from the "
              "configuration execution to each server."),
            type=attributes.Schema.MAP),
        STDERRS: attributes.Schema(
            _("A map of Nova names and captured stderrs from the "
              "configuration execution to each server."),
            type=attributes.Schema.MAP),
        STATUS_CODES: attributes.Schema(
            _("A map of Nova names and returned status code from the "
              "configuration execution."),
            type=attributes.Schema.MAP),
    }

    # No rolling-update policy applies to this group.
    update_policy_schema = {}

    def get_size(self):
        """One nested deployment is created per entry in the servers map."""
        server_map = self.properties.get(self.SERVERS, {})
        return len(server_map)

    def _resource_names(self):
        """Nested resources are named after the keys of the servers map."""
        server_map = self.properties.get(self.SERVERS, {})
        return six.iterkeys(server_map)

    def get_resource_def(self, include_all=False):
        """Return the shared property set used for every nested deployment."""
        return dict(self.properties)

    def build_resource_definition(self, res_name, res_defn):
        """Turn the shared definition into one per-server deployment.

        The servers map is replaced by the single server this member
        targets; everything else is passed through unchanged.
        """
        member_props = copy.deepcopy(res_defn)
        server_map = member_props.pop(self.SERVERS)
        member_props[SoftwareDeployment.SERVER] = server_map.get(res_name)
        return rsrc_defn.ResourceDefinition(res_name,
                                            'OS::Heat::SoftwareDeployment',
                                            member_props, None)

    def get_attribute(self, key, *path):
        # Map the group-level attribute names onto the per-deployment
        # ones; any other key (e.g. an arbitrary config output) is
        # forwarded as-is, since it cannot be validated here.
        attr_map = {
            self.STDOUTS: SoftwareDeployment.STDOUT,
            self.STDERRS: SoftwareDeployment.STDERR,
            self.STATUS_CODES: SoftwareDeployment.STATUS_CODE,
        }
        member_attr = attr_map.get(key, key)
        parent = super(SoftwareDeploymentGroup, self)
        rg_attr = parent.get_attribute(parent.ATTR_ATTRIBUTES, member_attr)
        return attributes.select_from_attribute(rg_attr, path)
class PoolMember(neutron.NeutronResource):
    """A resource for managing LBaaS v2 Pool Members.

    A pool member represents a single backend node.
    """

    support_status = support.SupportStatus(version='6.0.0')

    # Requires the Neutron LBaaS v2 API extension.
    required_service_extension = 'lbaasv2'

    PROPERTIES = (
        POOL, ADDRESS, PROTOCOL_PORT, WEIGHT, ADMIN_STATE_UP,
        SUBNET,
    ) = (
        'pool', 'address', 'protocol_port', 'weight', 'admin_state_up',
        'subnet'
    )

    ATTRIBUTES = (
        ADDRESS_ATTR, POOL_ID_ATTR
    ) = (
        'address', 'pool_id'
    )

    properties_schema = {
        POOL: properties.Schema(
            properties.Schema.STRING,
            _('Name or ID of the load balancing pool.'),
            required=True
        ),
        ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address of the pool member on the pool network.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('ip_addr')
            ]
        ),
        PROTOCOL_PORT: properties.Schema(
            properties.Schema.INTEGER,
            _('Port on which the pool member listens for requests or '
              'connections.'),
            required=True,
            constraints=[
                constraints.Range(1, 65535),
            ]
        ),
        WEIGHT: properties.Schema(
            properties.Schema.INTEGER,
            _('Weight of pool member in the pool (default to 1).'),
            default=1,
            constraints=[
                constraints.Range(0, 256),
            ],
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the pool member.'),
            default=True,
            update_allowed=True,
            # BUG FIX: the allowed value must be the boolean True, not the
            # string 'True' — with the string form the default value True
            # could never pass validation of a BOOLEAN schema. Only True is
            # permitted because neutron-lbaas does not support disabling
            # members this way.
            constraints=[constraints.AllowedValues([True])]
        ),
        SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('Subnet name or ID of this member.'),
            constraints=[
                constraints.CustomConstraint('neutron.subnet')
            ]
        ),
    }

    attributes_schema = {
        ADDRESS_ATTR: attributes.Schema(
            _('The IP address of the pool member.'),
            type=attributes.Schema.STRING
        ),
        POOL_ID_ATTR: attributes.Schema(
            _('The ID of the pool to which the pool member belongs.'),
            type=attributes.Schema.STRING
        )
    }

    def __init__(self, name, definition, stack):
        super(PoolMember, self).__init__(name, definition, stack)
        # Lazily resolved IDs of the owning pool and load balancer.
        self._pool_id = None
        self._lb_id = None

    @property
    def pool_id(self):
        """Resolve (once) the pool name/ID property to the pool's UUID."""
        if self._pool_id is None:
            self._pool_id = self.client_plugin().find_resourceid_by_name_or_id(
                self.POOL,
                self.properties[self.POOL],
                cmd_resource='lbaas_pool')
        return self._pool_id

    @property
    def lb_id(self):
        """Resolve (once) the load balancer owning this member's pool.

        Walks pool -> first listener -> first load balancer.
        """
        if self._lb_id is None:
            pool = self.client().show_lbaas_pool(self.pool_id)['pool']

            listener_id = pool['listeners'][0]['id']
            listener = self.client().show_listener(listener_id)['listener']

            self._lb_id = listener['loadbalancers'][0]['id']
        return self._lb_id

    def _check_lb_status(self):
        # LBaaS v2 serialises changes through the load balancer's
        # provisioning status; all handlers poll it before finishing.
        return self.client_plugin().check_lb_status(self.lb_id)

    def handle_create(self):
        """Prepare the member properties; creation happens in the checker.

        The create call is deferred to check_create_complete() because the
        load balancer may still be busy with a previous operation.
        """
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        self.client_plugin().resolve_pool(
            properties, self.POOL, 'pool_id')
        # The pool ID goes in the request URL, not the member body.
        properties.pop('pool_id')
        if self.SUBNET in properties:
            self.client_plugin().resolve_subnet(
                properties, self.SUBNET, 'subnet_id')
        return properties

    def check_create_complete(self, properties):
        if self.resource_id is None:
            try:
                member = self.client().create_lbaas_member(
                    self.pool_id, {'member': properties})['member']
                self.resource_id_set(member['id'])
            except Exception as ex:
                # "Invalid state" means the LB is busy: retry next poll.
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def _show_resource(self):
        """Fetch the live member dict from Neutron."""
        member = self.client().show_lbaas_member(self.resource_id,
                                                 self.pool_id)
        return member['member']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Defer the update to check_update_complete() (LB may be busy)."""
        self._update_called = False
        return prop_diff

    def check_update_complete(self, prop_diff):
        if not prop_diff:
            return True

        if not self._update_called:
            try:
                self.client().update_lbaas_member(self.resource_id,
                                                  self.pool_id,
                                                  {'member': prop_diff})
                self._update_called = True
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def handle_delete(self):
        """Defer the delete to check_delete_complete() (LB may be busy)."""
        self._delete_called = False

    def check_delete_complete(self, data):
        if self.resource_id is None:
            return True

        if not self._delete_called:
            try:
                self.client().delete_lbaas_member(self.resource_id,
                                                  self.pool_id)
                self._delete_called = True
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                elif self.client_plugin().is_not_found(ex):
                    # Already gone: nothing left to wait for.
                    return True
                raise

        return self._check_lb_status()
class Router(neutron.NeutronResource):
    """A resource that implements Neutron router.

    Router is a physical or virtual network device that passes network
    traffic between different networks.
    """

    required_service_extension = 'router'

    PROPERTIES = (
        NAME, EXTERNAL_GATEWAY, VALUE_SPECS, ADMIN_STATE_UP,
        L3_AGENT_ID, L3_AGENT_IDS, DISTRIBUTED, HA,
    ) = (
        'name', 'external_gateway_info', 'value_specs', 'admin_state_up',
        'l3_agent_id', 'l3_agent_ids', 'distributed', 'ha')

    # Keys of the nested external_gateway_info map property.
    _EXTERNAL_GATEWAY_KEYS = (
        EXTERNAL_GATEWAY_NETWORK, EXTERNAL_GATEWAY_ENABLE_SNAT,
        EXTERNAL_GATEWAY_FIXED_IPS,
    ) = (
        'network', 'enable_snat', 'external_fixed_ips',
    )

    _EXTERNAL_GATEWAY_FIXED_IPS_KEYS = (
        IP_ADDRESS, SUBNET,
    ) = (
        'ip_address', 'subnet',
    )

    ATTRIBUTES = (
        STATUS, EXTERNAL_GATEWAY_INFO_ATTR, NAME_ATTR,
        ADMIN_STATE_UP_ATTR, TENANT_ID,
    ) = (
        'status', 'external_gateway_info', 'name',
        'admin_state_up', 'tenant_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name of the router.'),
            update_allowed=True),
        EXTERNAL_GATEWAY: properties.Schema(
            properties.Schema.MAP,
            _('External network gateway configuration for a router.'),
            schema={
                EXTERNAL_GATEWAY_NETWORK: properties.Schema(
                    properties.Schema.STRING,
                    _('ID or name of the external network for the gateway.'),
                    required=True,
                    update_allowed=True),
                EXTERNAL_GATEWAY_ENABLE_SNAT: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('Enables Source NAT on the router gateway. NOTE: The '
                      'default policy setting in Neutron restricts usage of '
                      'this property to administrative users only.'),
                    update_allowed=True),
                EXTERNAL_GATEWAY_FIXED_IPS: properties.Schema(
                    properties.Schema.LIST,
                    _('External fixed IP addresses for the gateway.'),
                    schema=properties.Schema(
                        properties.Schema.MAP,
                        schema={
                            IP_ADDRESS: properties.Schema(
                                properties.Schema.STRING,
                                _('External fixed IP address.'),
                                constraints=[
                                    constraints.CustomConstraint('ip_addr'),
                                ]),
                            SUBNET: properties.Schema(
                                properties.Schema.STRING,
                                _('Subnet of external fixed IP address.'),
                                constraints=[
                                    constraints.CustomConstraint(
                                        'neutron.subnet')
                                ]),
                        }),
                    update_allowed=True,
                    support_status=support.SupportStatus(version='6.0.0')),
            },
            update_allowed=True),
        VALUE_SPECS: properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the creation request.'),
            default={},
            update_allowed=True),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the router.'),
            default=True,
            update_allowed=True),
        L3_AGENT_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of the L3 agent. NOTE: The default policy setting in '
              'Neutron restricts usage of this property to administrative '
              'users only.'),
            update_allowed=True,
            # Hidden since 6.0.0; L3_AGENT_IDS supersedes it (see the
            # translation rule below).
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2015.1',
                    message=_('Use property %s.') % L3_AGENT_IDS,
                    previous_status=support.SupportStatus(version='2014.1'))),
        ),
        L3_AGENT_IDS: properties.Schema(
            properties.Schema.LIST,
            _('ID list of the L3 agent. User can specify multi-agents '
              'for highly available router. NOTE: The default policy '
              'setting in Neutron restricts usage of this property to '
              'administrative users only.'),
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
            update_allowed=True,
            support_status=support.SupportStatus(version='2015.1')),
        DISTRIBUTED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Indicates whether or not to create a distributed router. '
              'NOTE: The default policy setting in Neutron restricts usage '
              'of this property to administrative users only. This property '
              'can not be used in conjunction with the L3 agent ID.'),
            support_status=support.SupportStatus(version='2015.1')),
        HA: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Indicates whether or not to create a highly available router. '
              'NOTE: The default policy setting in Neutron restricts usage '
              'of this property to administrative users only. And now neutron '
              'do not support distributed and ha at the same time.'),
            support_status=support.SupportStatus(version='2015.1')),
    }

    attributes_schema = {
        STATUS: attributes.Schema(
            _("The status of the router."),
            type=attributes.Schema.STRING),
        EXTERNAL_GATEWAY_INFO_ATTR: attributes.Schema(
            _("Gateway network for the router."),
            type=attributes.Schema.MAP),
        NAME_ATTR: attributes.Schema(
            _("Friendly name of the router."),
            type=attributes.Schema.STRING),
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _("Administrative state of the router."),
            type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(
            _("Tenant owning the router."),
            type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        """Resolve names to IDs and fold L3_AGENT_ID into L3_AGENT_IDS."""
        rules = [
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.EXTERNAL_GATEWAY, self.EXTERNAL_GATEWAY_NETWORK],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='network'),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [
                    self.EXTERNAL_GATEWAY, self.EXTERNAL_GATEWAY_FIXED_IPS,
                    self.SUBNET
                ],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='subnet')
        ]
        if props.get(self.L3_AGENT_ID):
            # Migrate the deprecated scalar property into the list form.
            rules.extend([
                translation.TranslationRule(
                    props,
                    translation.TranslationRule.ADD,
                    [self.L3_AGENT_IDS],
                    [props.get(self.L3_AGENT_ID)]),
                translation.TranslationRule(
                    props,
                    translation.TranslationRule.DELETE,
                    [self.L3_AGENT_ID])
            ])
        return rules

    def validate(self):
        """Reject property combinations Neutron cannot satisfy."""
        super(Router, self).validate()
        is_distributed = self.properties[self.DISTRIBUTED]
        l3_agent_id = self.properties[self.L3_AGENT_ID]
        l3_agent_ids = self.properties[self.L3_AGENT_IDS]
        is_ha = self.properties[self.HA]
        if l3_agent_id and l3_agent_ids:
            raise exception.ResourcePropertyConflict(self.L3_AGENT_ID,
                                                     self.L3_AGENT_IDS)
        # do not specific l3 agent when creating a distributed router
        if is_distributed and (l3_agent_id or l3_agent_ids):
            raise exception.ResourcePropertyConflict(
                self.DISTRIBUTED,
                "/".join([self.L3_AGENT_ID, self.L3_AGENT_IDS]))
        if is_ha and is_distributed:
            raise exception.ResourcePropertyConflict(self.DISTRIBUTED,
                                                     self.HA)
        if not is_ha and l3_agent_ids and len(l3_agent_ids) > 1:
            msg = _('Non HA routers can only have one L3 agent.')
            raise exception.StackValidationFailed(message=msg)

    def add_dependencies(self, deps):
        """Also depend on any subnet of the external gateway network.

        Ensures the subnet exists before the gateway is attached and is not
        deleted while the gateway still uses it.
        """
        super(Router, self).add_dependencies(deps)
        external_gw = self.properties[self.EXTERNAL_GATEWAY]
        if external_gw:
            external_gw_net = external_gw.get(self.EXTERNAL_GATEWAY_NETWORK)
            for res in six.itervalues(self.stack):
                if res.has_interface('OS::Neutron::Subnet'):
                    subnet_net = res.properties.get(subnet.Subnet.NETWORK)
                    if subnet_net == external_gw_net:
                        deps += (self, res)

    def _resolve_gateway(self, props):
        """Convert the gateway property map into Neutron API form."""
        gateway = props.get(self.EXTERNAL_GATEWAY)
        if gateway:
            gateway['network_id'] = gateway.pop(self.EXTERNAL_GATEWAY_NETWORK)
            if gateway[self.EXTERNAL_GATEWAY_ENABLE_SNAT] is None:
                del gateway[self.EXTERNAL_GATEWAY_ENABLE_SNAT]
            if gateway[self.EXTERNAL_GATEWAY_FIXED_IPS] is None:
                del gateway[self.EXTERNAL_GATEWAY_FIXED_IPS]
            else:
                self._resolve_subnet(gateway)
        return props

    def _get_l3_agent_list(self, props):
        """Pop the agent properties and return them as a single list."""
        l3_agent_id = props.pop(self.L3_AGENT_ID, None)
        l3_agent_ids = props.pop(self.L3_AGENT_IDS, None)
        if not l3_agent_ids and l3_agent_id:
            l3_agent_ids = [l3_agent_id]
        return l3_agent_ids

    def _resolve_subnet(self, gateway):
        """Strip unset keys and rename 'subnet' to Neutron's 'subnet_id'."""
        external_gw_fixed_ips = gateway[self.EXTERNAL_GATEWAY_FIXED_IPS]
        for fixed_ip in external_gw_fixed_ips:
            # BUG FIX: iterate over a snapshot of the items. Popping from
            # the dict while iterating six.iteritems(fixed_ip) directly
            # raises RuntimeError on Python 3 as soon as a key is unset.
            for key, value in list(six.iteritems(fixed_ip)):
                if value is None:
                    fixed_ip.pop(key)
            if self.SUBNET in fixed_ip:
                fixed_ip['subnet_id'] = fixed_ip.pop(self.SUBNET)

    def handle_create(self):
        """Create the router, then schedule it onto any requested agents."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        self._resolve_gateway(props)
        l3_agent_ids = self._get_l3_agent_list(props)

        router = self.client().create_router({'router': props})['router']
        self.resource_id_set(router['id'])

        if l3_agent_ids:
            self._replace_agent(l3_agent_ids)

    def _show_resource(self):
        """Fetch the live router dict from Neutron."""
        return self.client().show_router(self.resource_id)['router']

    def check_create_complete(self, *args):
        # Named 'attrs' so the module-level 'attributes' is not shadowed.
        attrs = self._show_resource()
        return self.is_built(attrs)

    def handle_delete(self):
        """Delete the router, treating an already-gone one as success."""
        if not self.resource_id:
            return
        try:
            self.client().delete_router(self.resource_id)
        except Exception as ex:
            # Re-raises anything that is not a NotFound.
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply gateway, agent and plain property changes in that order."""
        if self.EXTERNAL_GATEWAY in prop_diff:
            self._resolve_gateway(prop_diff)

        if self.L3_AGENT_IDS in prop_diff or self.L3_AGENT_ID in prop_diff:
            l3_agent_ids = self._get_l3_agent_list(prop_diff)
            self._replace_agent(l3_agent_ids)

        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_router(self.resource_id,
                                        {'router': prop_diff})

    def _replace_agent(self, l3_agent_ids=None):
        """Unschedule the router from all agents, then add the given ones."""
        ret = self.client().list_l3_agent_hosting_routers(self.resource_id)
        for agent in ret['agents']:
            self.client().remove_router_from_l3_agent(agent['id'],
                                                      self.resource_id)
        if l3_agent_ids:
            for l3_agent_id in l3_agent_ids:
                self.client().add_router_to_l3_agent(
                    l3_agent_id, {'router_id': self.resource_id})
class HeatWaitCondition(resource.Resource):
    """Resource for handling signals received by WaitConditionHandle.

    Resource takes WaitConditionHandle and starts to create. Resource is in
    CREATE_IN_PROGRESS status until WaitConditionHandle doesn't receive
    sufficient number of successful signals (this number can be specified with
    count property) and successfully creates after that, or fails due to
    timeout.
    """

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        HANDLE, TIMEOUT, COUNT,
    ) = (
        'handle', 'timeout', 'count',
    )

    ATTRIBUTES = (
        DATA,
    ) = (
        'data',
    )

    properties_schema = {
        HANDLE: properties.Schema(
            properties.Schema.STRING,
            _('A reference to the wait condition handle used to signal this '
              'wait condition.'),
            required=True
        ),
        TIMEOUT: properties.Schema(
            properties.Schema.NUMBER,
            _('The number of seconds to wait for the correct number of '
              'signals to arrive.'),
            required=True,
            constraints=[
                constraints.Range(1, 43200),
            ]
        ),
        COUNT: properties.Schema(
            properties.Schema.INTEGER,
            _('The number of success signals that must be received before '
              'the stack creation process continues.'),
            constraints=[
                constraints.Range(min=1),
            ],
            default=1,
            update_allowed=True
        ),
    }

    attributes_schema = {
        DATA: attributes.Schema(
            _('JSON string containing data associated with wait '
              'condition signals sent to the handle.'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING
        ),
    }

    # NOTE: the former __init__ override only delegated to super() with
    # identical arguments, so it has been removed as redundant.

    def _get_handle_resource(self):
        """Resolve the handle property (a reference id) to its resource."""
        return self.stack.resource_by_refid(self.properties[self.HANDLE])

    def _validate_handle_resource(self, handle):
        """Raise ValueError unless *handle* is a wait condition handle."""
        if not isinstance(handle, wc_base.BaseWaitConditionHandle):
            raise ValueError(_('%(name)s is not a valid wait condition '
                               'handle.') % {'name': handle.name})

    def _wait(self, handle, started_at, timeout_in):
        """Poll the handle once.

        Returns True when at least COUNT success signals have arrived,
        False to keep polling. Raises WaitConditionTimeout once
        *timeout_in* seconds have elapsed since *started_at*, and
        WaitConditionFailure as soon as any signal reports a
        non-success status.
        """
        if timeutils.is_older_than(started_at, timeout_in):
            exc = wc_base.WaitConditionTimeout(self, handle)
            LOG.info(_LI('%(name)s Timed out (%(timeout)s)'),
                     {'name': str(self), 'timeout': str(exc)})
            raise exc

        handle_status = handle.get_status()

        if any(s != handle.STATUS_SUCCESS for s in handle_status):
            failure = wc_base.WaitConditionFailure(self, handle)
            LOG.info(_LI('%(name)s Failed (%(failure)s)'),
                     {'name': str(self), 'failure': str(failure)})
            raise failure

        if len(handle_status) >= self.properties[self.COUNT]:
            LOG.info(_LI("%s Succeeded"), str(self))
            return True
        return False

    def handle_create(self):
        handle = self._get_handle_resource()
        self._validate_handle_resource(handle)
        started_at = timeutils.utcnow()
        return handle, started_at, float(self.properties[self.TIMEOUT])

    def check_create_complete(self, data):
        return self._wait(*data)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            # Re-resolve properties so an updated count takes effect
            # during the subsequent wait.
            self.properties = json_snippet.properties(self.properties_schema,
                                                      self.context)

        handle = self._get_handle_resource()
        started_at = timeutils.utcnow()
        return handle, started_at, float(self.properties[self.TIMEOUT])

    def check_update_complete(self, data):
        return self._wait(*data)

    def handle_delete(self):
        # Clear accumulated signal data, but only if the handle resource
        # was actually created.
        handle = self._get_handle_resource()
        if handle and handle.id and handle.action != handle.INIT:
            handle.metadata_set({})

    def _resolve_attribute(self, key):
        handle = self._get_handle_resource()
        if key == self.DATA:
            meta = handle.metadata_get(refresh=True)
            res = {k: meta[k][handle.DATA] for k in meta}
            # Lazy %-style args: interpolation cost is only paid when
            # debug logging is actually enabled.
            LOG.debug('%(name)s.GetAtt(%(key)s) == %(res)s',
                      {'name': self.name, 'key': key, 'res': res})
            return six.text_type(jsonutils.dumps(res))
class NetworkGateway(neutron.NeutronResource):
    """Network Gateway resource in Neutron Network Gateway.

    Resource for connecting internal networks with specified devices.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        NAME, DEVICES, CONNECTIONS,
    ) = (
        'name', 'devices', 'connections',
    )

    ATTRIBUTES = (
        DEFAULT,
    ) = (
        'default',
    )

    _DEVICES_KEYS = (
        ID, INTERFACE_NAME,
    ) = (
        'id', 'interface_name',
    )

    _CONNECTIONS_KEYS = (
        NETWORK_ID, NETWORK, SEGMENTATION_TYPE, SEGMENTATION_ID,
    ) = (
        'network_id', 'network', 'segmentation_type', 'segmentation_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            description=_('The name of the network gateway.'),
            update_allowed=True
        ),
        DEVICES: properties.Schema(
            properties.Schema.LIST,
            description=_('Device info for this network gateway.'),
            required=True,
            constraints=[constraints.Length(min=1)],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ID: properties.Schema(
                        properties.Schema.STRING,
                        description=_('The device id for the network '
                                      'gateway.'),
                        required=True
                    ),
                    INTERFACE_NAME: properties.Schema(
                        properties.Schema.STRING,
                        description=_('The interface name for the '
                                      'network gateway.'),
                        required=True
                    )
                }
            )
        ),
        CONNECTIONS: properties.Schema(
            properties.Schema.LIST,
            description=_('Connection info for this network gateway.'),
            default={},
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    # NETWORK_ID is hidden in favour of NETWORK; the
                    # translation rules below map one onto the other.
                    NETWORK_ID: properties.Schema(
                        properties.Schema.STRING,
                        support_status=support.SupportStatus(
                            status=support.HIDDEN,
                            message=_('Use property %s.') % NETWORK,
                            version='5.0.0',
                            previous_status=support.SupportStatus(
                                status=support.DEPRECATED,
                                version='2014.2'
                            )
                        ),
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ],
                    ),
                    NETWORK: properties.Schema(
                        properties.Schema.STRING,
                        description=_('The internal network to connect on '
                                      'the network gateway.'),
                        support_status=support.SupportStatus(version='2014.2'),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ],
                    ),
                    SEGMENTATION_TYPE: properties.Schema(
                        properties.Schema.STRING,
                        description=_(
                            'L2 segmentation strategy on the external '
                            'side of the network gateway.'),
                        default='flat',
                        constraints=[constraints.AllowedValues(
                            ('flat', 'vlan'))]
                    ),
                    SEGMENTATION_ID: properties.Schema(
                        properties.Schema.INTEGER,
                        description=_(
                            'The id for L2 segment on the external side '
                            'of the network gateway. Must be specified '
                            'when using vlan.'),
                        constraints=[constraints.Range(0, 4094)]
                    )
                }
            )
        )
    }

    attributes_schema = {
        DEFAULT: attributes.Schema(
            _("A boolean value of default flag."),
            type=attributes.Schema.STRING
        ),
    }

    def translation_rules(self, props):
        """Map the hidden network_id key onto network and resolve names."""
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.CONNECTIONS, self.NETWORK],
                value_name=self.NETWORK_ID
            ),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.CONNECTIONS, self.NETWORK],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='network'
            )
        ]

    def _show_resource(self):
        """Fetch the network gateway for attribute resolution."""
        return self.client().show_network_gateway(
            self.resource_id)['network_gateway']

    def validate(self):
        """Validate any of the provided params."""
        super(NetworkGateway, self).validate()
        connections = self.properties[self.CONNECTIONS]

        for connection in connections:
            # segmentation_id is mandatory with vlan and forbidden
            # (other than 0) with flat.
            segmentation_type = connection[self.SEGMENTATION_TYPE]
            segmentation_id = connection.get(self.SEGMENTATION_ID)

            if segmentation_type == 'vlan' and segmentation_id is None:
                msg = _("segmentation_id must be specified for using vlan")
                raise exception.StackValidationFailed(message=msg)

            if segmentation_type == 'flat' and segmentation_id:
                msg = _(
                    "segmentation_id cannot be specified except 0 for "
                    "using flat")
                raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the gateway, then attach each requested connection."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())

        connections = props.pop(self.CONNECTIONS)
        ret = self.client().create_network_gateway(
            {'network_gateway': props})['network_gateway']
        self.resource_id_set(ret['id'])

        for connection in connections:
            # Translate the NETWORK key to the API's network_id form.
            if self.NETWORK in connection:
                connection['network_id'] = connection.pop(self.NETWORK)
            self.client().connect_network_gateway(ret['id'], connection)

    def handle_delete(self):
        """Detach all connections, then delete the gateway itself."""
        if not self.resource_id:
            return

        connections = self.properties[self.CONNECTIONS]
        for connection in connections:
            with self.client_plugin().ignore_not_found:
                if self.NETWORK in connection:
                    connection['network_id'] = connection.pop(self.NETWORK)
                self.client().disconnect_network_gateway(
                    self.resource_id, connection)

        try:
            self.client().delete_network_gateway(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update name/devices/connections.

        A devices change is handled by recreating the gateway; a
        connections change by disconnecting the old set and connecting
        the new one.
        """
        connections = None
        if self.CONNECTIONS in prop_diff:
            connections = prop_diff.pop(self.CONNECTIONS)

        if self.DEVICES in prop_diff:
            # Devices cannot be updated in place: delete and recreate
            # with the merged properties, then stop (connections were
            # re-attached by handle_create).
            self.handle_delete()
            self.properties.data.update(prop_diff)
            self.handle_create()
            return

        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_network_gateway(
                self.resource_id, {'network_gateway': prop_diff})

        if connections:
            # Drop every currently configured connection...
            for connection in self.properties[self.CONNECTIONS]:
                with self.client_plugin().ignore_not_found:
                    if self.NETWORK in connection:
                        connection['network_id'] = connection.pop(self.NETWORK)
                    self.client().disconnect_network_gateway(
                        self.resource_id, connection)
            # ...then attach the requested new set.
            for connection in connections:
                if self.NETWORK in connection:
                    connection['network_id'] = connection.pop(self.NETWORK)
                self.client().connect_network_gateway(
                    self.resource_id, connection)
class AutoScalingPolicy(signal_responder.SignalResponder,
                        cooldown.CooldownMixin):
    """A resource to manage scaling of `OS::Heat::AutoScalingGroup`.

    **Note** while it may incidentally support
    `AWS::AutoScaling::AutoScalingGroup` for now, please don't use it for that
    purpose and use `AWS::AutoScaling::ScalingPolicy` instead.

    Resource to manage scaling for `OS::Heat::AutoScalingGroup`, i.e. define
    which metric should be scaled and scaling adjustment, set cooldown etc.
    """

    PROPERTIES = (
        AUTO_SCALING_GROUP_NAME, SCALING_ADJUSTMENT, ADJUSTMENT_TYPE,
        COOLDOWN, MIN_ADJUSTMENT_STEP
    ) = (
        'auto_scaling_group_id', 'scaling_adjustment', 'adjustment_type',
        'cooldown', 'min_adjustment_step',
    )

    ATTRIBUTES = (
        ALARM_URL, SIGNAL_URL
    ) = (
        'alarm_url', 'signal_url'
    )

    properties_schema = {
        # TODO(Qiming): property name should be AUTO_SCALING_GROUP_ID
        AUTO_SCALING_GROUP_NAME: properties.Schema(
            properties.Schema.STRING,
            _('AutoScaling group ID to apply policy to.'),
            required=True
        ),
        SCALING_ADJUSTMENT: properties.Schema(
            properties.Schema.NUMBER,
            _('Size of adjustment.'),
            required=True,
            update_allowed=True
        ),
        ADJUSTMENT_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Type of adjustment (absolute or percentage).'),
            required=True,
            constraints=[
                constraints.AllowedValues(
                    [sc_util.CHANGE_IN_CAPACITY,
                     sc_util.EXACT_CAPACITY,
                     sc_util.PERCENT_CHANGE_IN_CAPACITY]),
            ],
            update_allowed=True
        ),
        COOLDOWN: properties.Schema(
            properties.Schema.NUMBER,
            _('Cooldown period, in seconds.'),
            update_allowed=True
        ),
        MIN_ADJUSTMENT_STEP: properties.Schema(
            properties.Schema.INTEGER,
            _('Minimum number of resources that are added or removed '
              'when the AutoScaling group scales up or down. This can '
              'be used only when specifying percent_change_in_capacity '
              'for the adjustment_type property.'),
            constraints=[
                constraints.Range(min=0, ),
            ],
            update_allowed=True
        ),
    }

    attributes_schema = {
        ALARM_URL: attributes.Schema(
            _("A signed url to handle the alarm."),
            type=attributes.Schema.STRING
        ),
        SIGNAL_URL: attributes.Schema(
            _("A url to handle the alarm using native API."),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING
        ),
    }

    def validate(self):
        """Add validation for min_adjustment_step."""
        super(AutoScalingPolicy, self).validate()
        self._validate_min_adjustment_step()

    def _validate_min_adjustment_step(self):
        # min_adjustment_step only makes sense with a percentage-based
        # adjustment type.
        adjustment_type = self.properties.get(self.ADJUSTMENT_TYPE)
        adjustment_step = self.properties.get(self.MIN_ADJUSTMENT_STEP)

        if (adjustment_type != sc_util.PERCENT_CHANGE_IN_CAPACITY
                and adjustment_step is not None):
            raise exception.ResourcePropertyValueDependency(
                prop1=self.MIN_ADJUSTMENT_STEP,
                prop2=self.ADJUSTMENT_TYPE,
                value=sc_util.PERCENT_CHANGE_IN_CAPACITY)

    def handle_metadata_reset(self):
        """Clear a stale in-progress marker from resource metadata."""
        metadata = self.metadata_get()
        if 'scaling_in_progress' in metadata:
            metadata['scaling_in_progress'] = False
            self.metadata_set(metadata)

    def handle_create(self):
        super(AutoScalingPolicy, self).handle_create()
        self.resource_id_set(self._get_user_id())

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Updates self.properties, if Properties has changed.

        If Properties has changed, update self.properties, so we get the new
        values during any subsequent adjustment.
        """
        if prop_diff:
            self.properties = json_snippet.properties(self.properties_schema,
                                                      self.context)

    def handle_signal(self, details=None):
        # ceilometer sends details like this:
        # {u'alarm_id': ID, u'previous': u'ok', u'current': u'alarm',
        #  u'reason': u'...'})
        # in this policy we currently assume that this gets called
        # only when there is an alarm. But the template writer can
        # put the policy in all the alarm notifiers (nodata, and ok).
        #
        # our watchrule has upper case states so lower() them all.
        if details is None:
            alarm_state = 'alarm'
        else:
            alarm_state = details.get('current',
                                      details.get('state', 'alarm')).lower()

        LOG.info(_LI('Alarm %(name)s, new state %(state)s'),
                 {'name': self.name, 'state': alarm_state})

        # Only act on an actual alarm transition.
        if alarm_state != 'alarm':
            raise exception.NoActionRequired()
        if not self._is_scaling_allowed():
            # Still inside the cooldown window: refuse to scale.
            LOG.info(_LI("%(name)s NOT performing scaling action, "
                         "cooldown %(cooldown)s"),
                     {'name': self.name,
                      'cooldown': self.properties[self.COOLDOWN]})
            raise exception.NoActionRequired()

        asgn_id = self.properties[self.AUTO_SCALING_GROUP_NAME]
        group = self.stack.resource_by_refid(asgn_id)
        changed_size = False
        try:
            if group is None:
                raise exception.NotFound(_('Alarm %(alarm)s could not find '
                                           'scaling group named "%(group)s"')
                                         % {'alarm': self.name,
                                            'group': asgn_id})

            LOG.info(_LI('%(name)s Alarm, adjusting Group %(group)s with id '
                         '%(asgn_id)s by %(filter)s'),
                     {'name': self.name, 'group': group.name,
                      'asgn_id': asgn_id,
                      'filter': self.properties[self.SCALING_ADJUSTMENT]})
            changed_size = group.adjust(
                self.properties[self.SCALING_ADJUSTMENT],
                self.properties[self.ADJUSTMENT_TYPE],
                self.properties[self.MIN_ADJUSTMENT_STEP])
        finally:
            # Always record the outcome so the cooldown state is
            # updated even when the adjustment raised.
            self._finished_scaling("%s : %s" % (
                self.properties[self.ADJUSTMENT_TYPE],
                self.properties[self.SCALING_ADJUSTMENT]),
                changed_size=changed_size)

    def _resolve_attribute(self, name):
        if self.resource_id is None:
            return
        if name == self.ALARM_URL:
            return six.text_type(self._get_ec2_signed_url())
        elif name == self.SIGNAL_URL:
            return six.text_type(self._get_heat_signal_url())

    def get_reference_id(self):
        return resource.Resource.get_reference_id(self)
class MeteringRule(neutron.NeutronResource):
    """A resource to create rule for some label.

    Resource for allowing specified label to measure the traffic for a
    specific set of ip range.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        METERING_LABEL_ID, REMOTE_IP_PREFIX, DIRECTION, EXCLUDED,
    ) = (
        'metering_label_id', 'remote_ip_prefix', 'direction', 'excluded',
    )

    ATTRIBUTES = (
        DIRECTION_ATTR, EXCLUDED_ATTR, METERING_LABEL_ID_ATTR,
        REMOTE_IP_PREFIX_ATTR,
    ) = (
        'direction', 'excluded', 'metering_label_id', 'remote_ip_prefix',
    )

    properties_schema = {
        METERING_LABEL_ID: properties.Schema(
            properties.Schema.STRING,
            _('The metering label ID to associate with this metering rule.'),
            required=True
        ),
        REMOTE_IP_PREFIX: properties.Schema(
            properties.Schema.STRING,
            _('Indicates remote IP prefix to be associated with this '
              'metering rule.'),
            required=True,
        ),
        DIRECTION: properties.Schema(
            properties.Schema.STRING,
            _('The direction in which metering rule is applied, '
              'either ingress or egress.'),
            default='ingress',
            constraints=[constraints.AllowedValues((
                'ingress', 'egress'))]
        ),
        EXCLUDED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Specify whether the remote_ip_prefix will be excluded or '
              'not from traffic counters of the metering label. For example '
              'to not count the traffic of a specific IP address of a range.'),
            # FIX: the default was the string 'False'; use a real boolean
            # so the value matches the declared BOOLEAN type.
            default=False
        )
    }

    attributes_schema = {
        DIRECTION_ATTR: attributes.Schema(
            _('The direction in which metering rule is applied.'),
            type=attributes.Schema.STRING
        ),
        EXCLUDED_ATTR: attributes.Schema(
            _('Exclude state for cidr.'),
            type=attributes.Schema.STRING
        ),
        METERING_LABEL_ID_ATTR: attributes.Schema(
            _('The metering label ID to associate with this metering rule.'),
            type=attributes.Schema.STRING
        ),
        REMOTE_IP_PREFIX_ATTR: attributes.Schema(
            _('CIDR to be associated with this metering rule.'),
            type=attributes.Schema.STRING
        ),
    }

    def handle_create(self):
        """Create the metering label rule and record its id."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        metering_label_rule = self.client().create_metering_label_rule(
            {'metering_label_rule': props})['metering_label_rule']
        self.resource_id_set(metering_label_rule['id'])

    def _show_resource(self):
        """Fetch the metering label rule for attribute resolution."""
        return self.client().show_metering_label_rule(
            self.resource_id)['metering_label_rule']

    def handle_delete(self):
        """Delete the rule; an already-gone rule is not an error."""
        if not self.resource_id:
            return

        try:
            self.client().delete_metering_label_rule(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
class MeteringLabel(neutron.NeutronResource):
    """A resource for creating neutron metering label.

    The idea is to meter this at the L3 routers levels. The point is to allow
    operators to configure IP ranges and to assign a label to them. For example
    we will be able to set two labels; one for the internal traffic, and the
    other one for the external traffic. Each label will measure the traffic
    for a specific set of IP range. Then, bandwidth measurement will be sent
    for each label to the Oslo notification system and could be collected by
    Ceilometer.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        NAME, DESCRIPTION, SHARED,
    ) = (
        'name', 'description', 'shared',
    )

    ATTRIBUTES = (
        NAME_ATTR, DESCRIPTION_ATTR, SHARED_ATTR,
    ) = (
        'name', 'description', 'shared',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the metering label.')
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the metering label.'),
        ),
        SHARED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether the metering label should be shared '
              'across all tenants.'),
            default=False,
            support_status=support.SupportStatus(version='2015.1'),
        ),
    }

    attributes_schema = {
        NAME_ATTR: attributes.Schema(
            _('Name of the metering label.'),
            type=attributes.Schema.STRING
        ),
        DESCRIPTION_ATTR: attributes.Schema(
            _('Description of the metering label.'),
            type=attributes.Schema.STRING
        ),
        SHARED_ATTR: attributes.Schema(
            _('Shared status of the metering label.'),
            type=attributes.Schema.STRING
        ),
    }

    def handle_create(self):
        """Create the metering label in Neutron and store its id."""
        body = self.prepare_properties(self.properties,
                                       self.physical_resource_name())
        label = self.client().create_metering_label(
            {'metering_label': body})['metering_label']
        self.resource_id_set(label['id'])

    def _show_resource(self):
        """Return the label's current state for attribute resolution."""
        label_id = self.resource_id
        return self.client().show_metering_label(label_id)['metering_label']

    def handle_delete(self):
        """Delete the label, treating a missing label as success."""
        label_id = self.resource_id
        if not label_id:
            return
        try:
            self.client().delete_metering_label(label_id)
        except Exception as exc:
            self.client_plugin().ignore_not_found(exc)
        else:
            return True
class IPsecPolicy(neutron.NeutronResource):
    """A resource for IPsec policy in Neutron.

    The IP security policy specifying the authentication and encryption
    algorithm, and encapsulation mode used for the established VPN connection.
    """

    required_service_extension = 'vpnaas'

    PROPERTIES = (
        NAME, DESCRIPTION, TRANSFORM_PROTOCOL, ENCAPSULATION_MODE,
        AUTH_ALGORITHM, ENCRYPTION_ALGORITHM, LIFETIME, PFS,
    ) = (
        'name', 'description', 'transform_protocol', 'encapsulation_mode',
        'auth_algorithm', 'encryption_algorithm', 'lifetime', 'pfs',
    )

    _LIFETIME_KEYS = (
        LIFETIME_UNITS, LIFETIME_VALUE,
    ) = (
        'units', 'value',
    )

    ATTRIBUTES = (
        AUTH_ALGORITHM_ATTR, DESCRIPTION_ATTR, ENCAPSULATION_MODE_ATTR,
        ENCRYPTION_ALGORITHM_ATTR, LIFETIME_ATTR, NAME_ATTR, PFS_ATTR,
        TENANT_ID, TRANSFORM_PROTOCOL_ATTR,
    ) = (
        'auth_algorithm', 'description', 'encapsulation_mode',
        'encryption_algorithm', 'lifetime', 'name', 'pfs',
        'tenant_id', 'transform_protocol',
    )

    # Only name and description carry update_allowed=True; the
    # cryptographic parameters are fixed at create time.
    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the ipsec policy.'),
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the ipsec policy.'),
            update_allowed=True
        ),
        TRANSFORM_PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            _('Transform protocol for the ipsec policy.'),
            default='esp',
            constraints=[
                constraints.AllowedValues(['esp', 'ah', 'ah-esp']),
            ]
        ),
        ENCAPSULATION_MODE: properties.Schema(
            properties.Schema.STRING,
            _('Encapsulation mode for the ipsec policy.'),
            default='tunnel',
            constraints=[
                constraints.AllowedValues(['tunnel', 'transport']),
            ]
        ),
        AUTH_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('Authentication hash algorithm for the ipsec policy.'),
            default='sha1',
            constraints=[
                constraints.AllowedValues(['sha1']),
            ]
        ),
        ENCRYPTION_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('Encryption algorithm for the ipsec policy.'),
            default='aes-128',
            constraints=[
                constraints.AllowedValues(
                    ['3des', 'aes-128', 'aes-192', 'aes-256']),
            ]
        ),
        LIFETIME: properties.Schema(
            properties.Schema.MAP,
            _('Safety assessment lifetime configuration for the ipsec '
              'policy.'),
            schema={
                LIFETIME_UNITS: properties.Schema(
                    properties.Schema.STRING,
                    _('Safety assessment lifetime units.'),
                    default='seconds',
                    constraints=[
                        constraints.AllowedValues(
                            ['seconds', 'kilobytes']),
                    ]
                ),
                LIFETIME_VALUE: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Safety assessment lifetime value in specified '
                      'units.'),
                    default=3600
                ),
            }
        ),
        PFS: properties.Schema(
            properties.Schema.STRING,
            _('Perfect forward secrecy for the ipsec policy.'),
            default='group5',
            constraints=[
                constraints.AllowedValues(
                    ['group2', 'group5', 'group14']),
            ]
        ),
    }

    attributes_schema = {
        AUTH_ALGORITHM_ATTR: attributes.Schema(
            _('The authentication hash algorithm of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        DESCRIPTION_ATTR: attributes.Schema(
            _('The description of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        ENCAPSULATION_MODE_ATTR: attributes.Schema(
            _('The encapsulation mode of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        ENCRYPTION_ALGORITHM_ATTR: attributes.Schema(
            _('The encryption algorithm of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        LIFETIME_ATTR: attributes.Schema(
            _('The safety assessment lifetime configuration of the ipsec '
              'policy.'),
            type=attributes.Schema.MAP
        ),
        NAME_ATTR: attributes.Schema(
            _('The name of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        PFS_ATTR: attributes.Schema(
            _('The perfect forward secrecy of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        TENANT_ID: attributes.Schema(
            _('The unique identifier of the tenant owning the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        TRANSFORM_PROTOCOL_ATTR: attributes.Schema(
            _('The transform protocol of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
    }

    def _show_resource(self):
        """Fetch the ipsec policy for attribute resolution."""
        return self.client().show_ipsecpolicy(self.resource_id)['ipsecpolicy']

    def handle_create(self):
        """Create the ipsec policy and record its id."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        ipsecpolicy = self.client().create_ipsecpolicy(
            {'ipsecpolicy': props})['ipsecpolicy']
        self.resource_id_set(ipsecpolicy['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push the changed (update_allowed) properties to Neutron."""
        if prop_diff:
            self.client().update_ipsecpolicy(self.resource_id,
                                             {'ipsecpolicy': prop_diff})

    def handle_delete(self):
        """Delete the ipsec policy; a missing policy is not an error."""
        if not self.resource_id:
            return

        try:
            self.client().delete_ipsecpolicy(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
class IKEPolicy(neutron.NeutronResource):
    """A resource for IKE policy in Neutron.

    The Internet Key Exchange policy identifies the authentication and
    encryption algorithm used during phase one and phase two negotiation of a
    VPN connection.
    """

    required_service_extension = 'vpnaas'

    PROPERTIES = (
        NAME, DESCRIPTION, AUTH_ALGORITHM, ENCRYPTION_ALGORITHM,
        PHASE1_NEGOTIATION_MODE, LIFETIME, PFS, IKE_VERSION,
    ) = (
        'name', 'description', 'auth_algorithm', 'encryption_algorithm',
        'phase1_negotiation_mode', 'lifetime', 'pfs', 'ike_version',
    )

    _LIFETIME_KEYS = (
        LIFETIME_UNITS, LIFETIME_VALUE,
    ) = (
        'units', 'value',
    )

    ATTRIBUTES = (
        AUTH_ALGORITHM_ATTR, DESCRIPTION_ATTR, ENCRYPTION_ALGORITHM_ATTR,
        IKE_VERSION_ATTR, LIFETIME_ATTR, NAME_ATTR, PFS_ATTR,
        PHASE1_NEGOTIATION_MODE_ATTR, TENANT_ID,
    ) = (
        'auth_algorithm', 'description', 'encryption_algorithm',
        'ike_version', 'lifetime', 'name', 'pfs',
        'phase1_negotiation_mode', 'tenant_id',
    )

    # Only name and description carry update_allowed=True; the
    # negotiation parameters are fixed at create time.
    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the ike policy.'),
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the ike policy.'),
            update_allowed=True
        ),
        AUTH_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('Authentication hash algorithm for the ike policy.'),
            default='sha1',
            constraints=[
                constraints.AllowedValues(['sha1']),
            ]
        ),
        ENCRYPTION_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('Encryption algorithm for the ike policy.'),
            default='aes-128',
            constraints=[
                constraints.AllowedValues(
                    ['3des', 'aes-128', 'aes-192', 'aes-256']),
            ]
        ),
        PHASE1_NEGOTIATION_MODE: properties.Schema(
            properties.Schema.STRING,
            _('Negotiation mode for the ike policy.'),
            default='main',
            constraints=[
                constraints.AllowedValues(['main']),
            ]
        ),
        LIFETIME: properties.Schema(
            properties.Schema.MAP,
            _('Safety assessment lifetime configuration for the ike policy.'),
            schema={
                LIFETIME_UNITS: properties.Schema(
                    properties.Schema.STRING,
                    _('Safety assessment lifetime units.'),
                    default='seconds',
                    constraints=[
                        constraints.AllowedValues(
                            ['seconds', 'kilobytes']),
                    ]
                ),
                LIFETIME_VALUE: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Safety assessment lifetime value in specified '
                      'units.'),
                    default=3600
                ),
            }
        ),
        PFS: properties.Schema(
            properties.Schema.STRING,
            _('Perfect forward secrecy in lowercase for the ike policy.'),
            default='group5',
            constraints=[
                constraints.AllowedValues(['group2', 'group5', 'group14']),
            ]
        ),
        IKE_VERSION: properties.Schema(
            properties.Schema.STRING,
            _('Version for the ike policy.'),
            default='v1',
            constraints=[
                constraints.AllowedValues(['v1', 'v2']),
            ]
        ),
    }

    attributes_schema = {
        AUTH_ALGORITHM_ATTR: attributes.Schema(
            _('The authentication hash algorithm used by the ike policy.'),
            type=attributes.Schema.STRING
        ),
        DESCRIPTION_ATTR: attributes.Schema(
            _('The description of the ike policy.'),
            type=attributes.Schema.STRING
        ),
        ENCRYPTION_ALGORITHM_ATTR: attributes.Schema(
            _('The encryption algorithm used by the ike policy.'),
            type=attributes.Schema.STRING
        ),
        IKE_VERSION_ATTR: attributes.Schema(
            _('The version of the ike policy.'),
            type=attributes.Schema.STRING
        ),
        LIFETIME_ATTR: attributes.Schema(
            _('The safety assessment lifetime configuration for the ike '
              'policy.'),
            type=attributes.Schema.MAP
        ),
        NAME_ATTR: attributes.Schema(
            _('The name of the ike policy.'),
            type=attributes.Schema.STRING
        ),
        PFS_ATTR: attributes.Schema(
            _('The perfect forward secrecy of the ike policy.'),
            type=attributes.Schema.STRING
        ),
        PHASE1_NEGOTIATION_MODE_ATTR: attributes.Schema(
            _('The negotiation mode of the ike policy.'),
            type=attributes.Schema.STRING
        ),
        TENANT_ID: attributes.Schema(
            _('The unique identifier of the tenant owning the ike policy.'),
            type=attributes.Schema.STRING
        ),
    }

    def _show_resource(self):
        """Fetch the ike policy for attribute resolution."""
        return self.client().show_ikepolicy(self.resource_id)['ikepolicy']

    def handle_create(self):
        """Create the ike policy and record its id."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        ikepolicy = self.client().create_ikepolicy(
            {'ikepolicy': props})['ikepolicy']
        self.resource_id_set(ikepolicy['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push the changed (update_allowed) properties to Neutron."""
        if prop_diff:
            self.client().update_ikepolicy(self.resource_id,
                                           {'ikepolicy': prop_diff})

    def handle_delete(self):
        """Delete the ike policy; a missing policy is not an error."""
        if not self.resource_id:
            return

        try:
            self.client().delete_ikepolicy(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
class VPNService(neutron.NeutronResource):
    """A resource for VPN service in Neutron.

    VPN service is a high level object that associates VPN with a specific
    subnet and router.
    """

    required_service_extension = 'vpnaas'

    PROPERTIES = (
        NAME, DESCRIPTION, ADMIN_STATE_UP,
        SUBNET_ID, SUBNET, ROUTER_ID, ROUTER
    ) = (
        'name', 'description', 'admin_state_up',
        'subnet_id', 'subnet', 'router_id', 'router'
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, DESCRIPTION_ATTR, NAME_ATTR, ROUTER_ID_ATTR,
        STATUS, SUBNET_ID_ATTR, TENANT_ID,
    ) = (
        'admin_state_up', 'description', 'name', 'router_id',
        'status', 'subnet_id', 'tenant_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the vpn service.'),
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the vpn service.'),
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Administrative state for the vpn service.'),
            default=True,
            update_allowed=True
        ),
        # SUBNET_ID is hidden in favour of SUBNET; the translation rules
        # below map one onto the other.
        SUBNET_ID: properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                message=_('Use property %s.') % SUBNET,
                version='5.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.2'
                )
            ),
            constraints=[
                constraints.CustomConstraint('neutron.subnet')
            ]
        ),
        SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('Subnet in which the vpn service will be created.'),
            support_status=support.SupportStatus(version='2014.2'),
            required=True,
            constraints=[
                constraints.CustomConstraint('neutron.subnet')
            ]
        ),
        # ROUTER_ID is hidden in favour of ROUTER, same pattern as above.
        ROUTER_ID: properties.Schema(
            properties.Schema.STRING,
            _('Unique identifier for the router to which the vpn service '
              'will be inserted.'),
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_('Use property %s') % ROUTER,
                    version='2015.1',
                    previous_status=support.SupportStatus(version='2013.2')
                )
            ),
            constraints=[constraints.CustomConstraint('neutron.router')]
        ),
        ROUTER: properties.Schema(
            properties.Schema.STRING,
            _('The router to which the vpn service will be inserted.'),
            support_status=support.SupportStatus(version='2015.1'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.router')]
        )
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _('The administrative state of the vpn service.'),
            type=attributes.Schema.STRING
        ),
        DESCRIPTION_ATTR: attributes.Schema(
            _('The description of the vpn service.'),
            type=attributes.Schema.STRING
        ),
        NAME_ATTR: attributes.Schema(
            _('The name of the vpn service.'),
            type=attributes.Schema.STRING
        ),
        ROUTER_ID_ATTR: attributes.Schema(
            _('The unique identifier of the router to which the vpn service '
              'was inserted.'),
            type=attributes.Schema.STRING
        ),
        STATUS: attributes.Schema(
            _('The status of the vpn service.'),
            type=attributes.Schema.STRING
        ),
        SUBNET_ID_ATTR: attributes.Schema(
            _('The unique identifier of the subnet in which the vpn service '
              'was created.'),
            type=attributes.Schema.STRING
        ),
        TENANT_ID: attributes.Schema(
            _('The unique identifier of the tenant owning the vpn service.'),
            type=attributes.Schema.STRING
        ),
    }

    def translation_rules(self, props):
        """Map deprecated *_id properties onto SUBNET/ROUTER and resolve
        names to ids through the Neutron client plugin."""
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.SUBNET],
                value_path=[self.SUBNET_ID]
            ),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.SUBNET],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='subnet'
            ),
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.ROUTER],
                value_path=[self.ROUTER_ID]
            ),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.ROUTER],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='router'
            ),
        ]

    def _show_resource(self):
        """Fetch the vpn service for attribute resolution."""
        return self.client().show_vpnservice(self.resource_id)['vpnservice']

    def handle_create(self):
        """Create the vpn service and record its id.

        The SUBNET/ROUTER properties are renamed to the subnet_id/
        router_id keys the Neutron API expects.
        """
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        props['subnet_id'] = props.pop(self.SUBNET)
        props['router_id'] = props.pop(self.ROUTER)
        vpnservice = self.client().create_vpnservice(
            {'vpnservice': props})['vpnservice']
        self.resource_id_set(vpnservice['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push the changed (update_allowed) properties to Neutron."""
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_vpnservice(self.resource_id,
                                            {'vpnservice': prop_diff})

    def handle_delete(self):
        """Delete the vpn service; a missing service is not an error."""
        # FIX: guard against a never-created resource, consistent with
        # every sibling resource's handle_delete in this module.
        if not self.resource_id:
            return

        try:
            self.client().delete_vpnservice(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
class Pool(neutron.NeutronResource):
    """A resource for managing LBaaS v2 Pools.

    This resource manages Neutron-LBaaS v2 Pools, which represent a group
    of nodes. Pools define the subnet where nodes reside, balancing algorithm,
    and the nodes themselves.
    """

    support_status = support.SupportStatus(version='6.0.0')

    required_service_extension = 'lbaasv2'

    PROPERTIES = (
        ADMIN_STATE_UP, DESCRIPTION, SESSION_PERSISTENCE, NAME,
        LB_ALGORITHM, LISTENER, PROTOCOL, SESSION_PERSISTENCE_TYPE,
        SESSION_PERSISTENCE_COOKIE_NAME,
    ) = (
        'admin_state_up', 'description', 'session_persistence', 'name',
        'lb_algorithm', 'listener', 'protocol', 'type', 'cookie_name'
    )

    SESSION_PERSISTENCE_TYPES = (
        SOURCE_IP, HTTP_COOKIE, APP_COOKIE
    ) = ('SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE')

    ATTRIBUTES = (
        HEALTHMONITOR_ID_ATTR, LISTENERS_ATTR, MEMBERS_ATTR
    ) = ('healthmonitor_id', 'listeners', 'members')

    properties_schema = {
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of this pool.'),
            default=True,
            update_allowed=True,
            # Only 'True' is accepted here, so the admin state cannot be
            # switched off through this resource.
            constraints=[constraints.AllowedValues(['True'])]),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of this pool.'),
            update_allowed=True,
            default=''),
        SESSION_PERSISTENCE: properties.Schema(
            properties.Schema.MAP,
            _('Configuration of session persistence.'),
            schema={
                SESSION_PERSISTENCE_TYPE: properties.Schema(
                    properties.Schema.STRING,
                    _('Method of implementation of session '
                      'persistence feature.'),
                    required=True,
                    constraints=[constraints.AllowedValues(
                        SESSION_PERSISTENCE_TYPES)]),
                SESSION_PERSISTENCE_COOKIE_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of the cookie, '
                      'required if type is APP_COOKIE.'))
            },
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of this pool.'),
            update_allowed=True),
        LB_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('The algorithm used to distribute load between the members of '
              'the pool.'),
            required=True,
            constraints=[
                constraints.AllowedValues(
                    ['ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP']),
            ],
            update_allowed=True,
        ),
        LISTENER: properties.Schema(
            properties.Schema.STRING,
            # Typo fixed: "Listner" -> "Listener".
            _('Listener name or ID to be associated with this pool.'),
            required=True),
        PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            _('Protocol of the pool.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['TCP', 'HTTP']),
            ]),
    }

    attributes_schema = {
        HEALTHMONITOR_ID_ATTR: attributes.Schema(
            _('ID of the health monitor associated with this pool.'),
            type=attributes.Schema.STRING),
        LISTENERS_ATTR: attributes.Schema(
            _('Listener associated with this pool.'),
            type=attributes.Schema.STRING),
        MEMBERS_ATTR: attributes.Schema(
            _('Members associated with this pool.'),
            type=attributes.Schema.LIST),
    }

    def __init__(self, name, definition, stack):
        super(Pool, self).__init__(name, definition, stack)
        # Lazily-resolved ID of the loadbalancer owning our listener.
        self._lb_id = None

    @property
    def lb_id(self):
        """ID of the loadbalancer that this pool's listener belongs to."""
        if self._lb_id is None:
            listener_id = self.client_plugin().find_resourceid_by_name_or_id(
                'listener', self.properties[self.LISTENER])
            listener = self.client().show_listener(listener_id)['listener']
            self._lb_id = listener['loadbalancers'][0]['id']
        return self._lb_id

    def validate(self):
        """Validate session persistence settings.

        APP_COOKIE persistence requires a cookie name; SOURCE_IP
        persistence forbids one.
        """
        res = super(Pool, self).validate()
        if res:
            return res

        if self.properties[self.SESSION_PERSISTENCE] is not None:
            session_p = self.properties[self.SESSION_PERSISTENCE]
            persistence_type = session_p[self.SESSION_PERSISTENCE_TYPE]
            if persistence_type == self.APP_COOKIE:
                if not session_p.get(self.SESSION_PERSISTENCE_COOKIE_NAME):
                    msg = (_('Property %(cookie)s is required when %(sp)s '
                             'type is set to %(app)s.') %
                           {'cookie': self.SESSION_PERSISTENCE_COOKIE_NAME,
                            'sp': self.SESSION_PERSISTENCE,
                            'app': self.APP_COOKIE})
                    raise exception.StackValidationFailed(message=msg)
            elif persistence_type == self.SOURCE_IP:
                if session_p.get(self.SESSION_PERSISTENCE_COOKIE_NAME):
                    msg = (_('Property %(cookie)s must NOT be specified when '
                             '%(sp)s type is set to %(ip)s.') %
                           {'cookie': self.SESSION_PERSISTENCE_COOKIE_NAME,
                            'sp': self.SESSION_PERSISTENCE,
                            'ip': self.SOURCE_IP})
                    raise exception.StackValidationFailed(message=msg)

    def _check_lb_status(self):
        return self.client_plugin().check_lb_status(self.lb_id)

    def handle_create(self):
        """Prepare the request body; creation happens in the check phase."""
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        self.client_plugin().resolve_listener(
            properties, self.LISTENER, 'listener_id')

        session_p = properties.get(self.SESSION_PERSISTENCE)
        if session_p is not None:
            session_props = self.prepare_properties(session_p, None)
            properties[self.SESSION_PERSISTENCE] = session_props
        # Returned value is passed to check_create_complete by the engine.
        return properties

    def check_create_complete(self, properties):
        if self.resource_id is None:
            try:
                pool = self.client().create_lbaas_pool(
                    {'pool': properties})['pool']
                self.resource_id_set(pool['id'])
            except Exception as ex:
                # The loadbalancer may still be immutable; retry later.
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def _show_resource(self):
        return self.client().show_lbaas_pool(self.resource_id)['pool']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        # The actual API call is issued from check_update_complete so it
        # can be retried while the loadbalancer is immutable.
        self._update_called = False
        return prop_diff

    def check_update_complete(self, prop_diff):
        if not prop_diff:
            return True

        if not self._update_called:
            try:
                self.client().update_lbaas_pool(self.resource_id,
                                                {'pool': prop_diff})
                self._update_called = True
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def handle_delete(self):
        # Deletion is issued from check_delete_complete for the same
        # retry-while-immutable reason as update.
        self._delete_called = False

    def check_delete_complete(self, data):
        if self.resource_id is None:
            return True

        if not self._delete_called:
            try:
                self.client().delete_lbaas_pool(self.resource_id)
                self._delete_called = True
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                elif self.client_plugin().is_not_found(ex):
                    return True
                raise

        return self._check_lb_status()
class FloatingIP(neutron.NeutronResource):
    """A resource for managing Neutron floating ips.

    Floating IP addresses can change their association between routers by
    action of the user. One of the most common use cases for floating IPs is
    to provide public IP addresses to a private cloud, where there are a
    limited number of IP addresses available. Another is for a public cloud
    user to have a "static" IP address that can be reassigned when an instance
    is upgraded or moved.
    """

    PROPERTIES = (
        FLOATING_NETWORK_ID, FLOATING_NETWORK, VALUE_SPECS,
        PORT_ID, FIXED_IP_ADDRESS, FLOATING_IP_ADDRESS,
    ) = (
        'floating_network_id', 'floating_network', 'value_specs',
        'port_id', 'fixed_ip_address', 'floating_ip_address',
    )

    ATTRIBUTES = (
        ROUTER_ID, TENANT_ID, FLOATING_NETWORK_ID_ATTR,
        FIXED_IP_ADDRESS_ATTR, FLOATING_IP_ADDRESS_ATTR, PORT_ID_ATTR,
    ) = (
        'router_id', 'tenant_id', 'floating_network_id',
        'fixed_ip_address', 'floating_ip_address', 'port_id',
    )

    properties_schema = {
        # Hidden since 5.0.0 in favour of FLOATING_NETWORK; translated to
        # FLOATING_NETWORK by translation_rules() below.
        FLOATING_NETWORK_ID: properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                message=_('Use property %s.') % FLOATING_NETWORK,
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        FLOATING_NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('Network to allocate floating IP from.'),
            support_status=support.SupportStatus(version='2014.2'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        VALUE_SPECS: properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the "floatingip" object in the '
              'creation request. Parameters are often specific to installed '
              'hardware or extensions.'),
            default={}),
        PORT_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of an existing port with at least one IP address to '
              'associate with this floating IP.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('neutron.port')]),
        FIXED_IP_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address to use if the port has multiple addresses.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('ip_addr')]),
        FLOATING_IP_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address of the floating IP. NOTE: The default policy '
              'setting in Neutron restricts usage of this property to '
              'administrative users only.'),
            constraints=[constraints.CustomConstraint('ip_addr')],
            support_status=support.SupportStatus(version='5.0.0'),
        ),
    }

    attributes_schema = {
        ROUTER_ID: attributes.Schema(
            _('ID of the router used as gateway, set when associated with a '
              'port.'),
            type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(
            _('The tenant owning this floating IP.'),
            type=attributes.Schema.STRING),
        FLOATING_NETWORK_ID_ATTR: attributes.Schema(
            _('ID of the network in which this IP is allocated.'),
            type=attributes.Schema.STRING),
        FIXED_IP_ADDRESS_ATTR: attributes.Schema(
            _('IP address of the associated port, if specified.'),
            type=attributes.Schema.STRING),
        FLOATING_IP_ADDRESS_ATTR: attributes.Schema(
            _('The allocated address of this IP.'),
            type=attributes.Schema.STRING),
        PORT_ID_ATTR: attributes.Schema(
            _('ID of the port associated with this IP.'),
            type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        """Map deprecated floating_network_id onto floating_network, then
        resolve the network name to an ID."""
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.FLOATING_NETWORK],
                value_path=[self.FLOATING_NETWORK_ID]),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.FLOATING_NETWORK],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='network')
        ]

    def add_dependencies(self, deps):
        """Add implicit dependencies on router resources in this stack.

        Three cases are covered: a RouterGateway on the same network as
        this floating IP, a RouterInterface plugging the subnet that this
        floating IP's port belongs to, and a Router whose external gateway
        is on the same network.
        """
        super(FloatingIP, self).add_dependencies(deps)

        for resource in six.itervalues(self.stack):
            # depend on any RouterGateway in this template with the same
            # network_id as this floating_network_id
            if resource.has_interface('OS::Neutron::RouterGateway'):
                gateway_network = resource.properties.get(
                    router.RouterGateway.NETWORK) or resource.properties.get(
                        router.RouterGateway.NETWORK_ID)
                floating_network = self.properties[self.FLOATING_NETWORK]
                if gateway_network == floating_network:
                    deps += (self, resource)

            # depend on any RouterInterface in this template which interfaces
            # with the same subnet that this floating IP's port is assigned
            # to
            elif resource.has_interface('OS::Neutron::RouterInterface'):

                def port_on_subnet(resource, subnet):
                    # True if the given Port resource sits on `subnet`.
                    if not resource.has_interface('OS::Neutron::Port'):
                        return False
                    fixed_ips = resource.properties.get(port.Port.FIXED_IPS)
                    if not fixed_ips:
                        # No explicit fixed IPs: fall back to checking the
                        # subnets of the port's network.
                        p_net = (resource.properties.get(port.Port.NETWORK) or
                                 resource.properties.get(port.Port.NETWORK_ID))
                        if p_net:
                            subnets = self.client().show_network(
                                p_net)['network']['subnets']
                            return subnet in subnets
                    else:
                        for fixed_ip in resource.properties.get(
                                port.Port.FIXED_IPS):
                            port_subnet = (
                                fixed_ip.get(port.Port.FIXED_IP_SUBNET)
                                or fixed_ip.get(port.Port.FIXED_IP_SUBNET_ID))
                            # NOTE(review): this returns after inspecting
                            # only the first fixed IP entry; confirm whether
                            # all entries should be checked.
                            return subnet == port_subnet
                    return False

                interface_subnet = (
                    resource.properties.get(router.RouterInterface.SUBNET) or
                    resource.properties.get(router.RouterInterface.SUBNET_ID))
                # during create we have only unresolved value for functions, so
                # can not use None value for building correct dependencies
                if interface_subnet:
                    for d in deps.graph()[self]:
                        if port_on_subnet(d, interface_subnet):
                            deps += (self, resource)
                            break

            # depend on Router with EXTERNAL_GATEWAY_NETWORK property
            # this template with the same network_id as this
            # floating_network_id
            elif resource.has_interface('OS::Neutron::Router'):
                gateway = resource.properties.get(
                    router.Router.EXTERNAL_GATEWAY)
                if gateway:
                    gateway_network = gateway.get(
                        router.Router.EXTERNAL_GATEWAY_NETWORK)
                    floating_network = self.properties[self.FLOATING_NETWORK]
                    if gateway_network == floating_network:
                        deps += (self, resource)

    def validate(self):
        """Reject FIXED_IP_ADDRESS when no PORT_ID is given."""
        super(FloatingIP, self).validate()
        # fixed_ip_address cannot be specified without a port_id
        if self.properties[self.PORT_ID] is None and self.properties[
                self.FIXED_IP_ADDRESS] is not None:
            raise exception.ResourcePropertyDependency(
                prop1=self.FIXED_IP_ADDRESS, prop2=self.PORT_ID)

    def handle_create(self):
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        # translation_rules() has already resolved the network name to an
        # ID; the API expects the floating_network_id key.
        props['floating_network_id'] = props.pop(self.FLOATING_NETWORK)
        fip = self.client().create_floatingip({
            'floatingip': props})['floatingip']
        self.resource_id_set(fip['id'])

    def _show_resource(self):
        return self.client().show_floatingip(self.resource_id)['floatingip']

    def handle_delete(self):
        if not self.resource_id:
            return

        with self.client_plugin().ignore_not_found:
            self.client().delete_floatingip(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            # Fall back to current property values so that updating one of
            # port_id/fixed_ip_address does not clear the other.
            port_id = prop_diff.get(self.PORT_ID,
                                    self.properties[self.PORT_ID])
            fixed_ip_address = prop_diff.get(
                self.FIXED_IP_ADDRESS,
                self.properties[self.FIXED_IP_ADDRESS])
            request_body = {
                'floatingip': {
                    'port_id': port_id,
                    'fixed_ip_address': fixed_ip_address}}

            self.client().update_floatingip(self.resource_id, request_body)
class QoSPolicy(neutron.NeutronResource):
    """A resource for Neutron QoS Policy.

    This QoS policy can be associated with neutron resources,
    such as port and network, to provide QoS capabilities.

    The default policy usage of this resource is limited to
    administrators only.
    """

    required_service_extension = 'qos'

    support_status = support.SupportStatus(version='6.0.0')

    PROPERTIES = (
        NAME, DESCRIPTION, SHARED, TENANT_ID,
    ) = (
        'name', 'description', 'shared', 'tenant_id',
    )

    ATTRIBUTES = (
        RULES_ATTR,
    ) = (
        'rules',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name for the QoS policy.'),
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('The description for the QoS policy.'),
            update_allowed=True
        ),
        SHARED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether this QoS policy should be shared to other tenants.'),
            default=False,
            update_allowed=True
        ),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('The owner tenant ID of this QoS policy.')
        ),
    }

    attributes_schema = {
        RULES_ATTR: attributes.Schema(
            _("A list of all rules for the QoS policy."),
            type=attributes.Schema.LIST
        )
    }

    def handle_create(self):
        # Resolve property values, generating a name when none was given.
        body = self.prepare_properties(self.properties,
                                       self.physical_resource_name())
        created = self.client().create_qos_policy({'policy': body})['policy']
        self.resource_id_set(created['id'])

    def handle_delete(self):
        # Nothing to do if the policy was never created; a missing policy
        # on the neutron side is treated as already deleted.
        if self.resource_id is not None:
            with self.client_plugin().ignore_not_found:
                self.client().delete_qos_policy(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if not prop_diff:
            return
        self.prepare_update_properties(prop_diff)
        self.client().update_qos_policy(self.resource_id,
                                        {'policy': prop_diff})

    def _show_resource(self):
        return self.client().show_qos_policy(self.resource_id)['policy']
class Subnet(resource.Resource):
    """An AWS-compatible subnet created inside a VPC network."""

    PROPERTIES = (
        AVAILABILITY_ZONE, CIDR_BLOCK, VPC_ID, TAGS,
    ) = (
        'AvailabilityZone', 'CidrBlock', 'VpcId', 'Tags',
    )

    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )

    ATTRIBUTES = (
        AVAILABILITY_ZONE,
    )

    properties_schema = {
        AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _('Availability zone in which you want the subnet.')
        ),
        CIDR_BLOCK: properties.Schema(
            properties.Schema.STRING,
            _('CIDR block to apply to subnet.'),
            required=True
        ),
        VPC_ID: properties.Schema(
            properties.Schema.STRING,
            _('Ref structure that contains the ID of the VPC on which you '
              'want to create the subnet.'),
            required=True
        ),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                _('List of tags to attach to this resource.'),
                schema={
                    TAG_KEY: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                    TAG_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                },
                implemented=False,
            )
        ),
    }

    attributes_schema = {
        AVAILABILITY_ZONE: attributes.Schema(
            _('Availability Zone of the subnet.'),
            type=attributes.Schema.STRING
        ),
    }

    default_client_name = 'neutron'

    def handle_create(self):
        """Create the neutron subnet and plug it into the VPC's router."""
        # TODO(sbaker) Verify that this CidrBlock is within the vpc CidrBlock
        vpc_net = self.properties.get(self.VPC_ID)
        request = {
            'network_id': vpc_net,
            'cidr': self.properties.get(self.CIDR_BLOCK),
            'name': self.physical_resource_name(),
            'ip_version': 4
        }
        created = self.client().create_subnet({'subnet': request})['subnet']
        self.resource_id_set(created['id'])

        vpc_router = vpc.VPC.router_for_vpc(self.client(), vpc_net)
        if vpc_router:
            self.client().add_interface_router(
                vpc_router['id'], {'subnet_id': created['id']})

    def handle_delete(self):
        """Detach the subnet from the VPC router, then delete it."""
        if self.resource_id is None:
            return

        vpc_net = self.properties.get(self.VPC_ID)
        sn_id = self.resource_id

        # Either piece may already be gone; treat not-found as success.
        with self.client_plugin().ignore_not_found:
            vpc_router = vpc.VPC.router_for_vpc(self.client(), vpc_net)
            if vpc_router:
                self.client().remove_interface_router(
                    vpc_router['id'], {'subnet_id': sn_id})

        with self.client_plugin().ignore_not_found:
            self.client().delete_subnet(sn_id)

    def _resolve_attribute(self, name):
        # The only exposed attribute simply echoes the property value.
        if name != self.AVAILABILITY_ZONE:
            return None
        return self.properties.get(self.AVAILABILITY_ZONE)
class RandomString(resource.Resource):
    """A resource which generates a random string.

    This is useful for configuring passwords and secrets on services. Random
    string can be generated from specified character sequences, which means
    that all characters will be randomly chosen from specified sequences, or
    with some classes, e.g. letterdigits, which means that all character will
    be randomly chosen from union of ascii letters and digits. Output string
    will be randomly generated string with specified length (or with length
    of 32, if length property isn't specified).
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        LENGTH, SEQUENCE, CHARACTER_CLASSES, CHARACTER_SEQUENCES,
        SALT,
    ) = (
        'length', 'sequence', 'character_classes', 'character_sequences',
        'salt',
    )

    _CHARACTER_CLASSES_KEYS = (
        CHARACTER_CLASSES_CLASS, CHARACTER_CLASSES_MIN,
    ) = (
        'class', 'min',
    )

    _CHARACTER_SEQUENCES = (
        CHARACTER_SEQUENCES_SEQUENCE, CHARACTER_SEQUENCES_MIN,
    ) = (
        'sequence', 'min',
    )

    ATTRIBUTES = (
        VALUE,
    ) = (
        'value',
    )

    properties_schema = {
        LENGTH: properties.Schema(
            properties.Schema.INTEGER,
            _('Length of the string to generate.'),
            default=32,
            constraints=[
                constraints.Range(1, 512),
            ]),
        SEQUENCE: properties.Schema(
            properties.Schema.STRING,
            _('Sequence of characters to build the random string from.'),
            constraints=[
                constraints.AllowedValues([
                    'lettersdigits', 'letters', 'lowercase', 'uppercase',
                    'digits', 'hexdigits', 'octdigits'
                ]),
            ],
            # Hidden since 5.0.0; translated to CHARACTER_CLASSES below.
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_('Use property %s.') % CHARACTER_CLASSES,
                    version='2014.2'))),
        CHARACTER_CLASSES: properties.Schema(
            properties.Schema.LIST,
            _('A list of character class and their constraints to generate '
              'the random string from.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CHARACTER_CLASSES_CLASS: properties.Schema(
                        properties.Schema.STRING,
                        (_('A character class and its corresponding %(min)s '
                           'constraint to generate the random string from.')
                         % {'min': CHARACTER_CLASSES_MIN}),
                        constraints=[
                            constraints.AllowedValues([
                                'lettersdigits', 'letters', 'lowercase',
                                'uppercase', 'digits', 'hexdigits',
                                'octdigits'
                            ]),
                        ],
                        default='lettersdigits'),
                    CHARACTER_CLASSES_MIN: properties.Schema(
                        properties.Schema.INTEGER,
                        _('The minimum number of characters from this '
                          'character class that will be in the generated '
                          'string.'),
                        default=1,
                        constraints=[
                            constraints.Range(1, 512),
                        ])
                }),
            # add defaults for backward compatibility
            default=[{CHARACTER_CLASSES_CLASS: 'lettersdigits',
                      CHARACTER_CLASSES_MIN: 1}]),
        CHARACTER_SEQUENCES: properties.Schema(
            properties.Schema.LIST,
            _('A list of character sequences and their constraints to '
              'generate the random string from.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CHARACTER_SEQUENCES_SEQUENCE: properties.Schema(
                        properties.Schema.STRING,
                        _('A character sequence and its corresponding %(min)s '
                          'constraint to generate the random string '
                          'from.') % {'min': CHARACTER_SEQUENCES_MIN},
                        required=True),
                    CHARACTER_SEQUENCES_MIN: properties.Schema(
                        properties.Schema.INTEGER,
                        _('The minimum number of characters from this '
                          'sequence that will be in the generated '
                          'string.'),
                        default=1,
                        constraints=[
                            constraints.Range(1, 512),
                        ])
                })),
        SALT: properties.Schema(
            properties.Schema.STRING,
            _('Value which can be set or changed on stack update to trigger '
              'the resource for replacement with a new random string. The '
              'salt value itself is ignored by the random generator.')),
    }

    attributes_schema = {
        VALUE: attributes.Schema(
            _('The random string generated by this resource. This value is '
              'also available by referencing the resource.'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING),
    }

    _sequences = {
        'lettersdigits': string.ascii_letters + string.digits,
        'letters': string.ascii_letters,
        'lowercase': string.ascii_lowercase,
        'uppercase': string.ascii_uppercase,
        'digits': string.digits,
        'hexdigits': string.digits + 'ABCDEF',
        'octdigits': string.octdigits
    }

    # SECURITY: the generated strings are used as passwords and secrets
    # (see the class docstring), so draw characters from the OS entropy
    # pool via SystemRandom instead of the default, deterministic
    # Mersenne Twister PRNG in the random module.
    _secure_random = random.SystemRandom()

    def translation_rules(self, props):
        """Translate the deprecated 'sequence' property to a character class.
        """
        if props.get(self.SEQUENCE):
            return [
                translation.TranslationRule(
                    props,
                    translation.TranslationRule.ADD,
                    [self.CHARACTER_CLASSES],
                    [{self.CHARACTER_CLASSES_CLASS: props.get(self.SEQUENCE),
                      self.CHARACTER_CLASSES_MIN: 1}]),
                translation.TranslationRule(
                    props,
                    translation.TranslationRule.DELETE,
                    [self.SEQUENCE])
            ]

    def _generate_random_string(self, char_sequences, char_classes, length):
        """Return a random string of the given length.

        First take the minimum number of characters from each requested
        sequence and class, then pad up to ``length`` with characters from
        the combined pools, and finally shuffle so the mandatory characters
        are not clustered at the front.
        """
        rng = self._secure_random
        random_string = ""

        # Add the minimum number of chars from each char sequence & char class
        if char_sequences:
            for char_seq in char_sequences:
                seq = char_seq[self.CHARACTER_SEQUENCES_SEQUENCE]
                seq_min = char_seq[self.CHARACTER_SEQUENCES_MIN]
                for _i in range(seq_min):
                    random_string += rng.choice(seq)

        if char_classes:
            for char_class in char_classes:
                cclass_class = char_class[self.CHARACTER_CLASSES_CLASS]
                cclass_seq = self._sequences[cclass_class]
                cclass_min = char_class[self.CHARACTER_CLASSES_MIN]
                for _i in range(cclass_min):
                    random_string += rng.choice(cclass_seq)

        def random_class_char():
            cclass_dict = rng.choice(char_classes)
            cclass_class = cclass_dict[self.CHARACTER_CLASSES_CLASS]
            cclass_seq = self._sequences[cclass_class]
            return rng.choice(cclass_seq)

        def random_seq_char():
            seq_dict = rng.choice(char_sequences)
            seq = seq_dict[self.CHARACTER_SEQUENCES_SEQUENCE]
            return rng.choice(seq)

        # Fill up rest with random chars from provided sequences & classes
        if char_sequences and char_classes:
            # Weight the choice by the number of sequences/classes given.
            weighted_choices = ([True] * len(char_classes) +
                                [False] * len(char_sequences))

            while len(random_string) < length:
                if rng.choice(weighted_choices):
                    random_string += random_class_char()
                else:
                    random_string += random_seq_char()

        elif char_sequences:
            while len(random_string) < length:
                random_string += random_seq_char()

        else:
            while len(random_string) < length:
                random_string += random_class_char()

        # Randomize string
        random_string = ''.join(rng.sample(random_string,
                                           len(random_string)))
        return random_string

    def validate(self):
        """Reject a length smaller than the sum of the per-pool minimums."""
        super(RandomString, self).validate()
        char_sequences = self.properties[self.CHARACTER_SEQUENCES]
        char_classes = self.properties[self.CHARACTER_CLASSES]

        def char_min(char_dicts, min_prop):
            if char_dicts:
                return sum(char_dict[min_prop] for char_dict in char_dicts)
            return 0

        length = self.properties[self.LENGTH]
        min_length = (char_min(char_sequences,
                               self.CHARACTER_SEQUENCES_MIN) +
                      char_min(char_classes, self.CHARACTER_CLASSES_MIN))
        if min_length > length:
            msg = _("Length property cannot be smaller than combined "
                    "character class and character sequence minimums")
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        char_sequences = self.properties[self.CHARACTER_SEQUENCES]
        char_classes = self.properties[self.CHARACTER_CLASSES]
        length = self.properties[self.LENGTH]

        random_string = self._generate_random_string(char_sequences,
                                                     char_classes,
                                                     length)
        # Store redacted so the secret does not appear in resource data
        # listings.
        self.data_set('value', random_string, redact=True)
        self.resource_id_set(self.physical_resource_name())

    def _resolve_attribute(self, name):
        if name == self.VALUE:
            return self.data().get(self.VALUE)

    def get_reference_id(self):
        # Once created, Ref returns the generated string itself.
        if self.resource_id is not None:
            return self.data().get('value')
        else:
            return six.text_type(self.name)
class LoadBalancer(neutron.NeutronResource):
    """A resource for creating LBaaS v2 Load Balancers.

    This resource creates and manages Neutron LBaaS v2 Load Balancers,
    which allows traffic to be directed between servers.
    """

    support_status = support.SupportStatus(version='6.0.0')

    required_service_extension = 'lbaasv2'

    PROPERTIES = (
        DESCRIPTION, NAME, PROVIDER, VIP_ADDRESS, VIP_SUBNET,
        ADMIN_STATE_UP, TENANT_ID
    ) = (
        'description', 'name', 'provider', 'vip_address', 'vip_subnet',
        'admin_state_up', 'tenant_id'
    )

    ATTRIBUTES = (
        VIP_ADDRESS_ATTR, VIP_PORT_ATTR, VIP_SUBNET_ATTR
    ) = (
        'vip_address', 'vip_port_id', 'vip_subnet_id'
    )

    properties_schema = {
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of this Load Balancer.'),
            update_allowed=True,
            default=''
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of this Load Balancer.'),
            update_allowed=True
        ),
        PROVIDER: properties.Schema(
            properties.Schema.STRING,
            _('Provider for this Load Balancer.'),
            # NOTE(review): only the 'vlb' provider is accepted here —
            # confirm this matches the providers enabled in the deployment.
            constraints=[constraints.AllowedValues(['vlb'])]
        ),
        VIP_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address for the VIP.'),
            constraints=[
                constraints.CustomConstraint('ip_addr')
            ],
        ),
        VIP_SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('The name or ID of the subnet on which to allocate the VIP '
              'address.'),
            constraints=[
                constraints.CustomConstraint('neutron.subnet')
            ],
            required=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of this Load Balancer.'),
            default=True,
            update_allowed=True,
            # Only 'True' is accepted, so the admin state cannot be
            # switched off through this resource.
            constraints=[constraints.AllowedValues(['True'])]
        ),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the tenant who owns the Load Balancer. Only '
              'administrative users can specify a tenant ID other than '
              'their own.'),
            constraints=[
                constraints.CustomConstraint('keystone.project')
            ],
        )
    }

    attributes_schema = {
        VIP_ADDRESS_ATTR: attributes.Schema(
            _('The VIP address of the LoadBalancer.'),
            type=attributes.Schema.STRING
        ),
        VIP_PORT_ATTR: attributes.Schema(
            _('The VIP port of the LoadBalancer.'),
            type=attributes.Schema.STRING
        ),
        VIP_SUBNET_ATTR: attributes.Schema(
            _('The VIP subnet of the LoadBalancer.'),
            type=attributes.Schema.STRING
        )
    }

    def handle_create(self):
        """Create the loadbalancer, resolving the VIP subnet name to an ID."""
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name()
        )
        self.client_plugin().resolve_subnet(
            properties, self.VIP_SUBNET, 'vip_subnet_id')
        lb = self.client().create_loadbalancer(
            {'loadbalancer': properties})['loadbalancer']
        self.resource_id_set(lb['id'])

    def check_create_complete(self, data):
        # Poll the loadbalancer's status via the client plugin.
        return self.client_plugin().check_lb_status(self.resource_id)

    def _show_resource(self):
        return self.client().show_loadbalancer(
            self.resource_id)['loadbalancer']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            self.client().update_loadbalancer(
                self.resource_id, {'loadbalancer': prop_diff})
        # The diff is passed to check_update_complete for status polling.
        return prop_diff

    def check_update_complete(self, prop_diff):
        if prop_diff:
            return self.client_plugin().check_lb_status(self.resource_id)
        return True

    def handle_delete(self):
        # Deletion is performed from check_delete_complete, where the
        # loadbalancer's status can be polled between attempts.
        pass

    def check_delete_complete(self, data):
        if self.resource_id is None:
            return True
        try:
            try:
                # Only issue the delete once the LB reports a stable status.
                if self.client_plugin().check_lb_status(self.resource_id):
                    self.client().delete_loadbalancer(self.resource_id)
            except exception.ResourceInError:
                # Still try to delete loadbalancer in error state
                self.client().delete_loadbalancer(self.resource_id)
        except exceptions.NotFound:
            # Resource is gone
            return True
        # Not deleted yet (or delete just issued); poll again.
        return False
class SoftwareDeployment(signal_responder.SignalResponder): """This resource associates a server with some configuration. The configuration is to be deployed to that server. A deployment allows input values to be specified which map to the inputs schema defined in the config resource. These input values are interpreted by the configuration tool in a tool-specific manner. Whenever this resource goes to an IN_PROGRESS state, it creates an ephemeral config that includes the inputs values plus a number of extra inputs which have names prefixed with deploy_. The extra inputs relate to the current state of the stack, along with the information and credentials required to signal back the deployment results. Unless signal_transport=NO_SIGNAL, this resource will remain in an IN_PROGRESS state until the server signals it with the output values for that deployment. Those output values are then available as resource attributes, along with the default attributes deploy_stdout, deploy_stderr and deploy_status_code. Specifying actions other than the default CREATE and UPDATE will result in the deployment being triggered in those actions. For example this would allow cleanup configuration to be performed during actions SUSPEND and DELETE. A config could be designed to only work with some specific actions, or a config can read the value of the deploy_action input to allow conditional logic to perform different configuration for different actions. 
""" support_status = support.SupportStatus(version='2014.1') PROPERTIES = (CONFIG, SERVER, INPUT_VALUES, DEPLOY_ACTIONS, NAME, SIGNAL_TRANSPORT) = ('config', 'server', 'input_values', 'actions', 'name', 'signal_transport') ALLOWED_DEPLOY_ACTIONS = ( resource.Resource.CREATE, resource.Resource.UPDATE, resource.Resource.DELETE, resource.Resource.SUSPEND, resource.Resource.RESUME, ) ATTRIBUTES = (STDOUT, STDERR, STATUS_CODE) = ('deploy_stdout', 'deploy_stderr', 'deploy_status_code') DERIVED_CONFIG_INPUTS = ( DEPLOY_SERVER_ID, DEPLOY_ACTION, DEPLOY_SIGNAL_ID, DEPLOY_STACK_ID, DEPLOY_RESOURCE_NAME, DEPLOY_AUTH_URL, DEPLOY_USERNAME, DEPLOY_PASSWORD, DEPLOY_PROJECT_ID, DEPLOY_USER_ID, DEPLOY_SIGNAL_VERB, DEPLOY_SIGNAL_TRANSPORT, DEPLOY_QUEUE_ID) = ('deploy_server_id', 'deploy_action', 'deploy_signal_id', 'deploy_stack_id', 'deploy_resource_name', 'deploy_auth_url', 'deploy_username', 'deploy_password', 'deploy_project_id', 'deploy_user_id', 'deploy_signal_verb', 'deploy_signal_transport', 'deploy_queue_id') SIGNAL_TRANSPORTS = (CFN_SIGNAL, TEMP_URL_SIGNAL, HEAT_SIGNAL, NO_SIGNAL, ZAQAR_SIGNAL) = ('CFN_SIGNAL', 'TEMP_URL_SIGNAL', 'HEAT_SIGNAL', 'NO_SIGNAL', 'ZAQAR_SIGNAL') properties_schema = { CONFIG: properties.Schema( properties.Schema.STRING, _('ID of software configuration resource to execute when ' 'applying to the server.'), update_allowed=True), SERVER: properties.Schema( properties.Schema.STRING, _('ID of resource to apply configuration to. 
' 'Normally this should be a Nova server ID.'), required=True, ), INPUT_VALUES: properties.Schema( properties.Schema.MAP, _('Input values to apply to the software configuration on this ' 'server.'), update_allowed=True), DEPLOY_ACTIONS: properties.Schema( properties.Schema.LIST, _('Which lifecycle actions of the deployment resource will result ' 'in this deployment being triggered.'), update_allowed=True, default=[resource.Resource.CREATE, resource.Resource.UPDATE], constraints=[constraints.AllowedValues(ALLOWED_DEPLOY_ACTIONS)]), NAME: properties.Schema( properties.Schema.STRING, _('Name of the derived config associated with this deployment. ' 'This is used to apply a sort order to the list of ' 'configurations currently deployed to a server.'), update_allowed=True), SIGNAL_TRANSPORT: properties.Schema( properties.Schema.STRING, _('How the server should signal to heat with the deployment ' 'output values. CFN_SIGNAL will allow an HTTP POST to a CFN ' 'keypair signed URL. TEMP_URL_SIGNAL will create a ' 'Swift TempURL to be signaled via HTTP PUT. HEAT_SIGNAL ' 'will allow calls to the Heat API resource-signal using the ' 'provided keystone credentials. ZAQAR_SIGNAL will create a ' 'dedicated zaqar queue to be signaled using the provided ' 'keystone credentials. 
NO_SIGNAL will result in the resource ' 'going to the COMPLETE state without waiting for any signal.'), default=cfg.CONF.default_deployment_signal_transport, constraints=[ constraints.AllowedValues(SIGNAL_TRANSPORTS), ]), } attributes_schema = { STDOUT: attributes.Schema( _("Captured stdout from the configuration execution."), type=attributes.Schema.STRING), STDERR: attributes.Schema( _("Captured stderr from the configuration execution."), type=attributes.Schema.STRING), STATUS_CODE: attributes.Schema( _("Returned status code from the configuration execution."), type=attributes.Schema.STRING), } default_client_name = 'heat' no_signal_actions = () # No need to make metadata_update() calls since deployments have a # dedicated API for changing state on signals signal_needs_metadata_updates = False def _signal_transport_cfn(self): return self.properties[self.SIGNAL_TRANSPORT] == self.CFN_SIGNAL def _signal_transport_heat(self): return self.properties[self.SIGNAL_TRANSPORT] == self.HEAT_SIGNAL def _signal_transport_none(self): return self.properties[self.SIGNAL_TRANSPORT] == self.NO_SIGNAL def _signal_transport_temp_url(self): return self.properties[self.SIGNAL_TRANSPORT] == self.TEMP_URL_SIGNAL def _signal_transport_zaqar(self): return self.properties.get(self.SIGNAL_TRANSPORT) == self.ZAQAR_SIGNAL def _build_properties(self, config_id, action): props = { 'config_id': config_id, 'action': action, 'input_values': self.properties.get(self.INPUT_VALUES) } if self._signal_transport_none(): props['status'] = SoftwareDeployment.COMPLETE props['status_reason'] = _('Not waiting for outputs signal') else: props['status'] = SoftwareDeployment.IN_PROGRESS props['status_reason'] = _('Deploy data available') return props def _delete_derived_config(self, derived_config_id): try: self.rpc_client().delete_software_config(self.context, derived_config_id) except Exception as ex: self.rpc_client().ignore_error_named(ex, 'NotFound') def _get_derived_config(self, action, source_config): 
derived_params = self._build_derived_config_params( action, source_config) derived_config = self.rpc_client().create_software_config( self.context, **derived_params) return derived_config[rpc_api.SOFTWARE_CONFIG_ID] def _handle_action(self, action): if self.properties.get(self.CONFIG): config = self.rpc_client().show_software_config( self.context, self.properties.get(self.CONFIG)) else: config = {} if config.get(rpc_api.SOFTWARE_CONFIG_GROUP) == 'component': valid_actions = set() for conf in config['config']['configs']: valid_actions.update(conf['actions']) if action not in valid_actions: return elif action not in self.properties[self.DEPLOY_ACTIONS]: return props = self._build_properties( self._get_derived_config(action, config), action) if self.resource_id is None: resource_id = str(uuid.uuid4()) self.resource_id_set(resource_id) sd = self.rpc_client().create_software_deployment( self.context, deployment_id=resource_id, server_id=self.properties[SoftwareDeployment.SERVER], stack_user_project_id=self.stack.stack_user_project_id, **props) else: sd = self.rpc_client().show_software_deployment( self.context, self.resource_id) prev_derived_config = sd[rpc_api.SOFTWARE_DEPLOYMENT_CONFIG_ID] sd = self.rpc_client().update_software_deployment( self.context, deployment_id=self.resource_id, **props) if prev_derived_config: self._delete_derived_config(prev_derived_config) if not self._signal_transport_none(): # NOTE(pshchelo): sd is a simple dict, easy to serialize, # does not need fixing re LP bug #1393268 return sd def _check_complete(self): sd = self.rpc_client().check_software_deployment( self.context, self.resource_id, self.stack.time_remaining()) status = sd[rpc_api.SOFTWARE_DEPLOYMENT_STATUS] if status == SoftwareDeployment.COMPLETE: return True elif status == SoftwareDeployment.FAILED: status_reason = sd[rpc_api.SOFTWARE_DEPLOYMENT_STATUS_REASON] message = _("Deployment to server failed: %s") % status_reason LOG.info(message) raise exception.Error(message) def 
empty_config(self): return '' def _build_derived_config_params(self, action, source): scl = sc.SoftwareConfig derived_inputs = self._build_derived_inputs(action, source) derived_options = self._build_derived_options(action, source) derived_config = self._build_derived_config(action, source, derived_inputs, derived_options) derived_name = self.properties.get(self.NAME) or source.get(scl.NAME) return { scl.GROUP: source.get(scl.GROUP) or 'Heat::Ungrouped', scl.CONFIG: derived_config or self.empty_config(), scl.OPTIONS: derived_options, scl.INPUTS: derived_inputs, scl.OUTPUTS: source.get(scl.OUTPUTS), scl.NAME: derived_name or self.physical_resource_name() } def _build_derived_config(self, action, source, derived_inputs, derived_options): return source.get(sc.SoftwareConfig.CONFIG) def _build_derived_options(self, action, source): return source.get(sc.SoftwareConfig.OPTIONS) def _build_derived_inputs(self, action, source): scl = sc.SoftwareConfig inputs = copy.deepcopy(source.get(scl.INPUTS)) or [] input_values = dict(self.properties.get(self.INPUT_VALUES) or {}) for inp in inputs: input_key = inp[scl.NAME] inp['value'] = input_values.pop(input_key, inp[scl.DEFAULT]) # for any input values that do not have a declared input, add # a derived declared input so that they can be used as config # inputs for inpk, inpv in input_values.items(): inputs.append({scl.NAME: inpk, scl.TYPE: 'String', 'value': inpv}) inputs.extend([{ scl.NAME: self.DEPLOY_SERVER_ID, scl.DESCRIPTION: _('ID of the server being deployed to'), scl.TYPE: 'String', 'value': self.properties[self.SERVER] }, { scl.NAME: self.DEPLOY_ACTION, scl.DESCRIPTION: _('Name of the current action being deployed'), scl.TYPE: 'String', 'value': action }, { scl.NAME: self.DEPLOY_STACK_ID, scl.DESCRIPTION: _('ID of the stack this deployment belongs to'), scl.TYPE: 'String', 'value': self.stack.identifier().stack_path() }, { scl.NAME: self.DEPLOY_RESOURCE_NAME, scl.DESCRIPTION: _('Name of this deployment resource in the ' 
'stack'), scl.TYPE: 'String', 'value': self.name }, { scl.NAME: self.DEPLOY_SIGNAL_TRANSPORT, scl.DESCRIPTION: _('How the server should signal to heat with ' 'the deployment output values.'), scl.TYPE: 'String', 'value': self.properties[self.SIGNAL_TRANSPORT] }]) if self._signal_transport_cfn(): inputs.append({ scl.NAME: self.DEPLOY_SIGNAL_ID, scl.DESCRIPTION: _('ID of signal to use for signaling ' 'output values'), scl.TYPE: 'String', 'value': self._get_ec2_signed_url() }) inputs.append({ scl.NAME: self.DEPLOY_SIGNAL_VERB, scl.DESCRIPTION: _('HTTP verb to use for signaling ' 'output values'), scl.TYPE: 'String', 'value': 'POST' }) elif self._signal_transport_temp_url(): inputs.append({ scl.NAME: self.DEPLOY_SIGNAL_ID, scl.DESCRIPTION: _('ID of signal to use for signaling ' 'output values'), scl.TYPE: 'String', 'value': self._get_swift_signal_url() }) inputs.append({ scl.NAME: self.DEPLOY_SIGNAL_VERB, scl.DESCRIPTION: _('HTTP verb to use for signaling ' 'output values'), scl.TYPE: 'String', 'value': 'PUT' }) elif self._signal_transport_heat() or self._signal_transport_zaqar(): creds = self._get_heat_signal_credentials() inputs.extend([{ scl.NAME: self.DEPLOY_AUTH_URL, scl.DESCRIPTION: _('URL for API authentication'), scl.TYPE: 'String', 'value': creds['auth_url'] }, { scl.NAME: self.DEPLOY_USERNAME, scl.DESCRIPTION: _('Username for API authentication'), scl.TYPE: 'String', 'value': creds['username'] }, { scl.NAME: self.DEPLOY_USER_ID, scl.DESCRIPTION: _('User ID for API authentication'), scl.TYPE: 'String', 'value': creds['user_id'] }, { scl.NAME: self.DEPLOY_PASSWORD, scl.DESCRIPTION: _('Password for API authentication'), scl.TYPE: 'String', 'value': creds['password'] }, { scl.NAME: self.DEPLOY_PROJECT_ID, scl.DESCRIPTION: _('ID of project for API authentication'), scl.TYPE: 'String', 'value': creds['project_id'] }]) if self._signal_transport_zaqar(): inputs.append({ scl.NAME: self.DEPLOY_QUEUE_ID, scl.DESCRIPTION: _('ID of queue to use for signaling ' 'output 
values'), scl.TYPE: 'String', 'value': self._get_zaqar_signal_queue_id() }) return inputs def handle_create(self): return self._handle_action(self.CREATE) def check_create_complete(self, sd): if not sd: return True return self._check_complete() def handle_update(self, json_snippet, tmpl_diff, prop_diff): if prop_diff: self.properties = json_snippet.properties(self.properties_schema, self.context) return self._handle_action(self.UPDATE) def check_update_complete(self, sd): if not sd: return True return self._check_complete() def handle_delete(self): try: return self._handle_action(self.DELETE) except Exception as ex: self.rpc_client().ignore_error_named(ex, 'NotFound') def check_delete_complete(self, sd=None): if not sd or self._check_complete(): self._delete_resource() return True def _delete_resource(self): self._delete_signals() self._delete_user() derived_config_id = None if self.resource_id is not None: try: sd = self.rpc_client().show_software_deployment( self.context, self.resource_id) derived_config_id = sd[rpc_api.SOFTWARE_DEPLOYMENT_CONFIG_ID] self.rpc_client().delete_software_deployment( self.context, self.resource_id) except Exception as ex: self.rpc_client().ignore_error_named(ex, 'NotFound') if derived_config_id: self._delete_derived_config(derived_config_id) def handle_suspend(self): return self._handle_action(self.SUSPEND) def check_suspend_complete(self, sd): if not sd: return True return self._check_complete() def handle_resume(self): return self._handle_action(self.RESUME) def check_resume_complete(self, sd): if not sd: return True return self._check_complete() def handle_signal(self, details): return self.rpc_client().signal_software_deployment( self.context, self.resource_id, details, timeutils.utcnow().isoformat()) def get_attribute(self, key, *path): """Resource attributes map to deployment outputs values.""" sd = self.rpc_client().show_software_deployment( self.context, self.resource_id) ov = sd[rpc_api.SOFTWARE_DEPLOYMENT_OUTPUT_VALUES] or 
{} if key in ov: attribute = ov.get(key) return attributes.select_from_attribute(attribute, path) # Since there is no value for this key yet, check the output schemas # to find out if the key is valid sc = self.rpc_client().show_software_config( self.context, self.properties[self.CONFIG]) outputs = sc[rpc_api.SOFTWARE_CONFIG_OUTPUTS] or [] output_keys = [output['name'] for output in outputs] if key not in output_keys and key not in self.ATTRIBUTES: raise exception.InvalidTemplateAttribute(resource=self.name, key=key) return None def validate(self): """Validate any of the provided params. :raises StackValidationFailed: if any property failed validation. """ super(SoftwareDeployment, self).validate() server = self.properties[self.SERVER] if server: res = self.stack.resource_by_refid(server) if res: if not (res.properties.get('user_data_format') == 'SOFTWARE_CONFIG'): raise exception.StackValidationFailed(message=_( "Resource %s's property user_data_format should be " "set to SOFTWARE_CONFIG since there are software " "deployments on it.") % server)
class ElasticIp(resource.Resource):
    """An AWS-compatible Elastic IP resource.

    Allocates a floating IP address, backed either by a Neutron floating IP
    (when the ``Domain`` property is set to ``vpc``) or by a Nova floating IP
    otherwise, and optionally associates it with a server instance.
    """

    PROPERTIES = (
        DOMAIN, INSTANCE_ID,
    ) = (
        'Domain', 'InstanceId',
    )

    ATTRIBUTES = (ALLOCATION_ID, ) = ('AllocationId', )

    properties_schema = {
        DOMAIN: properties.Schema(
            properties.Schema.STRING,
            _('Set to "vpc" to have IP address allocation associated to your '
              'VPC.'),
            constraints=[
                constraints.AllowedValues(['vpc']),
            ]),
        INSTANCE_ID: properties.Schema(
            properties.Schema.STRING,
            _('Instance ID to associate with EIP.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('nova.server')]),
    }

    attributes_schema = {
        ALLOCATION_ID: attributes.Schema(
            _('ID that AWS assigns to represent the allocation of the address '
              'for use with Amazon VPC. Returned only for VPC elastic IP '
              'addresses.'),
            type=attributes.Schema.STRING),
    }

    default_client_name = 'nova'

    def __init__(self, name, json_snippet, stack):
        super(ElasticIp, self).__init__(name, json_snippet, stack)
        # Lazily-resolved floating IP address string; populated on first
        # successful lookup in _ipaddress() or directly in handle_create().
        self.ipaddress = None

    def _ipaddress(self):
        """Return the floating IP address for this resource.

        Looks the address up from Neutron (VPC domain) or Nova on first use
        and caches it in ``self.ipaddress``. Returns '' when the address
        cannot be resolved (e.g. the backing floating IP no longer exists).
        """
        if self.ipaddress is None and self.resource_id is not None:
            if self.properties[self.DOMAIN]:
                try:
                    ips = self.neutron().show_floatingip(self.resource_id)
                except Exception as ex:
                    # NotFound is tolerated; any other error propagates.
                    self.client_plugin('neutron').ignore_not_found(ex)
                else:
                    self.ipaddress = ips['floatingip']['floating_ip_address']
            else:
                try:
                    ips = self.client().floating_ips.get(self.resource_id)
                except Exception as e:
                    self.client_plugin('nova').ignore_not_found(e)
                else:
                    self.ipaddress = ips.ip
        return self.ipaddress or ''

    def handle_create(self):
        """Allocate a floating IP for the current tenant."""
        ips = None
        if self.properties[self.DOMAIN]:
            # VPC domain: allocate a Neutron floating IP on the external
            # network resolved via the InternetGateway helper.
            ext_net = internet_gateway.InternetGateway.get_external_network_id(
                self.neutron())
            props = {'floating_network_id': ext_net}
            ips = self.neutron().create_floatingip({
                'floatingip': props})['floatingip']
            self.ipaddress = ips['floating_ip_address']
            self.resource_id_set(ips['id'])
            LOG.info(_LI('ElasticIp create %s'), str(ips))
        else:
            try:
                ips = self.client().floating_ips.create()
            except Exception as e:
                # Always re-raise, but log a hint first when the failure is
                # the well-known "no default floating IP pool" case.
                with excutils.save_and_reraise_exception():
                    if self.client_plugin('nova').is_not_found(e):
                        LOG.error(
                            _LE("No default floating IP pool configured."
                                " Set 'default_floating_pool' in "
                                "nova.conf."))
            if ips:
                self.ipaddress = ips.ip
                self.resource_id_set(ips.id)
                LOG.info(_LI('ElasticIp create %s'), str(ips))

        instance_id = self.properties[self.INSTANCE_ID]
        if instance_id:
            server = self.client().servers.get(instance_id)
            server.add_floating_ip(self._ipaddress())

    def handle_delete(self):
        """Disassociate (if needed) and deallocate the floating IP."""
        if self.resource_id is None:
            return
        # The EIP may exist without an association (e.g. association failed
        # during creation). Attempting to disassociate in that case raises,
        # so catch and ignore those errors before deallocating the EIP.
        instance_id = self.properties[self.INSTANCE_ID]
        if instance_id:
            try:
                server = self.client().servers.get(instance_id)
                if server:
                    server.remove_floating_ip(self._ipaddress())
            except Exception as e:
                is_not_found = self.client_plugin('nova').is_not_found(e)
                is_unprocessable_entity = self.client_plugin(
                    'nova').is_unprocessable_entity(e)
                if (not is_not_found and not is_unprocessable_entity):
                    raise

        # deallocate the eip
        if self.properties[self.DOMAIN]:
            with self.client_plugin('neutron').ignore_not_found:
                self.neutron().delete_floatingip(self.resource_id)
        else:
            with self.client_plugin('nova').ignore_not_found:
                self.client().floating_ips.delete(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-associate the EIP when the InstanceId property changes."""
        if prop_diff:
            if self.INSTANCE_ID in prop_diff:
                instance_id = prop_diff.get(self.INSTANCE_ID)
                if instance_id:
                    # no need to remove the floating ip from the old instance,
                    # nova does this automatically when calling
                    # add_floating_ip().
                    server = self.client().servers.get(instance_id)
                    server.add_floating_ip(self._ipaddress())
                else:
                    # to remove the floating_ip from the old instance
                    instance_id_old = self.properties[self.INSTANCE_ID]
                    if instance_id_old:
                        server = self.client().servers.get(instance_id_old)
                        server.remove_floating_ip(self._ipaddress())

    def get_reference_id(self):
        # References resolve to the IP address itself, falling back to the
        # resource name when the address cannot be determined.
        eip = self._ipaddress()
        if eip:
            return six.text_type(eip)
        else:
            return six.text_type(self.name)

    def _resolve_attribute(self, name):
        # NOTE(review): this returns the resource_id for AllocationId
        # unconditionally, while the attribute description says it is
        # "Returned only for VPC elastic IP addresses" — confirm whether the
        # non-VPC case should return None instead.
        if name == self.ALLOCATION_ID:
            return six.text_type(self.resource_id)
class ResourceGroup(stack_resource.StackResource):
    """Creates one or more identically configured nested resources.

    In addition to the `refs` attribute, this resource implements synthetic
    attributes that mirror those of the resources in the group. When
    getting an attribute from this resource, however, a list of attribute
    values for each resource in the group is returned. To get attribute values
    for a single resource in the group, synthetic attributes of the form
    `resource.{resource index}.{attribute name}` can be used. The resource ID
    of a particular resource in the group can be obtained via the synthetic
    attribute `resource.{resource index}`. Note, that if you get attribute
    without `{resource index}`, e.g. `[resource, {attribute_name}]`, you'll get
    a list of this attribute's value for all resources in group.

    While each resource in the group will be identically configured, this
    resource does allow for some index-based customization of the properties
    of the resources in the group. For example::

      resources:
        my_indexed_group:
          type: OS::Heat::ResourceGroup
          properties:
            count: 3
            resource_def:
              type: OS::Nova::Server
              properties:
                # create a unique name for each server
                # using its index in the group
                name: my_server_%index%
                image: CentOS 6.5
                flavor: 4GB Performance

    would result in a group of three servers having the same image and flavor,
    but names of `my_server_0`, `my_server_1`, and `my_server_2`. The variable
    used for substitution can be customized by using the `index_var` property.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        COUNT, INDEX_VAR, RESOURCE_DEF, REMOVAL_POLICIES,
    ) = (
        'count', 'index_var', 'resource_def', 'removal_policies',
    )

    _RESOURCE_DEF_KEYS = (
        RESOURCE_DEF_TYPE, RESOURCE_DEF_PROPERTIES, RESOURCE_DEF_METADATA,
    ) = (
        'type', 'properties', 'metadata',
    )

    _REMOVAL_POLICIES_KEYS = (
        REMOVAL_RSRC_LIST,
    ) = (
        'resource_list',
    )

    _ROLLING_UPDATES_SCHEMA_KEYS = (
        MIN_IN_SERVICE, MAX_BATCH_SIZE, PAUSE_TIME,
    ) = (
        'min_in_service', 'max_batch_size', 'pause_time',
    )

    _BATCH_CREATE_SCHEMA_KEYS = (
        MAX_BATCH_SIZE, PAUSE_TIME,
    ) = (
        'max_batch_size', 'pause_time',
    )

    _UPDATE_POLICY_SCHEMA_KEYS = (
        ROLLING_UPDATE, BATCH_CREATE,
    ) = (
        'rolling_update', 'batch_create',
    )

    ATTRIBUTES = (
        REFS, ATTR_ATTRIBUTES,
    ) = (
        'refs', 'attributes',
    )

    properties_schema = {
        COUNT: properties.Schema(
            properties.Schema.INTEGER,
            _('The number of resources to create.'),
            default=1,
            constraints=[
                constraints.Range(min=0),
            ],
            update_allowed=True),
        INDEX_VAR: properties.Schema(
            properties.Schema.STRING,
            _('A variable that this resource will use to replace with the '
              'current index of a given resource in the group. Can be used, '
              'for example, to customize the name property of grouped '
              'servers in order to differentiate them when listed with '
              'nova client.'),
            default="%index%",
            constraints=[constraints.Length(min=3)],
            support_status=support.SupportStatus(version='2014.2')),
        RESOURCE_DEF: properties.Schema(
            properties.Schema.MAP,
            _('Resource definition for the resources in the group. The value '
              'of this property is the definition of a resource just as if '
              'it had been declared in the template itself.'),
            schema={
                RESOURCE_DEF_TYPE: properties.Schema(
                    properties.Schema.STRING,
                    _('The type of the resources in the group.'),
                    required=True),
                RESOURCE_DEF_PROPERTIES: properties.Schema(
                    properties.Schema.MAP,
                    _('Property values for the resources in the group.')),
                RESOURCE_DEF_METADATA: properties.Schema(
                    properties.Schema.MAP,
                    _('Supplied metadata for the resources in the group.'),
                    support_status=support.SupportStatus(version='5.0.0')),
            },
            required=True,
            update_allowed=True),
        REMOVAL_POLICIES: properties.Schema(
            properties.Schema.LIST,
            _('Policies for removal of resources on update.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                _('Policy to be processed when doing an update which '
                  'requires removal of specific resources.'),
                schema={
                    REMOVAL_RSRC_LIST: properties.Schema(
                        properties.Schema.LIST,
                        _("List of resources to be removed "
                          "when doing an update which requires removal of "
                          "specific resources. "
                          "The resource may be specified several ways: "
                          "(1) The resource name, as in the nested stack, "
                          "(2) The resource reference returned from "
                          "get_resource in a template, as available via "
                          "the 'refs' attribute. "
                          "Note this is destructive on update when specified; "
                          "even if the count is not being reduced, and once "
                          "a resource name is removed, it's name is never "
                          "reused in subsequent updates."),
                        default=[]),
                },
            ),
            update_allowed=True,
            default=[],
            support_status=support.SupportStatus(version='2015.1')),
    }

    attributes_schema = {
        REFS: attributes.Schema(
            _("A list of resource IDs for the resources in the group."),
            type=attributes.Schema.LIST),
        ATTR_ATTRIBUTES: attributes.Schema(
            _("A map of resource names to the specified attribute of each "
              "individual resource. "
              "Requires heat_template_version: 2014-10-16."),
            support_status=support.SupportStatus(version='2014.2'),
            type=attributes.Schema.MAP),
    }

    rolling_update_schema = {
        MIN_IN_SERVICE: properties.Schema(
            properties.Schema.INTEGER,
            _('The minimum number of resources in service while '
              'rolling updates are being executed.'),
            constraints=[constraints.Range(min=0)],
            default=0),
        MAX_BATCH_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('The maximum number of resources to replace at once.'),
            constraints=[constraints.Range(min=1)],
            default=1),
        PAUSE_TIME: properties.Schema(
            properties.Schema.NUMBER,
            _('The number of seconds to wait between batches of '
              'updates.'),
            constraints=[constraints.Range(min=0)],
            default=0),
    }

    batch_create_schema = {
        MAX_BATCH_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('The maximum number of resources to create at once.'),
            constraints=[constraints.Range(min=1)],
            default=1),
        PAUSE_TIME: properties.Schema(
            properties.Schema.NUMBER,
            _('The number of seconds to wait between batches.'),
            constraints=[constraints.Range(min=0)],
            default=0),
    }

    update_policy_schema = {
        ROLLING_UPDATE: properties.Schema(
            properties.Schema.MAP,
            schema=rolling_update_schema,
            support_status=support.SupportStatus(version='5.0.0')),
        BATCH_CREATE: properties.Schema(
            properties.Schema.MAP,
            schema=batch_create_schema,
            support_status=support.SupportStatus(version='5.0.0'))
    }

    def get_size(self):
        """Return the configured group size (the ``count`` property)."""
        return self.properties.get(self.COUNT)

    def validate_nested_stack(self):
        """Validate the group by assembling and validating a one-member stack."""
        # Only validate the resource definition (which may be a
        # nested template) if count is non-zero, to enable folks
        # to disable features via a zero count if they wish
        if not self.get_size():
            return

        test_tmpl = self._assemble_nested(["0"], include_all=True)
        res_def = next(
            six.itervalues(test_tmpl.resource_definitions(self.stack)))
        # make sure we can resolve the nested resource type
        self.stack.env.get_class_to_instantiate(res_def.resource_type)

        try:
            name = "%s-%s" % (self.stack.name, self.name)
            nested_stack = self._parse_nested_stack(
                name,
                test_tmpl,
                self.child_params())
            # Relax validation: the single test member may reference
            # parameters that only get values at real create time.
            nested_stack.strict_validate = False
            nested_stack.validate()
        except Exception as ex:
            msg = _("Failed to validate: %s") % six.text_type(ex)
            raise exception.StackValidationFailed(message=msg)

    def _name_blacklist(self):
        """Resolve the remove_policies to names for removal.

        Returns the set of member names that must never be (re)used,
        combining the persisted blacklist with any names/refids listed in
        the current removal_policies property.
        """
        nested = self.nested()

        # To avoid reusing names after removal, we store a comma-separated
        # blacklist in the resource data
        db_rsrc_names = self.data().get('name_blacklist')
        if db_rsrc_names:
            current_blacklist = db_rsrc_names.split(',')
        else:
            current_blacklist = []

        # Now we iterate over the removal policies, and update the blacklist
        # with any additional names
        rsrc_names = set(current_blacklist)

        if nested:
            for r in self.properties[self.REMOVAL_POLICIES]:
                if self.REMOVAL_RSRC_LIST in r:
                    # Tolerate string or int list values
                    for n in r[self.REMOVAL_RSRC_LIST]:
                        str_n = six.text_type(n)
                        if str_n in nested:
                            rsrc_names.add(str_n)
                            continue
                        # Not a member name; try resolving it as a refid.
                        rsrc = nested.resource_by_refid(str_n)
                        if rsrc:
                            rsrc_names.add(rsrc.name)

        # If the blacklist has changed, update the resource data
        if rsrc_names != set(current_blacklist):
            self.data_set('name_blacklist', ','.join(rsrc_names))
        return rsrc_names

    def _resource_names(self, size=None):
        """Yield ``size`` member names ("0", "1", ...), skipping blacklisted ones."""
        name_blacklist = self._name_blacklist()
        if size is None:
            size = self.get_size()

        def is_blacklisted(name):
            return name in name_blacklist

        # Candidate names are the decimal string forms of 0, 1, 2, ...
        candidates = six.moves.map(six.text_type, itertools.count())

        return itertools.islice(six.moves.filterfalse(is_blacklisted,
                                                      candidates),
                                size)

    def _count_black_listed(self):
        """Return the number of current resource names that are blacklisted."""
        existing_members = grouputils.get_member_names(self)
        return len(self._name_blacklist() & set(existing_members))

    def handle_create(self):
        # With a batch_create policy, create the group incrementally via
        # the rolling-replace machinery; otherwise create everything at once.
        if self.update_policy.get(self.BATCH_CREATE):
            batch_create = self.update_policy[self.BATCH_CREATE]
            max_batch_size = batch_create[self.MAX_BATCH_SIZE]
            pause_sec = batch_create[self.PAUSE_TIME]
            checkers = self._replace(0, max_batch_size, pause_sec)
            checkers[0].start()
            return checkers
        else:
            names = self._resource_names()
            self.create_with_template(self._assemble_nested(names),
                                      self.child_params(),
                                      self.stack.timeout_secs())

    def check_create_complete(self, checkers=None):
        # No checkers means the non-batched path was taken: defer to the
        # base nested-stack completion check.
        if checkers is None:
            return super(ResourceGroup, self).check_create_complete()
        for checker in checkers:
            if not checker.started():
                checker.start()
            if not checker.step():
                return False
        return True

    def _run_to_completion(self, template, timeout):
        """Generator task: update the nested stack and yield until it settles."""
        updater = self.update_with_template(template, {},
                                            timeout)

        while not super(ResourceGroup,
                        self).check_update_complete(updater):
            yield

    def _run_update(self, total_capacity, max_updates, timeout):
        """Run one rolling-update batch to completion (generator task)."""
        template = self._assemble_for_rolling_update(total_capacity,
                                                     max_updates)
        return self._run_to_completion(template, timeout)

    def check_update_complete(self, checkers):
        for checker in checkers:
            if not checker.started():
                checker.start()
            if not checker.step():
                return False
        return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if tmpl_diff:
            # parse update policy
            if rsrc_defn.UPDATE_POLICY in tmpl_diff:
                up = json_snippet.update_policy(self.update_policy_schema,
                                                self.context)
                self.update_policy = up

        checkers = []
        self.properties = json_snippet.properties(self.properties_schema,
                                                  self.context)
        # Only a changed resource_def can trigger a rolling update; plain
        # count changes fall through to the single resize task below.
        if prop_diff and self.RESOURCE_DEF in prop_diff:
            updaters = self._try_rolling_update()
            if updaters:
                checkers.extend(updaters)

        if not checkers:
            resizer = scheduler.TaskRunner(
                self._run_to_completion,
                self._assemble_nested(self._resource_names()),
                self.stack.timeout_mins)
            checkers.append(resizer)

        checkers[0].start()
        return checkers

    def get_attribute(self, key, *path):
        # "resource.<index>[.<attr>]" addresses a single member directly.
        if key.startswith("resource."):
            return grouputils.get_nested_attrs(self, key, False, *path)

        names = self._resource_names()
        if key == self.REFS:
            vals = [grouputils.get_rsrc_id(self, key, False, n)
                    for n in names]
            return attributes.select_from_attribute(vals, path)
        if key == self.ATTR_ATTRIBUTES:
            if not path:
                raise exception.InvalidTemplateAttribute(
                    resource=self.name, key=key)
            return dict((n, grouputils.get_rsrc_attr(
                self, key, False, n, *path)) for n in names)

        # Any other key: collect that attribute from every member.
        path = [key] + list(path)
        return [grouputils.get_rsrc_attr(self, key, False, n, *path)
                for n in names]

    def build_resource_definition(self, res_name, res_defn):
        """Build one member's ResourceDefinition, applying index substitution."""
        res_def = copy.deepcopy(res_defn)
        props = res_def.get(self.RESOURCE_DEF_PROPERTIES)
        if props:
            repl_props = self._handle_repl_val(res_name, props)
            res_def[self.RESOURCE_DEF_PROPERTIES] = repl_props
        return template.HOTemplate20130523.rsrc_defn_from_snippet(res_name,
                                                                  res_def)

    def get_resource_def(self, include_all=False):
        """Returns the resource definition portion of the group.

        :param include_all: if False, only properties for the resource
            definition that are not empty will be included
        :type include_all: bool
        :return: resource definition for the group
        :rtype: dict
        """
        # At this stage, we don't mind if all of the parameters have values
        # assigned. Pass in a custom resolver to the properties to not
        # error when a parameter does not have a user entered value.
        def ignore_param_resolve(snippet):
            while isinstance(snippet, function.Function):
                try:
                    snippet = snippet.result()
                except exception.UserParameterMissing:
                    return None

            if isinstance(snippet, collections.Mapping):
                return dict((k, ignore_param_resolve(v))
                            for k, v in snippet.items())

            elif (not isinstance(snippet, six.string_types) and
                  isinstance(snippet, collections.Iterable)):
                return [ignore_param_resolve(v) for v in snippet]

            return snippet

        self.properties.resolve = ignore_param_resolve
        res_def = self.properties[self.RESOURCE_DEF]
        if not include_all:
            return self._clean_props(res_def)
        return res_def

    def _clean_props(self, res_defn):
        """Return a copy of the definition with None-valued properties dropped."""
        res_def = copy.deepcopy(res_defn)
        props = res_def.get(self.RESOURCE_DEF_PROPERTIES)
        if props:
            clean = dict((k, v) for k, v in props.items() if v is not None)
            props = clean
            res_def[self.RESOURCE_DEF_PROPERTIES] = props
        return res_def

    def _handle_repl_val(self, res_name, val):
        """Recursively replace the index_var marker with this member's name."""
        repl_var = self.properties[self.INDEX_VAR]

        def recurse(x):
            return self._handle_repl_val(res_name, x)

        if isinstance(val, six.string_types):
            return val.replace(repl_var, res_name)
        elif isinstance(val, collections.Mapping):
            return {k: recurse(v) for k, v in val.items()}
        elif isinstance(val, collections.Sequence):
            return [recurse(v) for v in val]
        return val

    def _assemble_nested(self, names, include_all=False,
                         template_version=('heat_template_version',
                                           '2015-04-30')):
        """Build the nested-stack template containing one member per name."""
        def_dict = self.get_resource_def(include_all)
        definitions = [(k, self.build_resource_definition(k, def_dict))
                       for k in names]

        return scl_template.make_template(definitions,
                                          version=template_version)

    def _assemble_for_rolling_update(self, total_capacity, max_updates,
                                     include_all=False,
                                     template_version=('heat_template_version',
                                                       '2015-04-30')):
        """Build the template for one rolling-update step.

        At most ``max_updates`` existing members are replaced with the new
        definition; the rest keep their current definitions, so the group
        converges over successive batches.
        """
        names = list(self._resource_names(total_capacity))
        name_blacklist = self._name_blacklist()

        valid_resources = [(n, d) for n, d in
                           grouputils.get_member_definitions(self)
                           if n not in name_blacklist]

        targ_cap = self.get_size()

        def replace_priority(res_item):
            # Sort key: smaller value means "replace sooner".
            name, defn = res_item
            try:
                index = names.index(name)
            except ValueError:
                # High priority - delete immediately
                return 0
            else:
                if index < targ_cap:
                    # Update higher indices first
                    return targ_cap - index
                else:
                    # Low priority - don't update
                    return total_capacity

        old_resources = sorted(valid_resources, key=replace_priority)
        existing_names = set(n for n, d in valid_resources)
        # Names in the target set that don't exist yet - these become
        # brand-new members created with the new definition.
        new_names = six.moves.filterfalse(lambda n: n in existing_names,
                                          names)
        res_def = self.get_resource_def(include_all)
        definitions = scl_template.member_definitions(
            old_resources, res_def,
            total_capacity,
            max_updates,
            lambda: next(new_names),
            self.build_resource_definition)

        return scl_template.make_template(definitions,
                                          version=template_version)

    def _try_rolling_update(self):
        # Returns the batch task list when a rolling_update policy is set,
        # or None (implicitly) to fall back to a one-shot update.
        if self.update_policy[self.ROLLING_UPDATE]:
            policy = self.update_policy[self.ROLLING_UPDATE]
            return self._replace(policy[self.MIN_IN_SERVICE],
                                 policy[self.MAX_BATCH_SIZE],
                                 policy[self.PAUSE_TIME])

    def _update_timeout(self, batch_cnt, pause_sec):
        """Return the per-batch timeout: stack timeout minus total pause time.

        :raises ValueError: if the pauses alone would exceed the stack timeout.
        """
        total_pause_time = pause_sec * max(batch_cnt - 1, 0)
        if total_pause_time >= self.stack.timeout_secs():
            msg = _('The current %s will result in stack update '
                    'timeout.') % rsrc_defn.UPDATE_POLICY
            raise ValueError(msg)
        return self.stack.timeout_secs() - total_pause_time

    @staticmethod
    def _get_batches(targ_cap, curr_cap, batch_size, min_in_service):
        """Yield (new_capacity, members_updated) pairs for each rolling batch."""
        updated = 0

        while rolling_update.needs_update(targ_cap, curr_cap, updated):
            new_cap, total_new = rolling_update.next_batch(
                targ_cap, curr_cap, updated, batch_size,
                min_in_service)

            yield new_cap, total_new

            # Only count updates of members within the final capacity;
            # temporary over-capacity members don't advance progress.
            updated += total_new - max(new_cap - max(curr_cap, targ_cap), 0)
            curr_cap = new_cap

    def _replace(self, min_in_service, batch_size, pause_sec):
        """Build the ordered list of TaskRunners implementing a rolling replace."""

        def pause_between_batch(pause_sec):
            duration = timeutils.Duration(pause_sec)
            while not duration.expired():
                yield

        # blacklist count existing
        num_blacklist = self._count_black_listed()

        # current capacity not including existing blacklisted
        curr_cap = len(self.nested()) - num_blacklist if self.nested() else 0

        batches = list(self._get_batches(self.get_size(), curr_cap,
                                         batch_size, min_in_service))
        update_timeout = self._update_timeout(len(batches), pause_sec)

        def tasks():
            for index, (curr_cap, max_upd) in enumerate(batches):
                yield scheduler.TaskRunner(self._run_update,
                                           curr_cap, max_upd,
                                           update_timeout)

                # Insert a pause task between batches (not after the last).
                if index < (len(batches) - 1) and pause_sec > 0:
                    yield scheduler.TaskRunner(pause_between_batch, pause_sec)

        return list(tasks())

    def child_template(self):
        names = self._resource_names()
        return self._assemble_nested(names)

    def child_params(self):
        return {}

    def handle_adopt(self, resource_data):
        names = self._resource_names()
        if names:
            return self.create_with_template(self._assemble_nested(names),
                                             {},
                                             adopt_data=resource_data)
class SwiftSignal(resource.Resource):
    """Resource for handling signals received by SwiftSignalHandle.

    This resource handles signals received by SwiftSignalHandle and is the
    same as the WaitCondition resource. It polls the Swift container shared
    by the stack for signal objects uploaded through the handle's TempURL.
    """

    support_status = support.SupportStatus(version='2014.2')

    default_client_name = "swift"

    PROPERTIES = (
        HANDLE, TIMEOUT, COUNT,
    ) = (
        'handle', 'timeout', 'count',
    )

    properties_schema = {
        HANDLE: properties.Schema(
            properties.Schema.STRING,
            required=True,
            description=_('URL of TempURL where resource will signal '
                          'completion and optionally upload data.')),
        TIMEOUT: properties.Schema(
            properties.Schema.NUMBER,
            description=_('The maximum number of seconds to wait for the '
                          'resource to signal completion. Once the timeout '
                          'is reached, creation of the signal resource will '
                          'fail.'),
            required=True,
            constraints=[
                constraints.Range(1, 43200),
            ]),
        COUNT: properties.Schema(
            properties.Schema.INTEGER,
            description=_('The number of success signals that must be '
                          'received before the stack creation process '
                          'continues.'),
            default=1,
            constraints=[
                constraints.Range(1, 1000),
            ])
    }

    # NOTE(review): (DATA) is just a parenthesized name, not a tuple, so
    # ATTRIBUTES is the string 'data' rather than ('data',). DATA is also
    # rebound (to the same value) by METADATA_KEYS below. Looks intentional
    # here since attributes_schema carries the real attribute set - confirm
    # before changing.
    ATTRIBUTES = (DATA) = 'data'

    attributes_schema = {
        DATA: attributes.Schema(
            _('JSON data that was uploaded via the SwiftSignalHandle.'),
            type=attributes.Schema.STRING)
    }

    WAIT_STATUSES = (
        STATUS_FAILURE,
        STATUS_SUCCESS,
    ) = (
        'FAILURE',
        'SUCCESS',
    )

    # Keys recognised inside each uploaded signal body (all optional).
    METADATA_KEYS = (
        DATA, REASON, STATUS, UNIQUE_ID
    ) = (
        'data', 'reason', 'status', 'id'
    )

    def __init__(self, name, json_snippet, stack):
        super(SwiftSignal, self).__init__(name, json_snippet, stack)
        # Cached parse results for the handle TempURL (see url/obj_name).
        self._obj_name = None
        self._url = None

    @property
    def url(self):
        """Parsed TempURL of the handle (urlparse result), computed lazily."""
        if not self._url:
            self._url = parse.urlparse(self.properties[self.HANDLE])
        return self._url

    @property
    def obj_name(self):
        # Path component 4 is the object name - assumes the TempURL path
        # layout /v1/AUTH_<tenant>/<container>/<object> (see
        # _validate_handle_url, which checks component 3 as the container).
        if not self._obj_name:
            self._obj_name = self.url.path.split('/')[4]
        return self._obj_name

    def _validate_handle_url(self):
        """Check the handle URL is a TempURL for this stack's container.

        :raises ValueError: if the path is not a valid Swift TempURL or the
            container name does not match the stack id.
        """
        parts = self.url.path.split('/')
        msg = _('"%(url)s" is not a valid SwiftSignalHandle. The %(part)s '
                'is invalid')
        cplugin = self.client_plugin()
        if not cplugin.is_valid_temp_url_path(self.url.path):
            raise ValueError(msg % {'url': self.url.path,
                                    'part': 'Swift TempURL path'})
        if not parts[3] == self.stack.id:
            raise ValueError(msg % {'url': self.url.path,
                                    'part': 'container name'})

    def handle_create(self):
        self._validate_handle_url()
        # Return value is handed back to check_create_complete() as
        # create_data: (start timestamp, timeout in seconds).
        started_at = timeutils.utcnow()
        return started_at, float(self.properties[self.TIMEOUT])

    def get_signals(self):
        """Fetch and normalize all signal bodies uploaded for this handle.

        Returns a list of signal dicts with DATA/REASON/STATUS/UNIQUE_ID
        defaults filled in; a later signal with a duplicate id replaces
        earlier ones.
        """
        try:
            container = self.client().get_container(self.stack.id)
        except Exception as exc:
            # Re-raises anything other than a not-found error.
            self.client_plugin().ignore_not_found(exc)
            LOG.debug("Swift container %s was not found" % self.stack.id)
            return []

        index = container[1]
        if not index:
            LOG.debug("Swift objects in container %s were not found" %
                      self.stack.id)
            return []

        # Remove objects in that are for other handle resources, since
        # multiple SwiftSignalHandle resources in the same stack share
        # a container
        filtered = [obj for obj in index if self.obj_name in obj['name']]

        # Fetch objects from Swift and filter results
        obj_bodies = []
        for obj in filtered:
            try:
                signal = self.client().get_object(self.stack.id, obj['name'])
            except Exception as exc:
                self.client_plugin().ignore_not_found(exc)
                continue

            body = signal[1]
            if body == swift.IN_PROGRESS:  # Ignore the initial object
                continue
            if body == "":
                obj_bodies.append({})
                continue
            try:
                obj_bodies.append(jsonutils.loads(body))
            except ValueError:
                raise exception.Error(_("Failed to parse JSON data: %s") %
                                      body)

        # Set default values on each signal
        signals = []
        signal_num = 1
        for signal in obj_bodies:

            # Remove previous signals with the same ID
            sig_id = self.UNIQUE_ID
            ids = [s.get(sig_id) for s in signals if sig_id in s]
            if ids and sig_id in signal and ids.count(signal[sig_id]) > 0:
                [signals.remove(s) for s in signals
                 if s.get(sig_id) == signal[sig_id]]

            # Make sure all fields are set, since all are optional
            signal.setdefault(self.DATA, None)
            unique_id = signal.setdefault(sig_id, signal_num)
            reason = 'Signal %s received' % unique_id
            signal.setdefault(self.REASON, reason)
            signal.setdefault(self.STATUS, self.STATUS_SUCCESS)

            signals.append(signal)
            signal_num += 1

        return signals

    def get_status(self):
        """Return the list of status strings from all received signals."""
        return [s[self.STATUS] for s in self.get_signals()]

    def get_status_reason(self, status):
        """Return the reasons of all signals that carry the given status."""
        return [s[self.REASON] for s in self.get_signals()
                if s[self.STATUS] == status]

    def get_data(self):
        """Return a {signal id: data} map, or None when no signals exist."""
        signals = self.get_signals()
        if not signals:
            return None
        data = {}
        for signal in signals:
            data[signal[self.UNIQUE_ID]] = signal[self.DATA]
        return data

    def check_create_complete(self, create_data):
        """Poll for success signals; complete once COUNT successes arrive.

        :param create_data: (start time, timeout seconds) from handle_create.
        :raises SwiftSignalTimeout: when the timeout elapses first.
        :raises SwiftSignalFailure: when any FAILURE signal is received.
        """
        if timeutils.is_older_than(*create_data):
            raise SwiftSignalTimeout(self)

        statuses = self.get_status()
        if not statuses:
            return False

        for status in statuses:
            if status == self.STATUS_FAILURE:
                failure = SwiftSignalFailure(self)
                LOG.info(_LI('%(name)s Failed (%(failure)s)'),
                         {'name': str(self), 'failure': str(failure)})
                raise failure
            elif status != self.STATUS_SUCCESS:
                raise exception.Error(_("Unknown status: %s") % status)

        if len(statuses) >= self.properties[self.COUNT]:
            LOG.info(_LI("%s Succeeded"), str(self))
            return True
        return False

    def _resolve_attribute(self, key):
        # The 'data' attribute resolves to the JSON-encoded signal data map.
        if key == self.DATA:
            return six.text_type(jsonutils.dumps(self.get_data()))
class Subnet(neutron.NeutronResource):
    """A resource for managing Neutron subnets.

    A subnet represents an IP address block that can be used for assigning
    IP addresses to virtual instances. Each subnet must have a CIDR and must
    be associated with a network. IPs can be either selected from the whole
    subnet CIDR, or from "allocation pools" that can be specified by the
    user.
    """

    PROPERTIES = (
        NETWORK_ID, NETWORK, SUBNETPOOL, PREFIXLEN, CIDR,
        VALUE_SPECS, NAME, IP_VERSION, DNS_NAMESERVERS, GATEWAY_IP,
        ENABLE_DHCP, ALLOCATION_POOLS, TENANT_ID, HOST_ROUTES,
        IPV6_RA_MODE, IPV6_ADDRESS_MODE,
    ) = (
        'network_id', 'network', 'subnetpool', 'prefixlen', 'cidr',
        'value_specs', 'name', 'ip_version', 'dns_nameservers', 'gateway_ip',
        'enable_dhcp', 'allocation_pools', 'tenant_id', 'host_routes',
        'ipv6_ra_mode', 'ipv6_address_mode',
    )

    _ALLOCATION_POOL_KEYS = (
        ALLOCATION_POOL_START, ALLOCATION_POOL_END,
    ) = (
        'start', 'end',
    )

    _HOST_ROUTES_KEYS = (
        ROUTE_DESTINATION, ROUTE_NEXTHOP,
    ) = (
        'destination', 'nexthop',
    )

    # DHCPv6 modes shared by ipv6_ra_mode and ipv6_address_mode.
    _IPV6_DHCP_MODES = (
        DHCPV6_STATEFUL, DHCPV6_STATELESS, SLAAC,
    ) = (
        'dhcpv6-stateful', 'dhcpv6-stateless', 'slaac',
    )

    ATTRIBUTES = (
        NAME_ATTR, NETWORK_ID_ATTR, TENANT_ID_ATTR, ALLOCATION_POOLS_ATTR,
        GATEWAY_IP_ATTR, HOST_ROUTES_ATTR, IP_VERSION_ATTR, CIDR_ATTR,
        DNS_NAMESERVERS_ATTR, ENABLE_DHCP_ATTR,
    ) = (
        'name', 'network_id', 'tenant_id', 'allocation_pools',
        'gateway_ip', 'host_routes', 'ip_version', 'cidr',
        'dns_nameservers', 'enable_dhcp',
    )

    properties_schema = {
        NETWORK_ID: properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                message=_('Use property %s.') % NETWORK,
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the attached network.'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.network')],
            support_status=support.SupportStatus(version='2014.2')),
        SUBNETPOOL: properties.Schema(
            properties.Schema.STRING,
            _('The name or ID of the subnet pool.'),
            constraints=[constraints.CustomConstraint('neutron.subnetpool')],
            support_status=support.SupportStatus(version='6.0.0'),
        ),
        PREFIXLEN: properties.Schema(
            properties.Schema.INTEGER,
            _('Prefix length for subnet allocation from subnet pool.'),
            constraints=[constraints.Range(min=0)],
            support_status=support.SupportStatus(version='6.0.0'),
        ),
        CIDR: properties.Schema(
            properties.Schema.STRING,
            _('The CIDR.'),
            constraints=[constraints.CustomConstraint('net_cidr')]),
        VALUE_SPECS: properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the request.'),
            default={},
            update_allowed=True),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name of the subnet.'),
            update_allowed=True),
        IP_VERSION: properties.Schema(
            properties.Schema.INTEGER,
            _('The IP version, which is 4 or 6.'),
            default=4,
            constraints=[
                constraints.AllowedValues([4, 6]),
            ]),
        DNS_NAMESERVERS: properties.Schema(
            properties.Schema.LIST,
            _('A specified set of DNS name servers to be used.'),
            default=[],
            update_allowed=True),
        GATEWAY_IP: properties.Schema(
            properties.Schema.STRING,
            _('The gateway IP address. Set to any of [ null | ~ | "" ] '
              'to create/update a subnet without a gateway. '
              'If omitted when creation, neutron will assign the first '
              'free IP address within the subnet to the gateway '
              'automatically. If remove this from template when update, '
              'the old gateway IP address will be detached.'),
            update_allowed=True),
        ENABLE_DHCP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Set to true if DHCP is enabled and false if DHCP is '
              'disabled.'),
            default=True,
            update_allowed=True),
        ALLOCATION_POOLS: properties.Schema(
            properties.Schema.LIST,
            _('The start and end addresses for the allocation pools.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ALLOCATION_POOL_START: properties.Schema(
                        properties.Schema.STRING,
                        _('Start address for the allocation pool.'),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('ip_addr')]),
                    ALLOCATION_POOL_END: properties.Schema(
                        properties.Schema.STRING,
                        _('End address for the allocation pool.'),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('ip_addr')]),
                },
            ),
            update_allowed=True),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the tenant who owns the network. Only '
              'administrative users can specify a tenant ID other than '
              'their own.')),
        HOST_ROUTES: properties.Schema(
            properties.Schema.LIST,
            _('A list of host route dictionaries for the subnet.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ROUTE_DESTINATION: properties.Schema(
                        properties.Schema.STRING,
                        _('The destination for static route.'),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('net_cidr')]),
                    ROUTE_NEXTHOP: properties.Schema(
                        properties.Schema.STRING,
                        _('The next hop for the destination.'),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('ip_addr')]),
                },
            ),
            update_allowed=True),
        IPV6_RA_MODE: properties.Schema(
            properties.Schema.STRING,
            _('IPv6 RA (Router Advertisement) mode.'),
            constraints=[
                constraints.AllowedValues(
                    [DHCPV6_STATEFUL, DHCPV6_STATELESS, SLAAC]),
            ],
            support_status=support.SupportStatus(version='2015.1')),
        IPV6_ADDRESS_MODE: properties.Schema(
            properties.Schema.STRING,
            _('IPv6 address mode.'),
            constraints=[
                constraints.AllowedValues(
                    [DHCPV6_STATEFUL, DHCPV6_STATELESS, SLAAC]),
            ],
            support_status=support.SupportStatus(version='2015.1')),
    }

    attributes_schema = {
        NAME_ATTR: attributes.Schema(
            _("Friendly name of the subnet."),
            type=attributes.Schema.STRING),
        NETWORK_ID_ATTR: attributes.Schema(
            _("Parent network of the subnet."),
            type=attributes.Schema.STRING),
        TENANT_ID_ATTR: attributes.Schema(
            _("Tenant owning the subnet."),
            type=attributes.Schema.STRING),
        ALLOCATION_POOLS_ATTR: attributes.Schema(
            _("Ip allocation pools and their ranges."),
            type=attributes.Schema.LIST),
        GATEWAY_IP_ATTR: attributes.Schema(
            _("Ip of the subnet's gateway."),
            type=attributes.Schema.STRING),
        HOST_ROUTES_ATTR: attributes.Schema(
            _("Additional routes for this subnet."),
            type=attributes.Schema.LIST),
        IP_VERSION_ATTR: attributes.Schema(
            _("Ip version for the subnet."),
            type=attributes.Schema.STRING),
        CIDR_ATTR: attributes.Schema(
            _("CIDR block notation for this subnet."),
            type=attributes.Schema.STRING),
        DNS_NAMESERVERS_ATTR: attributes.Schema(
            _("List of dns nameservers."),
            type=attributes.Schema.LIST),
        ENABLE_DHCP_ATTR: attributes.Schema(
            _("'true' if DHCP is enabled for this subnet; 'false' "
              "otherwise."),
            type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        """Map the hidden network_id property onto network and resolve names.

        Both ``network`` and ``subnetpool`` accept a name or an ID in the
        template; the RESOLVE rules normalise them to IDs before use.
        """
        return [
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.NETWORK],
                                        value_path=[self.NETWORK_ID]),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.NETWORK],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='network'),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.SUBNETPOOL],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='subnetpool')
        ]

    @classmethod
    def _null_gateway_ip(cls, props):
        """Convert an empty-string gateway_ip in *props* to None in place."""
        if cls.GATEWAY_IP not in props:
            return
        # Specifying null in the gateway_ip will result in
        # a property containing an empty string.
        # A null gateway_ip has special meaning in the API
        # so this needs to be set back to None.
        # See bug https://bugs.launchpad.net/heat/+bug/1226666
        if props.get(cls.GATEWAY_IP) == '':
            props[cls.GATEWAY_IP] = None

    def validate(self):
        """Validate property combinations Neutron cannot express in schema.

        Raises StackValidationFailed, ResourcePropertyConflict or
        PropertyUnspecifiedError on invalid combinations.
        """
        super(Subnet, self).validate()
        subnetpool = self.properties[self.SUBNETPOOL]
        prefixlen = self.properties[self.PREFIXLEN]
        cidr = self.properties[self.CIDR]
        # The address block comes either from an explicit CIDR or from a
        # subnet pool - exactly one of the two must be supplied.
        if subnetpool and cidr:
            raise exception.ResourcePropertyConflict(self.SUBNETPOOL,
                                                     self.CIDR)
        if not subnetpool and not cidr:
            raise exception.PropertyUnspecifiedError(self.SUBNETPOOL,
                                                     self.CIDR)
        if prefixlen and cidr:
            raise exception.ResourcePropertyConflict(self.PREFIXLEN,
                                                     self.CIDR)
        ra_mode = self.properties[self.IPV6_RA_MODE]
        address_mode = self.properties[self.IPV6_ADDRESS_MODE]

        if (self.properties[self.IP_VERSION] == 4) and (
                ra_mode or address_mode):
            msg = _('ipv6_ra_mode and ipv6_address_mode are not supported '
                    'for ipv4.')
            raise exception.StackValidationFailed(message=msg)
        if ra_mode and address_mode and (ra_mode != address_mode):
            msg = _('When both ipv6_ra_mode and ipv6_address_mode are set, '
                    'they must be equal.')
            raise exception.StackValidationFailed(message=msg)

        gateway_ip = self.properties.get(self.GATEWAY_IP)
        # '~' and '' are the documented "no gateway" sentinels, so they are
        # exempt from IP-format validation.
        if (gateway_ip and gateway_ip not in ['~', ''] and
                not netutils.is_valid_ip(gateway_ip)):
            # Bug fix: this message was previously built as a tuple
            # (format_string, gateway_ip) without ever interpolating, so the
            # user saw the raw "%(gateway)s" placeholder. Interpolate it.
            msg = _('Gateway IP address "%(gateway)s" is in '
                    'invalid format.') % {'gateway': gateway_ip}
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the subnet, renaming props to the keys Neutron expects."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        # Neutron's API takes network_id/subnetpool_id, not network/subnetpool
        props['network_id'] = props.pop(self.NETWORK)
        if self.SUBNETPOOL in props and props[self.SUBNETPOOL]:
            props['subnetpool_id'] = props.pop('subnetpool')
        self._null_gateway_ip(props)
        subnet = self.client().create_subnet({'subnet': props})['subnet']
        self.resource_id_set(subnet['id'])

    def handle_delete(self):
        """Delete the subnet; a missing subnet is treated as success."""
        if not self.resource_id:
            return
        try:
            self.client().delete_subnet(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def _show_resource(self):
        """Return the subnet dict as reported by Neutron."""
        return self.client().show_subnet(self.resource_id)['subnet']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed properties to Neutron."""
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            # Removing allocation_pools from the template yields None;
            # Neutron expects an empty list to clear the pools.
            if (self.ALLOCATION_POOLS in prop_diff and
                    prop_diff[self.ALLOCATION_POOLS] is None):
                prop_diff[self.ALLOCATION_POOLS] = []

            # If the new value is '', set to None
            self._null_gateway_ip(prop_diff)

            self.client().update_subnet(
                self.resource_id, {'subnet': prop_diff})

    def is_allow_replace(self):
        return True
class SwiftSignalHandle(resource.Resource):
    """Resource for managing signals from Swift resources.

    This resource is same as WaitConditionHandle, but designed for using by
    Swift resources.
    """

    support_status = support.SupportStatus(version='2014.2')
    default_client_name = "swift"

    properties_schema = {}

    ATTRIBUTES = (
        TOKEN,
        ENDPOINT,
        CURL_CLI,
    ) = (
        'token',
        'endpoint',
        'curl_cli',
    )

    attributes_schema = {
        TOKEN: attributes.Schema(
            _('Tokens are not needed for Swift TempURLs. This attribute is '
              'being kept for compatibility with the '
              'OS::Heat::WaitConditionHandle resource.'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING),
        ENDPOINT: attributes.Schema(
            _('Endpoint/url which can be used for signalling handle.'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING),
        CURL_CLI: attributes.Schema(
            _('Convenience attribute, provides curl CLI command '
              'prefix, which can be used for signalling handle completion or '
              'failure. You can signal success by adding '
              '--data-binary \'{"status": "SUCCESS"}\' '
              ', or signal failure by adding '
              '--data-binary \'{"status": "FAILURE"}\'.'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING),
    }

    def handle_create(self):
        """Generate the Swift TempURL and record it in resource data."""
        signal_url = self.client_plugin().get_signal_url(
            self.stack.id, self.physical_resource_name())
        self.data_set(self.ENDPOINT, signal_url)
        self.resource_id_set(self.physical_resource_name())

    def _resolve_attribute(self, key):
        """Serve handle attributes; all return None before create."""
        if not self.resource_id:
            return None
        if key == self.TOKEN:
            return ''  # HeatWaitConditionHandle compatibility
        if key == self.ENDPOINT:
            return self.data().get(self.ENDPOINT)
        if key == self.CURL_CLI:
            return "curl -i -X PUT '%s'" % self.data().get(self.ENDPOINT)

    def handle_delete(self):
        """Remove the signal object, its container, and the stored URL."""
        plugin = self.client_plugin()
        swift_client = plugin.client()

        # Keep deleting until Swift says the (versioned) object is gone.
        while True:
            try:
                swift_client.delete_object(self.stack.id,
                                           self.physical_resource_name())
            except Exception as exc:
                plugin.ignore_not_found(exc)
                break

        # Drop the container unless it is already gone, or a conflict says
        # sibling handles in this stack still have objects in it.
        try:
            swift_client.delete_container(self.stack.id)
        except Exception as exc:
            if not (plugin.is_not_found(exc) or plugin.is_conflict(exc)):
                raise

        self.data_delete(self.ENDPOINT)

    def get_reference_id(self):
        """The TempURL endpoint is the reference used to signal this handle."""
        return self.data().get(self.ENDPOINT)
class Pool(neutron.NeutronResource):
    """A resource for managing load balancer pools in Neutron.

    A load balancing pool is a logical set of devices, such as web servers,
    that you group together to receive and process traffic. The loadbalancing
    function chooses a member of the pool according to the configured load
    balancing method to handle the new requests or connections received on
    the VIP address. There is only one pool for a VIP.
    """

    required_service_extension = 'lbaas'

    PROPERTIES = (
        PROTOCOL, SUBNET_ID, SUBNET, LB_METHOD, NAME, DESCRIPTION,
        ADMIN_STATE_UP, VIP, MONITORS, PROVIDER,
    ) = (
        'protocol', 'subnet_id', 'subnet', 'lb_method', 'name', 'description',
        'admin_state_up', 'vip', 'monitors', 'provider',
    )

    # Keys of the nested VIP map property.
    _VIP_KEYS = (
        VIP_NAME, VIP_DESCRIPTION, VIP_SUBNET, VIP_ADDRESS,
        VIP_CONNECTION_LIMIT, VIP_PROTOCOL_PORT, VIP_SESSION_PERSISTENCE,
        VIP_ADMIN_STATE_UP,
    ) = (
        'name', 'description', 'subnet', 'address',
        'connection_limit', 'protocol_port', 'session_persistence',
        'admin_state_up',
    )

    _VIP_SESSION_PERSISTENCE_KEYS = (
        VIP_SESSION_PERSISTENCE_TYPE, VIP_SESSION_PERSISTENCE_COOKIE_NAME,
    ) = (
        'type', 'cookie_name',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, NAME_ATTR, PROTOCOL_ATTR, SUBNET_ID_ATTR,
        LB_METHOD_ATTR, DESCRIPTION_ATTR, TENANT_ID, VIP_ATTR, PROVIDER_ATTR,
    ) = (
        'admin_state_up', 'name', 'protocol', 'subnet_id',
        'lb_method', 'description', 'tenant_id', 'vip', 'provider',
    )

    properties_schema = {
        PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            _('Protocol for balancing.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['TCP', 'HTTP', 'HTTPS']),
            ]
        ),
        SUBNET_ID: properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                message=_('Use property %s.') % SUBNET,
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.2'
                )
            ),
            constraints=[
                constraints.CustomConstraint('neutron.subnet')
            ]
        ),
        SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('The subnet for the port on which the members '
              'of the pool will be connected.'),
            support_status=support.SupportStatus(version='2014.2'),
            required=True,
            constraints=[
                constraints.CustomConstraint('neutron.subnet')
            ]
        ),
        LB_METHOD: properties.Schema(
            properties.Schema.STRING,
            _('The algorithm used to distribute load between the members of '
              'the pool.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ROUND_ROBIN',
                                           'LEAST_CONNECTIONS', 'SOURCE_IP']),
            ],
            update_allowed=True
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the pool.')
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the pool.'),
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of this pool.'),
            default=True,
            update_allowed=True
        ),
        PROVIDER: properties.Schema(
            properties.Schema.STRING,
            _('LBaaS provider to implement this load balancer instance.'),
            support_status=support.SupportStatus(version='5.0.0'),
            constraints=[
                constraints.CustomConstraint('neutron.lb.provider')
            ],
        ),
        VIP: properties.Schema(
            properties.Schema.MAP,
            _('IP address and port of the pool.'),
            schema={
                VIP_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of the vip.')
                ),
                VIP_DESCRIPTION: properties.Schema(
                    properties.Schema.STRING,
                    _('Description of the vip.')
                ),
                VIP_SUBNET: properties.Schema(
                    properties.Schema.STRING,
                    _('Subnet of the vip.'),
                    constraints=[
                        constraints.CustomConstraint('neutron.subnet')
                    ]
                ),
                VIP_ADDRESS: properties.Schema(
                    properties.Schema.STRING,
                    _('IP address of the vip.'),
                    constraints=[
                        constraints.CustomConstraint('ip_addr')
                    ]
                ),
                VIP_CONNECTION_LIMIT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('The maximum number of connections per second '
                      'allowed for the vip.')
                ),
                VIP_PROTOCOL_PORT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('TCP port on which to listen for client traffic '
                      'that is associated with the vip address.'),
                    required=True
                ),
                VIP_SESSION_PERSISTENCE: properties.Schema(
                    properties.Schema.MAP,
                    _('Configuration of session persistence.'),
                    schema={
                        VIP_SESSION_PERSISTENCE_TYPE: properties.Schema(
                            properties.Schema.STRING,
                            _('Method of implementation of session '
                              'persistence feature.'),
                            required=True,
                            constraints=[constraints.AllowedValues(
                                ['SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE']
                            )]
                        ),
                        VIP_SESSION_PERSISTENCE_COOKIE_NAME:
                            properties.Schema(
                                properties.Schema.STRING,
                                _('Name of the cookie, '
                                  'required if type is APP_COOKIE.')
                            )
                    }
                ),
                VIP_ADMIN_STATE_UP: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('The administrative state of this vip.'),
                    default=True
                ),
            },
            required=True
        ),
        MONITORS: properties.Schema(
            properties.Schema.LIST,
            _('List of health monitors associated with the pool.'),
            default=[],
            update_allowed=True
        ),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _('The administrative state of this pool.'),
            type=attributes.Schema.STRING
        ),
        NAME_ATTR: attributes.Schema(
            _('Name of the pool.'),
            type=attributes.Schema.STRING
        ),
        PROTOCOL_ATTR: attributes.Schema(
            _('Protocol to balance.'),
            type=attributes.Schema.STRING
        ),
        SUBNET_ID_ATTR: attributes.Schema(
            _('The subnet for the port on which the members of the pool '
              'will be connected.'),
            type=attributes.Schema.STRING
        ),
        LB_METHOD_ATTR: attributes.Schema(
            _('The algorithm used to distribute load between the members '
              'of the pool.'),
            type=attributes.Schema.STRING
        ),
        DESCRIPTION_ATTR: attributes.Schema(
            _('Description of the pool.'),
            type=attributes.Schema.STRING
        ),
        TENANT_ID: attributes.Schema(
            _('Tenant owning the pool.'),
            type=attributes.Schema.STRING
        ),
        VIP_ATTR: attributes.Schema(
            _('Vip associated with the pool.'),
            type=attributes.Schema.MAP
        ),
        PROVIDER_ATTR: attributes.Schema(
            _('Provider implementing this load balancer instance.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING,
        ),
    }

    def translation_rules(self, props):
        """Map deprecated subnet_id onto subnet and resolve names to IDs."""
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.SUBNET],
                value_path=[self.SUBNET_ID]
            ),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.SUBNET],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='subnet'
            ),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.VIP, self.VIP_SUBNET],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='subnet'
            )
        ]

    def validate(self):
        """Check that APP_COOKIE session persistence carries a cookie_name."""
        res = super(Pool, self).validate()
        if res:
            return res
        session_p = self.properties[self.VIP].get(self.VIP_SESSION_PERSISTENCE)
        if session_p is None:
            # session persistence is not configured, skip validation
            return

        persistence_type = session_p[self.VIP_SESSION_PERSISTENCE_TYPE]
        if persistence_type == 'APP_COOKIE':
            if session_p.get(self.VIP_SESSION_PERSISTENCE_COOKIE_NAME):
                return

            msg = _('Property cookie_name is required, when '
                    'session_persistence type is set to APP_COOKIE.')
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the pool, associate monitors, then create its VIP.

        The VIP id is stored in resource metadata (not resource_id) so that
        delete/attribute code can find it later.
        """
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        # Neutron's API expects subnet_id; vip and monitors are handled by
        # separate API calls, so pop them out of the pool request.
        subnet_id = properties.pop(self.SUBNET)
        properties['subnet_id'] = subnet_id
        vip_properties = properties.pop(self.VIP)
        monitors = properties.pop(self.MONITORS)

        pool = self.client().create_pool({'pool': properties})['pool']
        self.resource_id_set(pool['id'])

        for monitor in monitors:
            self.client().associate_health_monitor(
                pool['id'], {'health_monitor': {'id': monitor}})

        vip_arguments = self.prepare_properties(
            vip_properties,
            '%s.vip' % (self.name,))

        session_p = vip_arguments.get(self.VIP_SESSION_PERSISTENCE)
        if session_p is not None:
            prepared_props = self.prepare_properties(session_p, None)
            vip_arguments['session_persistence'] = prepared_props

        # The VIP always shares the pool's protocol.
        vip_arguments['protocol'] = self.properties[self.PROTOCOL]

        # Default the VIP to the pool's subnet when none was given.
        if vip_arguments.get(self.VIP_SUBNET) is None:
            vip_arguments['subnet_id'] = subnet_id
        else:
            vip_arguments['subnet_id'] = vip_arguments.pop(self.VIP_SUBNET)

        vip_arguments['pool_id'] = pool['id']
        vip = self.client().create_vip({'vip': vip_arguments})['vip']

        self.metadata_set({'vip': vip['id']})

    def _show_resource(self):
        """Return the pool dict as reported by Neutron."""
        return self.client().show_pool(self.resource_id)['pool']

    def check_create_complete(self, data):
        """Poll pool then VIP status until both are ACTIVE.

        Raises ResourceInError on ERROR status and ResourceUnknownStatus on
        anything unexpected.
        """
        attributes = self._show_resource()
        status = attributes['status']
        if status == 'PENDING_CREATE':
            return False
        elif status == 'ACTIVE':
            # The pool is up; now wait on the VIP created alongside it.
            vip_attributes = self.client().show_vip(
                self.metadata_get()['vip'])['vip']
            vip_status = vip_attributes['status']
            if vip_status == 'PENDING_CREATE':
                return False
            if vip_status == 'ACTIVE':
                return True
            if vip_status == 'ERROR':
                raise exception.ResourceInError(
                    resource_status=vip_status,
                    status_reason=_('error in vip'))
            raise exception.ResourceUnknownStatus(
                resource_status=vip_status,
                result=_('Pool creation failed due to vip'))
        elif status == 'ERROR':
            raise exception.ResourceInError(
                resource_status=status,
                status_reason=_('error in pool'))
        else:
            raise exception.ResourceUnknownStatus(
                resource_status=status,
                result=_('Pool creation failed'))

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Reconcile monitor associations, then push remaining changes."""
        if prop_diff:
            if self.MONITORS in prop_diff:
                monitors = set(prop_diff.pop(self.MONITORS))
                old_monitors = set(self.properties[self.MONITORS])
                # Disassociate removed monitors, associate newly added ones.
                for monitor in old_monitors - monitors:
                    self.client().disassociate_health_monitor(
                        self.resource_id, monitor)
                for monitor in monitors - old_monitors:
                    self.client().associate_health_monitor(
                        self.resource_id, {'health_monitor': {'id': monitor}})

            # prop_diff may now be empty if monitors was the only change.
            if prop_diff:
                self.client().update_pool(self.resource_id,
                                          {'pool': prop_diff})

    def _resolve_attribute(self, name):
        """Serve the vip attribute from the VIP id kept in metadata."""
        if name == self.VIP_ATTR:
            return self.client().show_vip(self.metadata_get()['vip'])['vip']
        return super(Pool, self)._resolve_attribute(name)

    def handle_delete(self):
        """Start deletion, returning a progress object for the poller.

        When there is no metadata the VIP was never created, so its steps
        are marked done up front.
        """
        if not self.resource_id:
            prg = progress.PoolDeleteProgress(True)
            return prg

        prg = progress.PoolDeleteProgress()
        if not self.metadata_get():
            prg.vip['delete_called'] = True
            prg.vip['deleted'] = True
        return prg

    def _delete_vip(self):
        """Delete the VIP; True if it was already gone."""
        return self._not_found_in_call(
            self.client().delete_vip, self.metadata_get()['vip'])

    def _check_vip_deleted(self):
        """True once the VIP can no longer be shown."""
        return self._not_found_in_call(
            self.client().show_vip, self.metadata_get()['vip'])

    def _delete_pool(self):
        """Delete the pool; True if it was already gone."""
        return self._not_found_in_call(
            self.client().delete_pool, self.resource_id)

    def check_delete_complete(self, prg):
        """Drive deletion step by step: VIP first, then the pool itself."""
        if not prg.vip['delete_called']:
            prg.vip['deleted'] = self._delete_vip()
            prg.vip['delete_called'] = True
            return False
        if not prg.vip['deleted']:
            prg.vip['deleted'] = self._check_vip_deleted()
            return False
        if not prg.pool['delete_called']:
            prg.pool['deleted'] = self._delete_pool()
            prg.pool['delete_called'] = True
            return prg.pool['deleted']
        if not prg.pool['deleted']:
            prg.pool['deleted'] = super(Pool, self).check_delete_complete(True)
            return prg.pool['deleted']
        return True
class LoadBalancer(stack_resource.StackResource):
    """Implements a HAProxy-bearing instance as a nested stack.

    The template for the nested stack can be redefined with
    ``loadbalancer_template`` option in ``heat.conf``.

    Generally the image used for the instance must have the following
    packages installed or available for installation at runtime::

        - heat-cfntools and its dependencies like python-psutil
        - cronie
        - socat
        - haproxy

    Current default builtin template uses Fedora 21 x86_64 base cloud image
    (https://getfedora.org/cloud/download/)
    and apart from installing packages goes through some hoops
    around SELinux due to pecularities of heat-cfntools.
    """

    PROPERTIES = (
        AVAILABILITY_ZONES, HEALTH_CHECK, INSTANCES, LISTENERS,
        APP_COOKIE_STICKINESS_POLICY, LBCOOKIE_STICKINESS_POLICY,
        SECURITY_GROUPS, SUBNETS,
    ) = (
        'AvailabilityZones', 'HealthCheck', 'Instances', 'Listeners',
        'AppCookieStickinessPolicy', 'LBCookieStickinessPolicy',
        'SecurityGroups', 'Subnets',
    )

    _HEALTH_CHECK_KEYS = (
        HEALTH_CHECK_HEALTHY_THRESHOLD, HEALTH_CHECK_INTERVAL,
        HEALTH_CHECK_TARGET, HEALTH_CHECK_TIMEOUT,
        HEALTH_CHECK_UNHEALTHY_THRESHOLD,
    ) = (
        'HealthyThreshold', 'Interval',
        'Target', 'Timeout',
        'UnhealthyThreshold',
    )

    _LISTENER_KEYS = (
        LISTENER_INSTANCE_PORT, LISTENER_LOAD_BALANCER_PORT,
        LISTENER_PROTOCOL, LISTENER_SSLCERTIFICATE_ID,
        LISTENER_POLICY_NAMES,
    ) = (
        'InstancePort', 'LoadBalancerPort',
        'Protocol', 'SSLCertificateId',
        'PolicyNames',
    )

    ATTRIBUTES = (
        CANONICAL_HOSTED_ZONE_NAME, CANONICAL_HOSTED_ZONE_NAME_ID, DNS_NAME,
        SOURCE_SECURITY_GROUP_GROUP_NAME, SOURCE_SECURITY_GROUP_OWNER_ALIAS,
    ) = (
        'CanonicalHostedZoneName', 'CanonicalHostedZoneNameID', 'DNSName',
        'SourceSecurityGroup.GroupName', 'SourceSecurityGroup.OwnerAlias',
    )

    properties_schema = {
        AVAILABILITY_ZONES: properties.Schema(
            properties.Schema.LIST,
            _('The Availability Zones in which to create the load '
              'balancer.'),
            required=True),
        HEALTH_CHECK: properties.Schema(
            properties.Schema.MAP,
            _('An application health check for the instances.'),
            schema={
                HEALTH_CHECK_HEALTHY_THRESHOLD: properties.Schema(
                    properties.Schema.INTEGER,
                    _('The number of consecutive health probe successes '
                      'required before moving the instance to the '
                      'healthy state.'),
                    required=True),
                HEALTH_CHECK_INTERVAL: properties.Schema(
                    properties.Schema.INTEGER,
                    _('The approximate interval, in seconds, between '
                      'health checks of an individual instance.'),
                    required=True),
                HEALTH_CHECK_TARGET: properties.Schema(
                    properties.Schema.STRING,
                    _('The port being checked.'),
                    required=True),
                HEALTH_CHECK_TIMEOUT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Health probe timeout, in seconds.'),
                    required=True),
                HEALTH_CHECK_UNHEALTHY_THRESHOLD: properties.Schema(
                    properties.Schema.INTEGER,
                    _('The number of consecutive health probe failures '
                      'required before moving the instance to the '
                      'unhealthy state'),
                    required=True),
            }),
        INSTANCES: properties.Schema(
            properties.Schema.LIST,
            _('The list of instance IDs load balanced.'),
            update_allowed=True),
        LISTENERS: properties.Schema(
            properties.Schema.LIST,
            _('One or more listeners for this load balancer.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    LISTENER_INSTANCE_PORT: properties.Schema(
                        properties.Schema.INTEGER,
                        _('TCP port on which the instance server is '
                          'listening.'),
                        required=True),
                    LISTENER_LOAD_BALANCER_PORT: properties.Schema(
                        properties.Schema.INTEGER,
                        _('The external load balancer port number.'),
                        required=True),
                    LISTENER_PROTOCOL: properties.Schema(
                        properties.Schema.STRING,
                        _('The load balancer transport protocol to use.'),
                        required=True,
                        constraints=[
                            constraints.AllowedValues(['TCP', 'HTTP']),
                        ]),
                    LISTENER_SSLCERTIFICATE_ID: properties.Schema(
                        properties.Schema.STRING,
                        _('Not Implemented.'),
                        implemented=False),
                    LISTENER_POLICY_NAMES: properties.Schema(
                        properties.Schema.LIST,
                        _('Not Implemented.'),
                        implemented=False),
                },
            ),
            required=True),
        APP_COOKIE_STICKINESS_POLICY: properties.Schema(
            properties.Schema.STRING,
            _('Not Implemented.'),
            implemented=False),
        LBCOOKIE_STICKINESS_POLICY: properties.Schema(
            properties.Schema.STRING,
            _('Not Implemented.'),
            implemented=False),
        SECURITY_GROUPS: properties.Schema(
            properties.Schema.LIST,
            _('List of Security Groups assigned on current LB.'),
            update_allowed=True),
        SUBNETS: properties.Schema(
            properties.Schema.LIST,
            _('Not Implemented.'),
            implemented=False),
    }

    attributes_schema = {
        CANONICAL_HOSTED_ZONE_NAME: attributes.Schema(
            _("The name of the hosted zone that is associated with the "
              "LoadBalancer."),
            type=attributes.Schema.STRING),
        CANONICAL_HOSTED_ZONE_NAME_ID: attributes.Schema(
            _("The ID of the hosted zone name that is associated with the "
              "LoadBalancer."),
            type=attributes.Schema.STRING),
        DNS_NAME: attributes.Schema(
            _("The DNS name for the LoadBalancer."),
            type=attributes.Schema.STRING),
        SOURCE_SECURITY_GROUP_GROUP_NAME: attributes.Schema(
            _("The security group that you can use as part of your inbound "
              "rules for your LoadBalancer's back-end instances."),
            type=attributes.Schema.STRING),
        SOURCE_SECURITY_GROUP_OWNER_ALIAS: attributes.Schema(
            _("Owner of the source security group."),
            type=attributes.Schema.STRING),
    }

    def _haproxy_config_global(self):
        """Return the static global/defaults sections of haproxy.cfg."""
        return '''
global
    daemon
    maxconn 256
    stats socket /tmp/.haproxy-stats

defaults
    mode http
    timeout connect 5000ms
    timeout client 50000ms
    timeout server 50000ms
'''

    def _haproxy_config_frontend(self):
        """Return the frontend section bound to the first listener's port."""
        # initial simplification: only the first listener is honoured.
        listener = self.properties[self.LISTENERS][0]
        lb_port = listener[self.LISTENER_LOAD_BALANCER_PORT]
        return '''
frontend http
    bind *:%s
    default_backend servers
''' % (lb_port)

    def _haproxy_config_backend(self):
        """Return the backend section, with a check timeout if configured."""
        health_chk = self.properties[self.HEALTH_CHECK]
        if health_chk:
            timeout = int(health_chk[self.HEALTH_CHECK_TIMEOUT])
            timeout_check = 'timeout check %ds' % timeout
            spaces = '    '
        else:
            timeout_check = ''
            spaces = ''

        return '''
backend servers
    balance roundrobin
    option http-server-close
    option forwardfor
    option httpchk
%s%s
''' % (spaces, timeout_check)

    def _haproxy_config_servers(self, instances):
        """Return one haproxy 'server' line per balanced instance.

        Instances without a resolvable address fall back to 0.0.0.0 so the
        config stays syntactically valid.
        """
        listener = self.properties[self.LISTENERS][0]
        inst_port = listener[self.LISTENER_INSTANCE_PORT]
        spaces = '    '
        check = ''
        health_chk = self.properties[self.HEALTH_CHECK]
        if health_chk:
            check = ' check inter %ss fall %s rise %s' % (
                health_chk[self.HEALTH_CHECK_INTERVAL],
                health_chk[self.HEALTH_CHECK_UNHEALTHY_THRESHOLD],
                health_chk[self.HEALTH_CHECK_HEALTHY_THRESHOLD])

        servers = []
        nova_cp = self.client_plugin('nova')
        for n, i in enumerate(instances or [], 1):
            ip = nova_cp.server_to_ipaddress(i) or '0.0.0.0'
            LOG.debug('haproxy server:%s' % ip)
            servers.append('%sserver server%d %s:%s%s' % (spaces, n,
                                                          ip, inst_port,
                                                          check))
        return '\n'.join(servers)

    def _haproxy_config(self, instances):
        """Assemble the full haproxy.cfg contents for *instances*."""
        # initial simplifications:
        # - only one Listener
        # - only http (no tcp or ssl)
        #
        # option httpchk HEAD /check.txt HTTP/1.0
        return '%s%s%s%s\n' % (self._haproxy_config_global(),
                               self._haproxy_config_frontend(),
                               self._haproxy_config_backend(),
                               self._haproxy_config_servers(instances))

    def get_parsed_template(self):
        """Return the nested-stack template, honouring the config override."""
        if cfg.CONF.loadbalancer_template:
            with open(cfg.CONF.loadbalancer_template) as templ_fd:
                LOG.info(_LI('Using custom loadbalancer template %s'),
                         cfg.CONF.loadbalancer_template)
                contents = templ_fd.read()
        else:
            contents = lb_template_default
        return template_format.parse(contents)

    def child_params(self):
        """Return parameters passed down to the nested stack."""
        params = {}

        params['SecurityGroups'] = self.properties[self.SECURITY_GROUPS]
        # If the owning stack defines KeyName, we use that key for the nested
        # template, otherwise use no key
        for magic_param in ('KeyName', 'LbFlavor', 'LBTimeout', 'LbImageId'):
            if magic_param in self.stack.parameters:
                params[magic_param] = self.stack.parameters[magic_param]

        return params

    def child_template(self):
        """Return the nested template, dropping KeyName when unavailable."""
        templ = self.get_parsed_template()

        # If the owning stack defines KeyName, we use that key for the nested
        # template, otherwise use no key
        if 'KeyName' not in self.stack.parameters:
            del templ['Resources']['LB_instance']['Properties']['KeyName']
            del templ['Parameters']['KeyName']
        return templ

    def handle_create(self):
        """Create the nested stack, seeding haproxy.cfg from the instances."""
        templ = self.child_template()
        params = self.child_params()

        if self.properties[self.INSTANCES]:
            md = templ['Resources']['LB_instance']['Metadata']
            files = md['AWS::CloudFormation::Init']['config']['files']
            # NOTE: do not name this local 'cfg' - that would shadow the
            # module-level oslo.config 'cfg' used by get_parsed_template()
            # and validate().
            haproxy_cfg = self._haproxy_config(
                self.properties[self.INSTANCES])
            files['/etc/haproxy/haproxy.cfg']['content'] = haproxy_cfg

        return self.create_with_template(templ, params)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-generate the Metadata.

        Save it to the db.
        Rely on the cfn-hup to reconfigure HAProxy.
        """
        new_props = json_snippet.properties(self.properties_schema,
                                            self.context)

        # Valid use cases are:
        # - Membership controlled by members property in template
        # - Empty members property in template; membership controlled by
        #   "updates" triggered from autoscaling group.
        # Mixing the two will lead to undefined behaviour.
        if (self.INSTANCES in prop_diff and
                (self.properties[self.INSTANCES] is not None or
                 new_props[self.INSTANCES] is not None)):
            # See the naming note in handle_create: avoid shadowing 'cfg'.
            haproxy_cfg = self._haproxy_config(prop_diff[self.INSTANCES])

            md = self.nested()['LB_instance'].metadata_get()
            files = md['AWS::CloudFormation::Init']['config']['files']
            files['/etc/haproxy/haproxy.cfg']['content'] = haproxy_cfg

            self.nested()['LB_instance'].metadata_set(md)

        if self.SECURITY_GROUPS in prop_diff:
            templ = self.child_template()
            params = self.child_params()
            params['SecurityGroups'] = new_props[self.SECURITY_GROUPS]
            self.update_with_template(templ, params)

    def check_update_complete(self, updater):
        """Because we are not calling update_with_template, return True."""
        return True

    def validate(self):
        """Validate any of the provided params."""
        res = super(LoadBalancer, self).validate()
        if res:
            return res

        if (cfg.CONF.loadbalancer_template and
                not os.access(cfg.CONF.loadbalancer_template, os.R_OK)):
            msg = _('Custom LoadBalancer template can not be found')
            raise exception.StackValidationFailed(message=msg)

        health_chk = self.properties[self.HEALTH_CHECK]
        if health_chk:
            interval = float(health_chk[self.HEALTH_CHECK_INTERVAL])
            timeout = float(health_chk[self.HEALTH_CHECK_TIMEOUT])
            if interval < timeout:
                # Legacy CFN-style validation result (dict, not exception).
                return {'Error': 'Interval must be larger than Timeout'}

    def get_reference_id(self):
        return six.text_type(self.name)

    def _resolve_attribute(self, name):
        """We don't really support any of these yet."""
        if name == self.DNS_NAME:
            return self.get_output('PublicIp')
        elif name in self.attributes_schema:
            # Not sure if we should return anything for the other attribs
            # since they aren't really supported in any meaningful way
            return ''
class PoolMember(neutron.NeutronResource):
    """A resource to handle loadbalancer members.

    A pool member represents the application running on backend server.
    """

    required_service_extension = 'lbaas'

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        POOL_ID, ADDRESS, PROTOCOL_PORT, WEIGHT, ADMIN_STATE_UP,
    ) = (
        'pool_id', 'address', 'protocol_port', 'weight', 'admin_state_up',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, TENANT_ID, WEIGHT_ATTR, ADDRESS_ATTR,
        POOL_ID_ATTR, PROTOCOL_PORT_ATTR,
    ) = (
        'admin_state_up', 'tenant_id', 'weight', 'address',
        'pool_id', 'protocol_port',
    )

    properties_schema = {
        POOL_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the load balancing pool.'),
            required=True,
            update_allowed=True),
        ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address of the pool member on the pool network.'),
            required=True,
            constraints=[constraints.CustomConstraint('ip_addr')]),
        PROTOCOL_PORT: properties.Schema(
            properties.Schema.INTEGER,
            _('TCP port on which the pool member listens for requests or '
              'connections.'),
            required=True,
            constraints=[constraints.Range(0, 65535)]),
        WEIGHT: properties.Schema(
            properties.Schema.INTEGER,
            _('Weight of pool member in the pool (default to 1).'),
            constraints=[constraints.Range(0, 256)],
            update_allowed=True),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the pool member.'),
            default=True),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _('The administrative state of this pool member.'),
            type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(
            _('Tenant owning the pool member.'),
            type=attributes.Schema.STRING),
        WEIGHT_ATTR: attributes.Schema(
            _('Weight of the pool member in the pool.'),
            type=attributes.Schema.STRING),
        ADDRESS_ATTR: attributes.Schema(
            _('IP address of the pool member.'),
            type=attributes.Schema.STRING),
        POOL_ID_ATTR: attributes.Schema(
            _('The ID of the load balancing pool.'),
            type=attributes.Schema.STRING),
        PROTOCOL_PORT_ATTR: attributes.Schema(
            _('TCP port on which the pool member listens for requests or '
              'connections.'),
            type=attributes.Schema.STRING),
    }

    def handle_create(self):
        """Create the member in Neutron and record its ID."""
        member_args = {
            'pool_id': self.properties[self.POOL_ID],
            'address': self.properties[self.ADDRESS],
            'protocol_port': self.properties[self.PROTOCOL_PORT],
            'admin_state_up': self.properties[self.ADMIN_STATE_UP],
        }
        # Weight is optional; only pass it through when it was supplied.
        weight = self.properties[self.WEIGHT]
        if weight is not None:
            member_args['weight'] = weight

        created = self.client().create_member(
            {'member': member_args})['member']
        self.resource_id_set(created['id'])

    def _show_resource(self):
        """Fetch the live member record for attribute resolution."""
        return self.client().show_member(self.resource_id)['member']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push any changed, update-allowed properties to Neutron."""
        if not prop_diff:
            return
        self.client().update_member(self.resource_id, {'member': prop_diff})

    def handle_delete(self):
        """Delete the member, tolerating one that is already gone.

        Returns True when the delete call was issued successfully, so the
        engine waits for the member to actually disappear; returns None
        when there is nothing to do (no id, or already not found).
        """
        if not self.resource_id:
            return
        try:
            self.client().delete_member(self.resource_id)
        except Exception as ex:
            # Re-raises anything that is not a not-found error.
            self.client_plugin().ignore_not_found(ex)
            return
        return True
class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
    """AWS-compatible autoscaling group backed by a nested stack.

    NOTE(review): docstring added. This variant additionally registers the
    group with a "groupwatch" service and a scheduler job (FusionSphere
    extension) when ``cfg.CONF.FusionSphere.groupwatch_enable`` is set.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        AVAILABILITY_ZONES, LAUNCH_CONFIGURATION_NAME, MAX_SIZE, MIN_SIZE,
        COOLDOWN, DESIRED_CAPACITY, HEALTH_CHECK_GRACE_PERIOD,
        HEALTH_CHECK_TYPE, LOAD_BALANCER_NAMES, VPCZONE_IDENTIFIER, TAGS,
        INSTANCE_ID,
    ) = (
        'AvailabilityZones', 'LaunchConfigurationName', 'MaxSize', 'MinSize',
        'Cooldown', 'DesiredCapacity', 'HealthCheckGracePeriod',
        'HealthCheckType', 'LoadBalancerNames', 'VPCZoneIdentifier', 'Tags',
        'InstanceId',
    )

    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )

    # NOTE(review): the parentheses below do NOT create tuples — there is no
    # trailing comma, so both names bind to the same plain string.
    _UPDATE_POLICY_SCHEMA_KEYS = (ROLLING_UPDATE) = (
        'AutoScalingRollingUpdate')

    _ROLLING_UPDATE_SCHEMA_KEYS = (MIN_INSTANCES_IN_SERVICE, MAX_BATCH_SIZE,
                                   PAUSE_TIME) = ('MinInstancesInService',
                                                  'MaxBatchSize',
                                                  'PauseTime')

    ATTRIBUTES = (INSTANCE_LIST, ) = ('InstanceList', )

    properties_schema = {
        AVAILABILITY_ZONES: properties.Schema(
            properties.Schema.LIST,
            _('Not Implemented.'),
            required=True),
        LAUNCH_CONFIGURATION_NAME: properties.Schema(
            properties.Schema.STRING,
            _('The reference to a LaunchConfiguration resource.'),
            update_allowed=True),
        INSTANCE_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of an existing instance to use to '
              'create the Auto Scaling group. If specify this property, '
              'will create the group use an existing instance instead of '
              'a launch configuration.'),
            constraints=[constraints.CustomConstraint("nova.server")]),
        MAX_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of instances in the group.'),
            required=True,
            update_allowed=True),
        MIN_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Minimum number of instances in the group.'),
            required=True,
            update_allowed=True),
        COOLDOWN: properties.Schema(
            properties.Schema.INTEGER,
            _('Cooldown period, in seconds.'),
            update_allowed=True),
        DESIRED_CAPACITY: properties.Schema(
            properties.Schema.INTEGER,
            _('Desired initial number of instances.'),
            update_allowed=True),
        HEALTH_CHECK_GRACE_PERIOD: properties.Schema(
            properties.Schema.INTEGER,
            _('Not Implemented.'),
            implemented=False),
        HEALTH_CHECK_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Not Implemented.'),
            constraints=[
                constraints.AllowedValues(['EC2', 'ELB']),
            ],
            implemented=False),
        LOAD_BALANCER_NAMES: properties.Schema(
            properties.Schema.LIST,
            _('List of LoadBalancer resources.')),
        VPCZONE_IDENTIFIER: properties.Schema(
            properties.Schema.LIST,
            _('Use only with Neutron, to list the internal subnet to '
              'which the instance will be attached; '
              'needed only if multiple exist; '
              'list length must be exactly 1.'),
            schema=properties.Schema(
                properties.Schema.STRING,
                _('UUID of the internal subnet to which the instance '
                  'will be attached.'))),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            _('Tags to attach to this group.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    TAG_KEY: properties.Schema(
                        properties.Schema.STRING,
                        required=True),
                    TAG_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        required=True),
                },
            )),
    }

    attributes_schema = {
        INSTANCE_LIST: attributes.Schema(
            _("A comma-delimited list of server ip addresses. "
              "(Heat extension)."),
            type=attributes.Schema.STRING),
    }

    # Defaults for the AutoScalingRollingUpdate update policy.
    rolling_update_schema = {
        MIN_INSTANCES_IN_SERVICE: properties.Schema(
            properties.Schema.INTEGER,
            default=0),
        MAX_BATCH_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            default=1),
        PAUSE_TIME: properties.Schema(
            properties.Schema.STRING,
            default='PT0S')
    }

    update_policy_schema = {
        ROLLING_UPDATE: properties.Schema(
            properties.Schema.MAP,
            schema=rolling_update_schema)
    }

    def handle_create(self):
        # Create the nested stack that holds the group's instances.
        return self.create_with_template(self.child_template())

    def _get_members(self, group_id):
        """Collect every COMPLETE Nova server in the stack tree.

        Returns a list of member dicts in the shape the groupwatch service
        expects (id/name/group_id).
        """
        members = []
        for res in self.stack.iter_resources(cfg.CONF.max_nested_stack_depth):
            if (res.type() in ['OS::Nova::Server'] and
                    res.status == res.COMPLETE):
                members.append({
                    'id': res.resource_id,
                    'name': res.name,
                    'group_id': group_id
                })
        return members

    def _add_scheduler(self, group_id):
        """Register a periodic scheduler job for the group.

        Returns the created job id. NOTE(review): interval/end_time are
        magic values whose semantics belong to the external scheduler
        service — not verifiable from this file.
        """
        task_args = {
            'group_name': 'groupwatch',
            'job_name': group_id,
            'job_type': 'period',
            'trigger_type': 'SIMPLE_TRIGGER',
            'interval': 240,
            'cover_flag': 'true',
            'end_time': 4076884800000,
            'meta_data': {
                'group_id': group_id,
                'project_id': self.context.tenant_id
            }
        }
        rsp = self.client('scheduler').scheduler.create(**task_args)
        return rsp.get('job_id')

    def _create_groupwatch(self):
        # No-op unless the FusionSphere groupwatch integration is enabled.
        if not cfg.CONF.FusionSphere.groupwatch_enable:
            return

        group_id = self.stack.resource_by_refid(self.FnGetRefId()).resource_id
        members = self._get_members(group_id)
        job_id = self._add_scheduler(group_id)
        kwargs = {
            'id': group_id,
            'name': self.name,
            'type': 'VM',
            'data': {
                'scheduler_job_id': job_id
            },
            'members': members
        }
        self.client('groupwatch').groups.create(**kwargs)

    def _make_launch_config_resource(self, name, props):
        # Build a transient (never stored) LaunchConfiguration resource so
        # its properties can be resolved like a real one.
        lc_res_type = 'AWS::AutoScaling::LaunchConfiguration'
        lc_res_def = rsrc_defn.ResourceDefinition(name,
                                                  lc_res_type,
                                                  props)
        lc_res = resource.Resource(name, lc_res_def, self.stack)
        return lc_res

    def _get_conf_properties(self):
        """Return (config resource, instance properties) for the group.

        When InstanceId is given, derive the launch configuration from the
        existing server; otherwise defer to the parent implementation.
        """
        instance_id = self.properties.get(self.INSTANCE_ID)
        if instance_id:
            server = self.client_plugin('nova').get_server(instance_id)
            instance_props = {
                'ImageId': server.image['id'],
                'InstanceType': server.flavor['id'],
                'KeyName': server.key_name,
                'SecurityGroups': [sg['name']
                                   for sg in server.security_groups]
            }
            conf = self._make_launch_config_resource(self.name,
                                                     instance_props)
            props = function.resolve(conf.properties.data)
        else:
            conf, props = super(AutoScalingGroup, self)._get_conf_properties()

        # Single-subnet constraint is enforced in validate(); index 0 here.
        vpc_zone_ids = self.properties.get(self.VPCZONE_IDENTIFIER)
        if vpc_zone_ids:
            props['SubnetId'] = vpc_zone_ids[0]

        return conf, props

    def check_create_complete(self, task):
        """Update cooldown timestamp after create succeeds."""
        done = super(AutoScalingGroup, self).check_create_complete(task)
        if done:
            # Register with groupwatch once the nested stack is complete.
            self._create_groupwatch()
            self._finished_scaling(
                "%s : %s" % (sc_util.CFN_EXACT_CAPACITY,
                             grouputils.get_size(self)))
        return done

    def check_update_complete(self, cookie):
        """Update the cooldown timestamp after update succeeds."""
        done = super(AutoScalingGroup, self).check_update_complete(cookie)
        if done:
            self._finished_scaling(
                "%s : %s" % (sc_util.CFN_EXACT_CAPACITY,
                             grouputils.get_size(self)))
        return done

    def _get_new_capacity(self, capacity,
                          adjustment,
                          adjustment_type=sc_util.CFN_EXACT_CAPACITY,
                          min_adjustment_step=None):
        # Clamp the adjusted capacity between MinSize and MaxSize.
        lower = self.properties[self.MIN_SIZE]
        upper = self.properties[self.MAX_SIZE]
        return sc_util.calculate_new_capacity(capacity, adjustment,
                                              adjustment_type,
                                              min_adjustment_step,
                                              lower, upper)

    def _update_groupwatch(self):
        # Best-effort refresh of the groupwatch membership list.
        if not cfg.CONF.FusionSphere.groupwatch_enable:
            return

        group_id = self.stack.resource_by_refid(self.FnGetRefId()).resource_id
        members = self._get_members(group_id)
        kwargs = {
            'id': group_id,
            'name': self.name,
            'type': 'VM',
            'members': members
        }
        self.client('groupwatch').groups.update(group_id, **kwargs)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Updates self.properties, if Properties has changed.

        If Properties has changed, update self.properties, so we get the new
        values during any subsequent adjustment.
        """
        if tmpl_diff:
            # parse update policy
            if 'UpdatePolicy' in tmpl_diff:
                up = json_snippet.update_policy(self.update_policy_schema,
                                                self.context)
                self.update_policy = up

        self.properties = json_snippet.properties(self.properties_schema,
                                                  self.context)
        if prop_diff:
            # Replace instances first if launch configuration has changed
            self._try_rolling_update(prop_diff)

        # Update will happen irrespective of whether auto-scaling
        # is in progress or not.
        capacity = grouputils.get_size(self)
        # NOTE(review): a DesiredCapacity of 0 is falsy and therefore falls
        # back to the current capacity here — preserved as found.
        desired_capacity = self.properties[self.DESIRED_CAPACITY] or capacity
        new_capacity = self._get_new_capacity(capacity, desired_capacity)
        self.resize(new_capacity)

    def adjust(self, adjustment,
               adjustment_type=sc_util.CFN_CHANGE_IN_CAPACITY,
               min_adjustment_step=None):
        """Adjust the size of the scaling group if the cooldown permits."""
        if not self._is_scaling_allowed():
            LOG.info(_LI("%(name)s NOT performing scaling adjustment, "
                         "cooldown %(cooldown)s"),
                     {'name': self.name,
                      'cooldown': self.properties[self.COOLDOWN]})
            raise exception.NoActionRequired()

        capacity = grouputils.get_size(self)
        new_capacity = self._get_new_capacity(capacity, adjustment,
                                              adjustment_type,
                                              min_adjustment_step)

        changed_size = new_capacity != capacity
        # send a notification before, on-error and on-success.
        notif = {
            'stack': self.stack,
            'adjustment': adjustment,
            'adjustment_type': adjustment_type,
            'capacity': capacity,
            'groupname': self.FnGetRefId(),
            'message': _("Start resizing the group %(group)s") % {
                'group': self.FnGetRefId()},
            'suffix': 'start',
        }
        notification.send(**notif)
        try:
            self.resize(new_capacity)
        except Exception as resize_ex:
            # Re-raise after a best-effort error notification; a failure in
            # the notification itself is logged but never masks resize_ex.
            with excutils.save_and_reraise_exception():
                try:
                    notif.update({
                        'suffix': 'error',
                        'message': six.text_type(resize_ex),
                        'capacity': grouputils.get_size(self),
                    })
                    notification.send(**notif)
                except Exception:
                    LOG.exception(_LE('Failed sending error notification'))
        else:
            notif.update({
                'suffix': 'end',
                'capacity': new_capacity,
                'message': _("End resizing the group %(group)s") % {
                    'group': notif['groupname']},
            })
            notification.send(**notif)
        finally:
            # Always sync groupwatch and stamp the cooldown, success or not.
            self._update_groupwatch()
            self._finished_scaling("%s : %s" % (adjustment_type, adjustment),
                                   changed_size=changed_size)
        return changed_size

    def _tags(self):
        """Add Identifying Tags to all servers in the group.

        This is so the Dimensions received from cfn-push-stats all include
        the groupname and stack id.
        Note: the group name must match what is returned from FnGetRefId
        """
        autoscaling_tag = [{
            self.TAG_KEY: 'metering.AutoScalingGroupName',
            self.TAG_VALUE: self.FnGetRefId()
        }]
        return super(AutoScalingGroup, self)._tags() + autoscaling_tag

    def validate(self):
        # check validity of group size
        min_size = self.properties[self.MIN_SIZE]
        max_size = self.properties[self.MAX_SIZE]

        if max_size < min_size:
            msg = _("MinSize can not be greater than MaxSize")
            raise exception.StackValidationFailed(message=msg)

        if min_size < 0:
            msg = _("The size of AutoScalingGroup can not be less than zero")
            raise exception.StackValidationFailed(message=msg)

        if self.properties[self.DESIRED_CAPACITY] is not None:
            desired_capacity = self.properties[self.DESIRED_CAPACITY]
            if desired_capacity < min_size or desired_capacity > max_size:
                msg = _("DesiredCapacity must be between MinSize and MaxSize")
                raise exception.StackValidationFailed(message=msg)

        # TODO(pasquier-s): once Neutron is able to assign subnets to
        # availability zones, it will be possible to specify multiple subnets.
        # For now, only one subnet can be specified. The bug #1096017 tracks
        # this issue.
        if (self.properties.get(self.VPCZONE_IDENTIFIER) and
                len(self.properties[self.VPCZONE_IDENTIFIER]) != 1):
            raise exception.NotSupported(
                feature=_("Anything other than one VPCZoneIdentifier"))

        # validate properties InstanceId and LaunchConfigurationName
        # for aws auto scaling group.
        # should provide just only one of
        if self.type() == 'AWS::AutoScaling::AutoScalingGroup':
            instanceId = self.properties.get(self.INSTANCE_ID)
            launch_config = self.properties.get(
                self.LAUNCH_CONFIGURATION_NAME)
            # Exactly one of InstanceId / LaunchConfigurationName.
            if bool(instanceId) == bool(launch_config):
                msg = _("Either 'InstanceId' or 'LaunchConfigurationName' "
                        "must be provided.")
                raise exception.StackValidationFailed(message=msg)
        super(AutoScalingGroup, self).validate()

    def _resolve_attribute(self, name):
        """Resolves the resource's attributes.

        heat extension: "InstanceList" returns comma delimited list of server
        ip addresses.
        """
        if name == self.INSTANCE_LIST:
            return u','.join(inst.FnGetAtt('PublicIp')
                             for inst in grouputils.get_members(self)) or None

    def child_template(self):
        # Initial size: DesiredCapacity when given (and non-zero), else
        # MinSize.
        if self.properties[self.DESIRED_CAPACITY]:
            num_instances = self.properties[self.DESIRED_CAPACITY]
        else:
            num_instances = self.properties[self.MIN_SIZE]
        return self._create_template(num_instances)

    def _delete_groupwatch(self):
        """Best-effort teardown of the groupwatch group and its scheduler job.

        Every step tolerates the remote object being absent; only genuinely
        unexpected client errors propagate (via ignore_not_found re-raise).
        """
        if not cfg.CONF.FusionSphere.groupwatch_enable:
            return
        if not self.resource_id:
            return

        group = None
        try:
            group = self.client('groupwatch').groups.get(self.resource_id)
        except Exception as ex:
            self.client_plugin('groupwatch').ignore_not_found(ex)
            return

        try:
            if (group and group.get('group') and
                    'data' in group.get('group')):
                scheduler_job_id = \
                    group.get('group').get('data').get('scheduler_job_id')
                self.client('scheduler').scheduler.delete(scheduler_job_id)
        except (AttributeError, KeyError):
            # do nothing
            pass
        except Exception as ex:
            self.client_plugin('scheduler').ignore_not_found(ex)

        try:
            self.client('groupwatch').groups.delete(self.resource_id)
        except Exception as ex:
            self.client_plugin('groupwatch').ignore_not_found(ex)

    def handle_delete(self):
        # Tear down groupwatch state first, then the nested stack.
        self._delete_groupwatch()
        return self.delete_nested()

    def handle_metadata_reset(self):
        # Clear a stale in-progress marker so future scaling is not blocked.
        metadata = self.metadata_get()
        if 'scaling_in_progress' in metadata:
            metadata['scaling_in_progress'] = False
        self.metadata_set(metadata)