class OSDBInstance(resource.Resource):
    """OpenStack cloud database (Trove) instance resource.

    Creates a Trove database instance with an attached volume and,
    optionally, initial databases, users, NICs, a restore point and
    replication settings.
    """

    support_status = support.SupportStatus(version='2014.1')

    # Trove instance states this resource reacts to while polling.
    TROVE_STATUS = (
        ERROR, FAILED, ACTIVE,
    ) = (
        'ERROR', 'FAILED', 'ACTIVE',
    )

    # Human-readable explanations surfaced when creation ends in a
    # terminal bad state.
    TROVE_STATUS_REASON = {
        FAILED: _('The database instance was created, but heat failed to set '
                  'up the datastore. If a database instance is in the FAILED '
                  'state, it should be deleted and a new one should be '
                  'created.'),
        ERROR: _('The last operation for the database instance failed due to '
                 'an error.'),
    }

    # States that abort the create poll loop with ResourceInError.
    BAD_STATUSES = (ERROR, FAILED)

    PROPERTIES = (
        NAME, FLAVOR, SIZE, DATABASES, USERS, AVAILABILITY_ZONE,
        RESTORE_POINT, DATASTORE_TYPE, DATASTORE_VERSION, NICS,
        REPLICA_OF, REPLICA_COUNT,
    ) = (
        'name', 'flavor', 'size', 'databases', 'users', 'availability_zone',
        'restore_point', 'datastore_type', 'datastore_version', 'networks',
        'replica_of', 'replica_count',
    )

    _DATABASE_KEYS = (
        DATABASE_CHARACTER_SET, DATABASE_COLLATE, DATABASE_NAME,
    ) = (
        'character_set', 'collate', 'name',
    )

    _USER_KEYS = (
        USER_NAME, USER_PASSWORD, USER_HOST, USER_DATABASES,
    ) = (
        'name', 'password', 'host', 'databases',
    )

    _NICS_KEYS = (
        NET, PORT, V4_FIXED_IP,
    ) = (
        'network', 'port', 'fixed_ip',
    )

    ATTRIBUTES = (
        HOSTNAME, HREF,
    ) = (
        'hostname', 'href',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the DB instance to create.'),
            constraints=[
                constraints.Length(max=255),
            ]
        ),
        FLAVOR: properties.Schema(
            properties.Schema.STRING,
            _('Reference to a flavor for creating DB instance.'),
            required=True,
            constraints=[constraints.CustomConstraint('trove.flavor')]
        ),
        DATASTORE_TYPE: properties.Schema(
            properties.Schema.STRING,
            _("Name of registered datastore type."),
            constraints=[constraints.Length(max=255)]
        ),
        DATASTORE_VERSION: properties.Schema(
            properties.Schema.STRING,
            _("Name of the registered datastore version. "
              "It must exist for provided datastore type. "
              "Defaults to using single active version. "
              "If several active versions exist for provided datastore type, "
              "explicit value for this parameter must be specified."),
            constraints=[constraints.Length(max=255)]
        ),
        SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Database volume size in GB.'),
            required=True,
            constraints=[
                constraints.Range(1, 150),
            ]
        ),
        NICS: properties.Schema(
            properties.Schema.LIST,
            _("List of network interfaces to create on instance."),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NET: properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of the network to attach this NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT, 'net': NET},
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ]),
                    PORT: properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of Neutron port to attach this '
                          'NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT, 'net': NET},
                        constraints=[
                            constraints.CustomConstraint('neutron.port')
                        ],
                    ),
                    V4_FIXED_IP: properties.Schema(
                        properties.Schema.STRING,
                        _('Fixed IPv4 address for this NIC.'),
                        constraints=[
                            constraints.CustomConstraint('ip_addr')
                        ]),
                },
            ),
        ),
        DATABASES: properties.Schema(
            properties.Schema.LIST,
            _('List of databases to be created on DB instance creation.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    DATABASE_CHARACTER_SET: properties.Schema(
                        properties.Schema.STRING,
                        _('Set of symbols and encodings.'),
                        default='utf8'),
                    DATABASE_COLLATE: properties.Schema(
                        properties.Schema.STRING,
                        _('Set of rules for comparing characters in a '
                          'character set.'),
                        default='utf8_general_ci'),
                    DATABASE_NAME: properties.Schema(
                        properties.Schema.STRING,
                        _('Specifies database names for creating '
                          'databases on instance creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=64),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                },
            )
        ),
        USERS: properties.Schema(
            properties.Schema.LIST,
            _('List of users to be created on DB instance creation.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    USER_NAME: properties.Schema(
                        properties.Schema.STRING,
                        _('User name to create a user on instance '
                          'creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=16),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                    USER_PASSWORD: properties.Schema(
                        properties.Schema.STRING,
                        _('Password for those users on instance '
                          'creation.'),
                        required=True,
                        constraints=[
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                    USER_HOST: properties.Schema(
                        properties.Schema.STRING,
                        _('The host from which a user is allowed to '
                          'connect to the database.'),
                        default='%'),
                    USER_DATABASES: properties.Schema(
                        properties.Schema.LIST,
                        _('Names of databases that those users can '
                          'access on instance creation.'),
                        schema=properties.Schema(
                            properties.Schema.STRING,
                        ),
                        required=True,
                        constraints=[
                            constraints.Length(min=1),
                        ]),
                },
            )
        ),
        AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _('Name of the availability zone for DB instance.')
        ),
        RESTORE_POINT: properties.Schema(
            properties.Schema.STRING,
            _('DB instance restore point.')
        ),
        REPLICA_OF: properties.Schema(
            properties.Schema.STRING,
            _('Identifier of the source instance to replicate.'),
            support_status=support.SupportStatus(version='5.0.0')
        ),
        REPLICA_COUNT: properties.Schema(
            properties.Schema.INTEGER,
            _('The number of replicas to be created.'),
            support_status=support.SupportStatus(version='5.0.0')
        ),
    }

    attributes_schema = {
        HOSTNAME: attributes.Schema(
            _("Hostname of the instance."),
            type=attributes.Schema.STRING
        ),
        HREF: attributes.Schema(
            _("Api endpoint reference of the instance."),
            type=attributes.Schema.STRING
        ),
    }

    default_client_name = 'trove'

    entity = 'instances'

    def __init__(self, name, json_snippet, stack):
        super(OSDBInstance, self).__init__(name, json_snippet, stack)
        # Lazily-populated caches; see href() and the dbinstance property.
        self._href = None
        self._dbinstance = None

    @property
    def dbinstance(self):
        """Get the trove dbinstance, fetching and caching it on first use."""
        if not self._dbinstance and self.resource_id:
            self._dbinstance = self.client().instances.get(self.resource_id)

        return self._dbinstance

    def _dbinstance_name(self):
        """Return the NAME property, or a generated physical name if unset."""
        name = self.properties[self.NAME]
        if name:
            return name

        return self.physical_resource_name()

    def handle_create(self):
        """Create cloud database instance.

        Returns the new instance id, which is handed to
        check_create_complete() for polling.
        """
        self.flavor = self.client_plugin().get_flavor_id(
            self.properties[self.FLAVOR])
        self.volume = {'size': self.properties[self.SIZE]}
        self.databases = self.properties[self.DATABASES]
        self.users = self.properties[self.USERS]
        restore_point = self.properties[self.RESTORE_POINT]
        if restore_point:
            # troveclient expects the restore point wrapped in a dict.
            restore_point = {"backupRef": restore_point}
        zone = self.properties[self.AVAILABILITY_ZONE]
        self.datastore_type = self.properties[self.DATASTORE_TYPE]
        self.datastore_version = self.properties[self.DATASTORE_VERSION]
        replica_of = self.properties[self.REPLICA_OF]
        replica_count = self.properties[self.REPLICA_COUNT]

        # convert user databases to format required for troveclient.
        # that is, list of database dictionaries
        for user in self.users:
            dbs = [{'name': db} for db in user.get(self.USER_DATABASES, [])]
            user[self.USER_DATABASES] = dbs

        # convert networks to format required by troveclient
        nics = []
        for nic in self.properties[self.NICS]:
            nic_dict = {}
            net = nic.get(self.NET)
            if net:
                if self.is_using_neutron():
                    net_id = (self.client_plugin(
                        'neutron').find_neutron_resource(
                            nic, self.NET, 'network'))
                else:
                    net_id = (self.client_plugin(
                        'nova').get_nova_network_id(net))
                nic_dict['net-id'] = net_id
            port = nic.get(self.PORT)
            if port:
                neutron = self.client_plugin('neutron')
                nic_dict['port-id'] = neutron.find_neutron_resource(
                    nic, self.PORT, 'port')
            ip = nic.get(self.V4_FIXED_IP)
            if ip:
                nic_dict['v4-fixed-ip'] = ip
            nics.append(nic_dict)

        # create db instance
        instance = self.client().instances.create(
            self._dbinstance_name(),
            self.flavor,
            volume=self.volume,
            databases=self.databases,
            users=self.users,
            restorePoint=restore_point,
            availability_zone=zone,
            datastore=self.datastore_type,
            datastore_version=self.datastore_version,
            nics=nics,
            replica_of=replica_of,
            replica_count=replica_count)
        self.resource_id_set(instance.id)

        return instance.id

    def _refresh_instance(self, instance_id):
        """Fetch the instance, tolerating transient OverLimit responses.

        Returns None when Trove rate-limits the GET so the caller can
        simply retry on the next poll; any other error is re-raised.
        """
        try:
            instance = self.client().instances.get(instance_id)
            return instance
        except Exception as exc:
            if self.client_plugin().is_over_limit(exc):
                # LOG.warn is deprecated; use LOG.warning.
                LOG.warning(
                    _LW("Stack %(name)s (%(id)s) received an "
                        "OverLimit response during instance.get():"
                        " %(exception)s"),
                    {'name': self.stack.name,
                     'id': self.stack.id,
                     'exception': exc})
                return None
            else:
                raise

    def check_create_complete(self, instance_id):
        """Check if cloud DB instance creation is complete.

        Raises ResourceInError if the instance reached a terminal bad
        state; returns True once it is ACTIVE.
        """
        instance = self._refresh_instance(instance_id)  # refresh attributes
        if instance is None:
            return False
        if instance.status in self.BAD_STATUSES:
            raise exception.ResourceInError(
                resource_status=instance.status,
                status_reason=self.TROVE_STATUS_REASON.get(instance.status,
                                                           _("Unknown")))

        if instance.status != self.ACTIVE:
            return False
        LOG.info(
            _LI("Database instance %(database)s created (flavor:%("
                "flavor)s,volume:%(volume)s, datastore:%("
                "datastore_type)s, datastore_version:%("
                "datastore_version)s)"),
            {'database': self._dbinstance_name(),
             'flavor': self.flavor,
             'volume': self.volume,
             'datastore_type': self.datastore_type,
             'datastore_version': self.datastore_version})
        return True

    def handle_check(self):
        """Verify that the instance is in the ACTIVE state."""
        instance = self.client().instances.get(self.resource_id)
        status = instance.status
        checks = [
            {'attr': 'status', 'expected': self.ACTIVE, 'current': status},
        ]
        self._verify_check_conditions(checks)

    def handle_delete(self):
        """Delete a cloud database instance.

        Returns the instance id for check_delete_complete(), or None if
        there is nothing to delete (not created, or already gone).
        """
        if not self.resource_id:
            return

        try:
            instance = self.client().instances.get(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            instance.delete()
            return instance.id

    def check_delete_complete(self, instance_id):
        """Check for completion of cloud DB instance deletion."""
        if not instance_id:
            return True

        try:
            # For some time trove instance may continue to live
            self._refresh_instance(instance_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return True

        return False

    def validate(self):
        """Validate any of the provided params.

        Checks datastore type/version consistency, that every user's
        databases exist in the DATABASES property, and that each NIC
        names exactly one of network or port.
        """
        res = super(OSDBInstance, self).validate()
        if res:
            return res

        datastore_type = self.properties[self.DATASTORE_TYPE]
        datastore_version = self.properties[self.DATASTORE_VERSION]

        self.client_plugin().validate_datastore(datastore_type,
                                                datastore_version,
                                                self.DATASTORE_TYPE,
                                                self.DATASTORE_VERSION)

        # check validity of user and databases
        users = self.properties[self.USERS]
        if users:
            databases = self.properties[self.DATABASES]
            if not databases:
                msg = _('Databases property is required if users property '
                        'is provided for resource %s.') % self.name
                raise exception.StackValidationFailed(message=msg)

            db_names = set([db[self.DATABASE_NAME] for db in databases])
            for user in users:
                missing_db = [db_name for db_name
                              in user[self.USER_DATABASES]
                              if db_name not in db_names]

                if missing_db:
                    msg = (_('Database %(dbs)s specified for user does '
                             'not exist in databases for resource %(name)s.')
                           % {'dbs': missing_db, 'name': self.name})
                    raise exception.StackValidationFailed(message=msg)

        # check validity of NICS
        is_neutron = self.is_using_neutron()
        nics = self.properties[self.NICS]
        for nic in nics:
            if not is_neutron and nic.get(self.PORT):
                msg = _("Can not use %s property on Nova-network.") % self.PORT
                raise exception.StackValidationFailed(message=msg)

            if bool(nic.get(self.NET)) == bool(nic.get(self.PORT)):
                msg = _("Either %(net)s or %(port)s must be provided.") % {
                    'net': self.NET, 'port': self.PORT}
                raise exception.StackValidationFailed(message=msg)

    def href(self):
        """Return the instance's 'self' link URL, cached after first lookup."""
        if not self._href and self.dbinstance:
            if not self.dbinstance.links:
                self._href = None
            else:
                for link in self.dbinstance.links:
                    if link['rel'] == 'self':
                        self._href = link[self.HREF]
                        break

        return self._href

    def _resolve_attribute(self, name):
        """Resolve the 'hostname' and 'href' attributes."""
        if name == self.HOSTNAME:
            return self.dbinstance.hostname
        elif name == self.HREF:
            return self.href()
class KeystoneProject(resource.Resource):
    """Heat Template Resource for Keystone Project.

    Projects represent the base unit of ownership in OpenStack, in that all
    resources in OpenStack should be owned by a specific project. A project
    itself must be owned by a specific domain, and hence all project names
    are not globally unique, but unique to their domain. If the domain for a
    project is not specified, then it is added to the default domain.
    """

    support_status = support.SupportStatus(
        version='2015.1',
        message=_('Supported versions: keystone v3'))

    default_client_name = 'keystone'

    entity = 'projects'

    PROPERTIES = (
        NAME, DOMAIN, DESCRIPTION, ENABLED, PARENT,
    ) = (
        'name', 'domain', 'description', 'enabled', 'parent',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of keystone project.'),
            update_allowed=True),
        DOMAIN: properties.Schema(
            properties.Schema.STRING,
            _('Name or id of keystone domain.'),
            default='default',
            update_allowed=True,
            constraints=[constraints.CustomConstraint('keystone.domain')]),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of keystone project.'),
            default='',
            update_allowed=True),
        ENABLED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('This project is enabled or disabled.'),
            default=True,
            update_allowed=True),
        PARENT: properties.Schema(
            properties.Schema.STRING,
            _('The name or ID of parent of this keystone project '
              'in hierarchy.'),
            support_status=support.SupportStatus(version='6.0.0'),
            constraints=[constraints.CustomConstraint('keystone.project')]),
    }

    def client(self):
        # The plugin wraps the raw keystoneclient; unwrap it so that
        # self.client().projects etc. resolve directly.
        return super(KeystoneProject, self).client().client

    def handle_create(self):
        """Create the keystone project described by this resource."""
        plugin = self.client_plugin()
        props = self.properties

        # Fall back to a generated physical name when NAME is not given.
        name = props[self.NAME] or self.physical_resource_name()
        domain_id = plugin.get_domain_id(props[self.DOMAIN])
        parent_id = plugin.get_project_id(props[self.PARENT])

        project = self.client().projects.create(
            name=name,
            domain=domain_id,
            description=props[self.DESCRIPTION],
            enabled=props[self.ENABLED],
            parent=parent_id)

        self.resource_id_set(project.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed properties to the existing keystone project."""
        if not prop_diff:
            return

        # Only rename when the name property itself changed; name=None
        # tells keystone to leave the current name untouched.
        new_name = None
        if self.NAME in prop_diff:
            new_name = prop_diff[self.NAME] or self.physical_resource_name()

        # A domain must always be supplied, so fall back to the stored one.
        domain = (prop_diff.get(self.DOMAIN) or
                  self._stored_properties_data.get(self.DOMAIN))

        self.client().projects.update(
            project=self.resource_id,
            name=new_name,
            description=prop_diff.get(self.DESCRIPTION),
            enabled=prop_diff.get(self.ENABLED),
            domain=self.client_plugin().get_domain_id(domain))
class KeystoneUser(resource.Resource,
                   role_assignments.KeystoneRoleAssignmentMixin):
    """Heat Template Resource for Keystone User."""

    support_status = support.SupportStatus(
        version='2015.1',
        message=_('Supported versions: keystone v3'))

    default_client_name = 'keystone'

    PROPERTIES = (
        NAME, DOMAIN, DESCRIPTION, ENABLED, EMAIL, PASSWORD,
        DEFAULT_PROJECT, GROUPS,
    ) = (
        'name', 'domain', 'description', 'enabled', 'email', 'password',
        'default_project', 'groups',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of keystone user.'),
            update_allowed=True),
        DOMAIN: properties.Schema(
            properties.Schema.STRING,
            _('Name of keystone domain.'),
            default='default',
            update_allowed=True,
            constraints=[constraints.CustomConstraint('keystone.domain')]),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of keystone user.'),
            default='',
            update_allowed=True),
        ENABLED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Keystone user is enabled or disabled'),
            default=True,
            update_allowed=True),
        EMAIL: properties.Schema(
            properties.Schema.STRING,
            _('Email address of keystone user.'),
            update_allowed=True),
        PASSWORD: properties.Schema(
            properties.Schema.STRING,
            _('Password of keystone user.'),
            update_allowed=True),
        DEFAULT_PROJECT: properties.Schema(
            properties.Schema.STRING,
            _('Default project of keystone user.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('keystone.project')]),
        GROUPS: properties.Schema(
            properties.Schema.LIST,
            _('keystone user groups.'),
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.STRING,
                _('keystone user group.'),
                constraints=[constraints.CustomConstraint('keystone.group')]))
    }

    # Merge in the role-assignment properties from the mixin.
    properties_schema.update(
        role_assignments.KeystoneRoleAssignmentMixin.mixin_properties_schema)

    def validate(self):
        """Validate base properties plus the role-assignment properties."""
        super(KeystoneUser, self).validate()
        self.validate_assignment_properties()

    def _create_user(self,
                     user_name,
                     description,
                     domain,
                     default_project,
                     enabled=None,
                     email=None,
                     password=None):
        """Create a keystone user, resolving domain/project names to ids."""
        domain = (self.client_plugin().get_domain_id(domain))
        if default_project:
            default_project = (
                self.client_plugin().get_project_id(default_project))
        return self.client().client.users.create(
            name=user_name,
            domain=domain,
            description=description,
            enabled=enabled,
            email=email,
            password=password,
            default_project=default_project)

    def _delete_user(self, user_id):
        """Delete the keystone user by id."""
        return self.client().client.users.delete(user_id)

    def _update_user(self,
                     user_id,
                     domain,
                     new_name=None,
                     new_description=None,
                     new_email=None,
                     new_password=None,
                     new_default_project=None,
                     enabled=None):
        """Update the user, sending only the values that were provided.

        None arguments are omitted from the request so keystone leaves
        the corresponding attributes unchanged.
        """
        values = dict()
        if new_name is not None:
            values['name'] = new_name
        if new_description is not None:
            values['description'] = new_description
        if new_email is not None:
            values['email'] = new_email
        if new_password is not None:
            values['password'] = new_password
        if new_default_project is not None:
            values['default_project'] = new_default_project
        if enabled is not None:
            values['enabled'] = enabled

        values['user'] = user_id
        domain = (self.client_plugin().get_domain_id(domain))
        values['domain'] = domain

        return self.client().client.users.update(**values)

    def _add_user_to_groups(self, user_id, groups):
        """Add the user to each named/id'd group; no-op when groups is None."""
        if groups is not None:
            group_ids = [self.client_plugin().get_group_id(group)
                         for group in groups]

            for group_id in group_ids:
                self.client().client.users.add_to_group(user_id,
                                                        group_id)

    def _remove_user_from_groups(self, user_id, groups):
        """Remove the user from each group; no-op when groups is None."""
        if groups is not None:
            group_ids = [self.client_plugin().get_group_id(group)
                         for group in groups]

            for group_id in group_ids:
                self.client().client.users.remove_from_group(user_id,
                                                             group_id)

    def _find_diff(self, updated_prps, stored_prps):
        """Return (added, removed) group ids between updated and stored lists.

        Both arguments may be None, which is treated as an empty list.
        """
        new_group_ids = [self.client_plugin().get_group_id(group)
                         for group in
                         (set(updated_prps or []) - set(stored_prps or []))]

        removed_group_ids = [self.client_plugin().get_group_id(group)
                             for group in
                             (set(stored_prps or []) - set(updated_prps or []))]

        return new_group_ids, removed_group_ids

    def handle_create(self):
        """Create the user, then its group memberships and role assignments."""
        user_name = (self.properties.get(self.NAME) or
                     self.physical_resource_name())
        description = self.properties.get(self.DESCRIPTION)
        domain = self.properties.get(self.DOMAIN)
        enabled = self.properties.get(self.ENABLED)
        email = self.properties.get(self.EMAIL)
        password = self.properties.get(self.PASSWORD)
        default_project = self.properties.get(self.DEFAULT_PROJECT)
        groups = self.properties.get(self.GROUPS)

        user = self._create_user(user_name=user_name,
                                 description=description,
                                 domain=domain,
                                 enabled=enabled,
                                 email=email,
                                 password=password,
                                 default_project=default_project)

        self.resource_id_set(user.id)

        self._add_user_to_groups(user.id, groups)

        self.create_assignment(user_id=user.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply changed properties to the existing user.

        Also reconciles group membership and role assignments against
        the stored property data.
        """
        # BUGFIX: only compute a new name when NAME itself changed.
        # Previously any update (e.g. of email) with NAME absent from
        # prop_diff reset the user's name to the physical resource name.
        # name=None makes _update_user leave the name untouched, matching
        # KeystoneProject.handle_update.
        name = None
        if self.NAME in prop_diff:
            name = prop_diff[self.NAME] or self.physical_resource_name()
        description = prop_diff.get(self.DESCRIPTION)
        enabled = prop_diff.get(self.ENABLED)
        email = prop_diff.get(self.EMAIL)
        password = prop_diff.get(self.PASSWORD)
        domain = (prop_diff.get(self.DOMAIN) or
                  self._stored_properties_data.get(self.DOMAIN))
        default_project = prop_diff.get(self.DEFAULT_PROJECT)

        (new_group_ids, removed_group_ids) = self._find_diff(
            prop_diff.get(self.GROUPS),
            self._stored_properties_data.get(self.GROUPS))

        self._update_user(user_id=self.resource_id,
                          domain=domain,
                          new_name=name,
                          new_description=description,
                          enabled=enabled,
                          new_default_project=default_project,
                          new_email=email,
                          new_password=password)

        if len(new_group_ids) > 0:
            self._add_user_to_groups(self.resource_id, new_group_ids)

        if len(removed_group_ids) > 0:
            self._remove_user_from_groups(self.resource_id,
                                          removed_group_ids)

        self.update_assignment(prop_diff=prop_diff,
                               user_id=self.resource_id)

    def handle_delete(self):
        """Tear down assignments and group memberships, then the user.

        Not-found errors are ignored so delete is idempotent.
        """
        if self.resource_id is not None:
            try:
                self.delete_assignment(user_id=self.resource_id)
                if self._stored_properties_data.get(self.GROUPS) is not None:
                    self._remove_user_from_groups(
                        self.resource_id,
                        [self.client_plugin().get_group_id(group)
                         for group in
                         self._stored_properties_data.get(self.GROUPS)])

                self._delete_user(user_id=self.resource_id)
            except Exception as ex:
                self.client_plugin().ignore_not_found(ex)
class StructuredDeployment(sd.SoftwareDeployment):
    """A resource which has same logic with OS::Heat::SoftwareDeployment.

    A deployment resource like OS::Heat::SoftwareDeployment, but which
    performs input value substitution on the config defined by a
    OS::Heat::StructuredConfig resource.

    Some configuration tools have no concept of inputs, so the input value
    substitution needs to occur in the deployment resource. An example of this
    is the JSON metadata consumed by the cfn-init tool.

    Where the config contains {get_input: input_name} this will be substituted
    with the value of input_name in this resource's input_values. If get_input
    needs to be passed through to the substituted configuration then a
    different input_key property value can be specified.
    """

    support_status = support.SupportStatus(version='2014.1')

    # Most properties are inherited verbatim from SoftwareDeployment;
    # only INPUT_KEY and INPUT_VALUES_VALIDATE are new here.
    PROPERTIES = (
        CONFIG,
        SERVER,
        INPUT_VALUES,
        DEPLOY_ACTIONS,
        NAME,
        SIGNAL_TRANSPORT,
        INPUT_KEY,
        INPUT_VALUES_VALIDATE
    ) = (
        sd.SoftwareDeployment.CONFIG,
        sd.SoftwareDeployment.SERVER,
        sd.SoftwareDeployment.INPUT_VALUES,
        sd.SoftwareDeployment.DEPLOY_ACTIONS,
        sd.SoftwareDeployment.NAME,
        sd.SoftwareDeployment.SIGNAL_TRANSPORT,
        'input_key',
        'input_values_validate'
    )

    _sd_ps = sd.SoftwareDeployment.properties_schema

    properties_schema = {
        # Reuse the parent's schema entries for the shared properties.
        CONFIG: _sd_ps[CONFIG],
        SERVER: _sd_ps[SERVER],
        INPUT_VALUES: _sd_ps[INPUT_VALUES],
        DEPLOY_ACTIONS: _sd_ps[DEPLOY_ACTIONS],
        SIGNAL_TRANSPORT: _sd_ps[SIGNAL_TRANSPORT],
        NAME: _sd_ps[NAME],
        INPUT_KEY: properties.Schema(
            properties.Schema.STRING,
            _('Name of key to use for substituting inputs during deployment.'),
            default='get_input',
        ),
        INPUT_VALUES_VALIDATE: properties.Schema(
            properties.Schema.STRING,
            _('Perform a check on the input values passed to verify that '
              'each required input has a corresponding value. '
              'When the property is set to STRICT and no value is passed, '
              'an exception is raised.'),
            default='LAX',
            constraints=[
                constraints.AllowedValues(['LAX', 'STRICT']),
            ],
        )
    }

    def empty_config(self):
        # A structured config with nothing in it is an empty mapping,
        # not an empty string.
        return {}

    def _build_derived_config(self, action, source,
                              derived_inputs, derived_options):
        """Build the derived config by substituting input values into the
        source config wherever the input_key function appears."""
        cfg = source.get(sc.SoftwareConfig.CONFIG)
        input_key = self.properties[self.INPUT_KEY]
        check_input_val = self.properties[self.INPUT_VALUES_VALIDATE]
        # derived_inputs are SoftwareDeployment input objects; flatten
        # them to a plain {name: value} dict for substitution.
        inputs = dict(i.input_data() for i in derived_inputs)

        return self.parse(inputs, input_key, cfg, check_input_val)

    @staticmethod
    def get_input_key_arg(snippet, input_key):
        """If snippet is exactly {input_key: "<string>"}, return the string
        argument; otherwise return None (not a substitution site)."""
        if len(snippet) != 1:
            return None
        fn_name, fn_arg = next(six.iteritems(snippet))
        if (fn_name == input_key and
                isinstance(fn_arg, six.string_types)):
            return fn_arg

    @staticmethod
    def get_input_key_value(fn_arg, inputs, check_input_val='LAX'):
        """Look up the input value for fn_arg; in STRICT mode a missing
        input raises UserParameterMissing instead of yielding None."""
        if check_input_val == 'STRICT' and fn_arg not in inputs:
            raise exception.UserParameterMissing(key=fn_arg)
        return inputs.get(fn_arg)

    @staticmethod
    def parse(inputs, input_key, snippet, check_input_val='LAX'):
        """Recursively walk snippet, replacing {input_key: name} mappings
        with the corresponding value from inputs.

        Mappings and iterables are rebuilt; strings are treated as leaf
        values (they are Iterable but must not be recursed into).
        """
        # Pre-bind the constant arguments for the recursive calls.
        parse = functools.partial(
            StructuredDeployment.parse,
            inputs,
            input_key,
            check_input_val=check_input_val)

        # NOTE(review): collections.Mapping/Iterable moved to
        # collections.abc and were removed from the collections namespace
        # in Python 3.10 — fine for the py2/py3 (six-based) versions this
        # code targets, but would need collections.abc on newer Pythons.
        if isinstance(snippet, collections.Mapping):
            fn_arg = StructuredDeployment.get_input_key_arg(snippet, input_key)
            if fn_arg is not None:
                return StructuredDeployment.get_input_key_value(
                    fn_arg, inputs, check_input_val)

            return dict((k, parse(v)) for k, v in six.iteritems(snippet))
        elif (not isinstance(snippet, six.string_types) and
              isinstance(snippet, collections.Iterable)):
            return [parse(v) for v in snippet]
        else:
            return snippet
class ExtraRoute(neutron.NeutronResource):
    """Resource for specifying extra routes for Neutron router.

    Resource allows to specify nexthop IP and destination network for router.
    """

    required_service_extension = 'extraroute'

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('Use this resource at your own risk.'))

    PROPERTIES = (
        ROUTER_ID, DESTINATION, NEXTHOP,
    ) = (
        'router_id', 'destination', 'nexthop',
    )

    properties_schema = {
        ROUTER_ID: properties.Schema(
            properties.Schema.STRING,
            description=_('The router id.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('neutron.router')
            ]
        ),
        DESTINATION: properties.Schema(
            properties.Schema.STRING,
            description=_('Network in CIDR notation.'),
            required=True),
        NEXTHOP: properties.Schema(
            properties.Schema.STRING,
            description=_('Nexthop IP address.'),
            required=True)
    }

    def add_dependencies(self, deps):
        """Make this route depend on interfaces/gateways of the same router,
        so routes are added only after the router is wired up."""
        super(ExtraRoute, self).add_dependencies(deps)
        for resource in self.stack.values():
            # depend on any RouterInterface in this template with the same
            # router_id as this router_id
            if resource.has_interface('OS::Neutron::RouterInterface'):
                try:
                    router_id = self.properties[self.ROUTER_ID]
                    dep_router_id = resource.properties.get(
                        router.RouterInterface.ROUTER)
                except (ValueError, TypeError):
                    # Properties errors will be caught later in validation,
                    # where we can report them in their proper context.
                    continue
                if dep_router_id == router_id:
                    deps += (self, resource)
            # depend on any RouterGateway in this template with the same
            # router_id as this router_id
            elif resource.has_interface('OS::Neutron::RouterGateway'):
                try:
                    router_id = self.properties[self.ROUTER_ID]
                    dep_router_id = resource.properties.get(
                        router.RouterGateway.ROUTER_ID)
                except (ValueError, TypeError):
                    # Properties errors will be caught later in validation,
                    # where we can report them in their proper context.
                    continue
                if dep_router_id == router_id:
                    deps += (self, resource)

    def handle_create(self):
        """Append this route to the router's existing route list.

        Neutron has no per-route API, so the whole 'routes' list is
        read, modified and written back.
        """
        router_id = self.properties.get(self.ROUTER_ID)
        routes = self.client().show_router(
            router_id).get('router').get('routes')
        if not routes:
            routes = []
        new_route = {'destination': self.properties[self.DESTINATION],
                     'nexthop': self.properties[self.NEXTHOP]}
        if new_route in routes:
            msg = _('Route duplicates an existing route.')
            raise exception.Error(msg)
        routes.append(new_route.copy())
        self.client().update_router(router_id,
                                    {'router': {'routes': routes}})
        new_route['router_id'] = router_id
        # Encode router/destination/nexthop into the resource id so the
        # route is identifiable without extra state.
        self.resource_id_set(
            '%(router_id)s:%(destination)s:%(nexthop)s' % new_route)

    def handle_delete(self):
        """Remove this route from the router's route list, ignoring a
        router that has already disappeared."""
        if not self.resource_id:
            return
        router_id = self.properties[self.ROUTER_ID]
        with self.client_plugin().ignore_not_found:
            routes = self.client().show_router(
                router_id).get('router').get('routes', [])
            try:
                routes.remove({'destination': self.properties[self.DESTINATION],
                               'nexthop': self.properties[self.NEXTHOP]})
            except ValueError:
                # Route already gone; nothing to write back.
                return
            self.client().update_router(router_id,
                                        {'router': {'routes': routes}})
class RouterGateway(neutron.NeutronResource):
    # Hidden/deprecated resource: kept only for existing stacks; new
    # templates should use the router's external_gateway_info property.

    support_status = support.SupportStatus(
        status=support.HIDDEN,
        message=_('Use the `external_gateway_info` property in '
                  'the router resource to set up the gateway.'),
        version='5.0.0',
        previous_status=support.SupportStatus(status=support.DEPRECATED,
                                              version='2014.1'))

    PROPERTIES = (
        ROUTER_ID, NETWORK_ID, NETWORK,
    ) = (
        'router_id', 'network_id', 'network'
    )

    properties_schema = {
        ROUTER_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of the router.'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.router')]),
        # Deprecated alias for NETWORK; translated via translation_rules.
        NETWORK_ID: properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                message=_('Use property %s.') % NETWORK,
                version='9.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('external network for the gateway.'),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
    }

    def translation_rules(self, props):
        """Map the legacy network_id property onto network, then resolve
        the network name to an id via neutron."""
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.NETWORK],
                value_path=[self.NETWORK_ID]),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.NETWORK],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='network')
        ]

    def add_dependencies(self, deps):
        """Order gateway creation after router interfaces and subnets
        that it implicitly relies on."""
        super(RouterGateway, self).add_dependencies(deps)
        for resource in six.itervalues(self.stack):
            # depend on any RouterInterface in this template with the same
            # router_id as this router_id
            if resource.has_interface('OS::Neutron::RouterInterface'):
                try:
                    dep_router_id = resource.properties[
                        RouterInterface.ROUTER]
                    router_id = self.properties[self.ROUTER_ID]
                except (ValueError, TypeError):
                    # Properties errors will be caught later in validation,
                    # where we can report them in their proper context.
                    continue
                if dep_router_id == router_id:
                    deps += (self, resource)
            # depend on any subnet in this template with the same network_id
            # as this network_id, as the gateway implicitly creates a port
            # on that subnet
            if resource.has_interface('OS::Neutron::Subnet'):
                try:
                    dep_network = resource.properties[
                        subnet.Subnet.NETWORK]
                    network = self.properties[self.NETWORK]
                except (ValueError, TypeError):
                    # Properties errors will be caught later in validation,
                    # where we can report them in their proper context.
                    continue
                if dep_network == network:
                    deps += (self, resource)

    def handle_create(self):
        """Attach the external network as the router's gateway; the
        resource id encodes '<router_id>:<network_id>'."""
        router_id = self.properties[self.ROUTER_ID]
        # NETWORK has already been resolved to an id by translation_rules.
        network_id = dict(self.properties).get(self.NETWORK)
        self.client().add_gateway_router(router_id, {'network_id': network_id})
        self.resource_id_set('%s:%s' % (router_id, network_id))

    def handle_delete(self):
        """Detach the gateway, ignoring an already-deleted router."""
        if not self.resource_id:
            return
        (router_id, network_id) = self.resource_id.split(':')
        with self.client_plugin().ignore_not_found:
            self.client().remove_gateway_router(router_id)
class Pool(neutron.NeutronResource):
    """A resource for managing load balancer pools in Neutron.

    Creation is a three-step sequence (pool, then health monitor
    associations, then the VIP); the VIP id is kept in resource metadata
    under the ``'vip'`` key.  Deletion is driven by a
    ``progress.PoolDeleteProgress`` state object in check_delete_complete.
    """

    required_service_extension = 'lbaas'

    PROPERTIES = (
        PROTOCOL, SUBNET_ID, SUBNET, LB_METHOD, NAME, DESCRIPTION,
        ADMIN_STATE_UP, VIP, MONITORS, PROVIDER,
    ) = (
        'protocol', 'subnet_id', 'subnet', 'lb_method', 'name', 'description',
        'admin_state_up', 'vip', 'monitors', 'provider',
    )

    _VIP_KEYS = (
        VIP_NAME, VIP_DESCRIPTION, VIP_SUBNET, VIP_ADDRESS,
        VIP_CONNECTION_LIMIT, VIP_PROTOCOL_PORT, VIP_SESSION_PERSISTENCE,
        VIP_ADMIN_STATE_UP,
    ) = (
        'name', 'description', 'subnet', 'address', 'connection_limit',
        'protocol_port', 'session_persistence', 'admin_state_up',
    )

    _VIP_SESSION_PERSISTENCE_KEYS = (
        VIP_SESSION_PERSISTENCE_TYPE, VIP_SESSION_PERSISTENCE_COOKIE_NAME,
    ) = (
        'type', 'cookie_name',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, NAME_ATTR, PROTOCOL_ATTR, SUBNET_ID_ATTR,
        LB_METHOD_ATTR, DESCRIPTION_ATTR, TENANT_ID, VIP_ATTR, PROVIDER_ATTR,
    ) = (
        'admin_state_up', 'name', 'protocol', 'subnet_id', 'lb_method',
        'description', 'tenant_id', 'vip', 'provider',
    )

    properties_schema = {
        PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            _('Protocol for balancing.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['TCP', 'HTTP', 'HTTPS']),
            ]),
        # SUBNET_ID is the hidden legacy spelling of SUBNET; a translation
        # rule below folds it into SUBNET.
        SUBNET_ID: properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                message=_('Use property %s.') % SUBNET,
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.subnet')]),
        SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('The subnet for the port on which the members '
              'of the pool will be connected.'),
            support_status=support.SupportStatus(version='2014.2'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.subnet')]),
        LB_METHOD: properties.Schema(
            properties.Schema.STRING,
            _('The algorithm used to distribute load between the members of '
              'the pool.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ROUND_ROBIN',
                                           'LEAST_CONNECTIONS', 'SOURCE_IP']),
            ],
            update_allowed=True),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the pool.')),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the pool.'),
            update_allowed=True),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of this pool.'),
            default=True,
            update_allowed=True),
        PROVIDER: properties.Schema(
            properties.Schema.STRING,
            _('LBaaS provider to implement this load balancer instance.'),
            support_status=support.SupportStatus(version='5.0.0'),
            constraints=[constraints.CustomConstraint('neutron.lb.provider')],
        ),
        VIP: properties.Schema(
            properties.Schema.MAP,
            _('IP address and port of the pool.'),
            schema={
                VIP_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of the vip.')),
                VIP_DESCRIPTION: properties.Schema(
                    properties.Schema.STRING,
                    _('Description of the vip.')),
                VIP_SUBNET: properties.Schema(
                    properties.Schema.STRING,
                    _('Subnet of the vip.'),
                    constraints=[
                        constraints.CustomConstraint('neutron.subnet')
                    ]),
                VIP_ADDRESS: properties.Schema(
                    properties.Schema.STRING,
                    _('IP address of the vip.'),
                    constraints=[constraints.CustomConstraint('ip_addr')]),
                VIP_CONNECTION_LIMIT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('The maximum number of connections per second '
                      'allowed for the vip.')),
                VIP_PROTOCOL_PORT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('TCP port on which to listen for client traffic '
                      'that is associated with the vip address.'),
                    required=True),
                VIP_SESSION_PERSISTENCE: properties.Schema(
                    properties.Schema.MAP,
                    _('Configuration of session persistence.'),
                    schema={
                        VIP_SESSION_PERSISTENCE_TYPE: properties.Schema(
                            properties.Schema.STRING,
                            _('Method of implementation of session '
                              'persistence feature.'),
                            required=True,
                            constraints=[
                                constraints.AllowedValues(
                                    ['SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE'])
                            ]),
                        VIP_SESSION_PERSISTENCE_COOKIE_NAME: properties.Schema(
                            properties.Schema.STRING,
                            _('Name of the cookie, '
                              'required if type is APP_COOKIE.'))
                    }),
                VIP_ADMIN_STATE_UP: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('The administrative state of this vip.'),
                    default=True),
            },
            required=True),
        MONITORS: properties.Schema(
            properties.Schema.LIST,
            _('List of health monitors associated with the pool.'),
            default=[],
            update_allowed=True),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _('The administrative state of this pool.'),
            type=attributes.Schema.STRING),
        NAME_ATTR: attributes.Schema(
            _('Name of the pool.'),
            type=attributes.Schema.STRING),
        PROTOCOL_ATTR: attributes.Schema(
            _('Protocol to balance.'),
            type=attributes.Schema.STRING),
        SUBNET_ID_ATTR: attributes.Schema(
            _('The subnet for the port on which the members of the pool '
              'will be connected.'),
            type=attributes.Schema.STRING),
        LB_METHOD_ATTR: attributes.Schema(
            _('The algorithm used to distribute load between the members '
              'of the pool.'),
            type=attributes.Schema.STRING),
        DESCRIPTION_ATTR: attributes.Schema(
            _('Description of the pool.'),
            type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(
            _('Tenant owning the pool.'),
            type=attributes.Schema.STRING),
        VIP_ATTR: attributes.Schema(
            _('Vip associated with the pool.'),
            type=attributes.Schema.MAP),
        PROVIDER_ATTR: attributes.Schema(
            _('Provider implementing this load balancer instance.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING,
        ),
    }

    def translation_rules(self, props):
        """Fold the legacy SUBNET_ID property into SUBNET."""
        # NOTE(review): sibling resources use translation.TranslationRule;
        # this module spells it via the properties module instead.
        return [
            properties.TranslationRule(props,
                                       properties.TranslationRule.REPLACE,
                                       [self.SUBNET],
                                       value_path=[self.SUBNET_ID])
        ]

    def validate(self):
        """Require cookie_name when session persistence is APP_COOKIE."""
        res = super(Pool, self).validate()
        if res:
            return res
        session_p = self.properties[self.VIP].get(self.VIP_SESSION_PERSISTENCE)
        if session_p is None:
            # session persistence is not configured, skip validation
            return
        persistence_type = session_p[self.VIP_SESSION_PERSISTENCE_TYPE]
        if persistence_type == 'APP_COOKIE':
            if session_p.get(self.VIP_SESSION_PERSISTENCE_COOKIE_NAME):
                return
            msg = _('Property cookie_name is required, when '
                    'session_persistence type is set to APP_COOKIE.')
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the pool, associate monitors, then create its VIP.

        The VIP id is stashed in resource metadata so later phases
        (status polling, attribute resolution, delete) can find it.
        """
        # NOTE(review): this local deliberately shadows the module-level
        # `properties` name for the remainder of the method.
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        self.client_plugin().resolve_subnet(
            properties, self.SUBNET, 'subnet_id')
        # VIP and MONITORS are consumed here; they are not pool arguments.
        vip_properties = properties.pop(self.VIP)
        monitors = properties.pop(self.MONITORS)

        pool = self.client().create_pool({'pool': properties})['pool']
        self.resource_id_set(pool['id'])

        for monitor in monitors:
            self.client().associate_health_monitor(
                pool['id'], {'health_monitor': {'id': monitor}})

        vip_arguments = self.prepare_properties(
            vip_properties,
            '%s.vip' % (self.name,))

        session_p = vip_arguments.get(self.VIP_SESSION_PERSISTENCE)
        if session_p is not None:
            prepared_props = self.prepare_properties(session_p, None)
            vip_arguments['session_persistence'] = prepared_props

        # The VIP always shares the pool's protocol.
        vip_arguments['protocol'] = self.properties[self.PROTOCOL]

        # Default the VIP onto the pool's subnet unless one was given.
        if vip_arguments.get(self.VIP_SUBNET) is None:
            vip_arguments['subnet_id'] = properties[self.SUBNET_ID]
        else:
            vip_arguments['subnet_id'] = self.client_plugin().resolve_subnet(
                vip_arguments, self.VIP_SUBNET, 'subnet_id')

        vip_arguments['pool_id'] = pool['id']
        vip = self.client().create_vip({'vip': vip_arguments})['vip']

        self.metadata_set({'vip': vip['id']})

    def _show_resource(self):
        # Raw pool dict as returned by Neutron.
        return self.client().show_pool(self.resource_id)['pool']

    def check_create_complete(self, data):
        """Poll pool then VIP status until both are ACTIVE.

        Raises ResourceInError on ERROR and ResourceUnknownStatus on any
        unrecognized state.
        """
        attributes = self._show_resource()
        status = attributes['status']
        if status == 'PENDING_CREATE':
            return False
        elif status == 'ACTIVE':
            vip_attributes = self.client().show_vip(
                self.metadata_get()['vip'])['vip']
            vip_status = vip_attributes['status']
            if vip_status == 'PENDING_CREATE':
                return False
            if vip_status == 'ACTIVE':
                return True
            if vip_status == 'ERROR':
                raise exception.ResourceInError(
                    resource_status=vip_status,
                    status_reason=_('error in vip'))
            raise exception.ResourceUnknownStatus(
                resource_status=vip_status,
                result=_('Pool creation failed due to vip'))
        elif status == 'ERROR':
            raise exception.ResourceInError(
                resource_status=status,
                status_reason=_('error in pool'))
        else:
            raise exception.ResourceUnknownStatus(
                resource_status=status,
                result=_('Pool creation failed'))

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Reconcile monitor associations, then push remaining diffs."""
        if prop_diff:
            if self.MONITORS in prop_diff:
                # Set difference gives the monitors to drop and to add.
                monitors = set(prop_diff.pop(self.MONITORS))
                old_monitors = set(self.properties[self.MONITORS])
                for monitor in old_monitors - monitors:
                    self.client().disassociate_health_monitor(
                        self.resource_id, monitor)
                for monitor in monitors - old_monitors:
                    self.client().associate_health_monitor(
                        self.resource_id, {'health_monitor': {'id': monitor}})

            # Re-check: MONITORS may have been the only changed property.
            if prop_diff:
                self.client().update_pool(self.resource_id,
                                          {'pool': prop_diff})

    def _resolve_attribute(self, name):
        # 'vip' is served from live Neutron data via the stored VIP id.
        if name == self.VIP_ATTR:
            return self.client().show_vip(self.metadata_get()['vip'])['vip']
        return super(Pool, self)._resolve_attribute(name)

    def handle_delete(self):
        """Seed the delete progress object consumed by check_delete_complete."""
        if not self.resource_id:
            prg = progress.PoolDeleteProgress(True)
            return prg
        prg = progress.PoolDeleteProgress()
        if not self.metadata_get():
            # No VIP was ever recorded; skip the VIP phase entirely.
            prg.vip['delete_called'] = True
            prg.vip['deleted'] = True
        return prg

    def _delete_vip(self):
        # Returns True if the VIP was already gone (not-found swallowed).
        return self._not_found_in_call(
            self.client().delete_vip, self.metadata_get()['vip'])

    def _check_vip_deleted(self):
        # Returns True once show_vip reports not-found.
        return self._not_found_in_call(
            self.client().show_vip, self.metadata_get()['vip'])

    def _delete_pool(self):
        return self._not_found_in_call(
            self.client().delete_pool, self.resource_id)

    def check_delete_complete(self, prg):
        """Advance the delete state machine: VIP first, then the pool."""
        if not prg.vip['delete_called']:
            prg.vip['deleted'] = self._delete_vip()
            prg.vip['delete_called'] = True
            return False
        if not prg.vip['deleted']:
            prg.vip['deleted'] = self._check_vip_deleted()
            return False
        if not prg.pool['delete_called']:
            prg.pool['deleted'] = self._delete_pool()
            prg.pool['delete_called'] = True
            return prg.pool['deleted']
        if not prg.pool['deleted']:
            # Defer to the base class to confirm the pool is gone.
            prg.pool['deleted'] = super(Pool, self).check_delete_complete(True)
            return prg.pool['deleted']
        return True
class SwiftSignal(resource.Resource):
    """Wait-condition resource fed by signals uploaded to a Swift TempURL.

    Creation blocks until COUNT success signals have been uploaded to the
    Swift object named by HANDLE, or until TIMEOUT seconds elapse.
    """

    support_status = support.SupportStatus(version='2014.2')

    default_client_name = "swift"

    PROPERTIES = (HANDLE, TIMEOUT, COUNT,) = ('handle', 'timeout', 'count',)

    properties_schema = {
        HANDLE: properties.Schema(
            properties.Schema.STRING,
            required=True,
            description=_('URL of TempURL where resource will signal '
                          'completion and optionally upload data.')),
        TIMEOUT: properties.Schema(
            properties.Schema.NUMBER,
            description=_('The maximum number of seconds to wait for the '
                          'resource to signal completion. Once the timeout '
                          'is reached, creation of the signal resource will '
                          'fail.'),
            required=True,
            constraints=[
                constraints.Range(1, 43200),
            ]),
        COUNT: properties.Schema(
            properties.Schema.INTEGER,
            description=_('The number of success signals that must be '
                          'received before the stack creation process '
                          'continues.'),
            default=1,
            constraints=[
                constraints.Range(1, 1000),
            ])
    }

    # NOTE(review): (DATA) is not a tuple, so ATTRIBUTES is the bare string
    # 'data' rather than a one-element tuple; DATA is also re-bound (to the
    # same value) by the METADATA_KEYS unpacking below.
    ATTRIBUTES = (DATA) = 'data'

    attributes_schema = {
        DATA: attributes.Schema(
            _('JSON data that was uploaded via the SwiftSignalHandle.'),
            type=attributes.Schema.STRING)
    }

    WAIT_STATUSES = (
        STATUS_FAILURE,
        STATUS_SUCCESS,
    ) = (
        'FAILURE',
        'SUCCESS',
    )

    # Keys recognized inside each uploaded signal body.
    METADATA_KEYS = (
        DATA, REASON, STATUS, UNIQUE_ID
    ) = (
        'data', 'reason', 'status', 'id'
    )

    def __init__(self, name, json_snippet, stack):
        super(SwiftSignal, self).__init__(name, json_snippet, stack)
        # Lazily-computed caches for the parsed handle URL and object name.
        self._obj_name = None
        self._url = None

    @property
    def url(self):
        # Parsed form of the HANDLE TempURL, computed once.
        if not self._url:
            self._url = parse.urlparse(self.properties[self.HANDLE])
        return self._url

    @property
    def obj_name(self):
        # Swift object name: the fifth path component of the TempURL
        # (/v1/<account>/<container>/<object>).
        if not self._obj_name:
            self._obj_name = self.url.path.split('/')[4]
        return self._obj_name

    def _validate_handle_url(self):
        """Reject handles that are not TempURLs for this stack's container."""
        parts = self.url.path.split('/')
        msg = _('"%(url)s" is not a valid SwiftSignalHandle. The %(part)s '
                'is invalid')
        cplugin = self.client_plugin()
        if not cplugin.is_valid_temp_url_path(self.url.path):
            raise ValueError(msg % {'url': self.url.path,
                                    'part': 'Swift TempURL path'})
        # The container name must be this stack's id.
        if not parts[3] == self.stack.id:
            raise ValueError(msg % {'url': self.url.path,
                                    'part': 'container name'})

    def handle_create(self):
        """Validate the handle and start the timeout clock."""
        self._validate_handle_url()
        started_at = timeutils.utcnow()
        return started_at, float(self.properties[self.TIMEOUT])

    def get_signals(self):
        """Fetch, parse, de-duplicate, and default-fill uploaded signals.

        Returns a list of signal dicts; missing fields are filled with
        defaults, and a repeated UNIQUE_ID replaces earlier entries.
        """
        try:
            container = self.client().get_container(self.stack.id)
        except Exception as exc:
            self.client_plugin().ignore_not_found(exc)
            LOG.debug("Swift container %s was not found" % self.stack.id)
            return []

        # get_container returns (headers, object_listing).
        index = container[1]
        if not index:
            LOG.debug("Swift objects in container %s were not found" %
                      self.stack.id)
            return []

        # Remove objects in that are for other handle resources, since
        # multiple SwiftSignalHandle resources in the same stack share
        # a container
        filtered = [obj for obj in index if self.obj_name in obj['name']]

        # Fetch objects from Swift and filter results
        obj_bodies = []
        for obj in filtered:
            try:
                signal = self.client().get_object(self.stack.id, obj['name'])
            except Exception as exc:
                self.client_plugin().ignore_not_found(exc)
                continue

            body = signal[1]
            if body == swift.IN_PROGRESS:  # Ignore the initial object
                continue
            if body == "":
                obj_bodies.append({})
                continue
            try:
                obj_bodies.append(jsonutils.loads(body))
            except ValueError:
                raise exception.Error(_("Failed to parse JSON data: %s") %
                                      body)

        # Set default values on each signal
        signals = []
        signal_num = 1
        for signal in obj_bodies:

            # Remove previous signals with the same ID
            sig_id = self.UNIQUE_ID
            ids = [s.get(sig_id) for s in signals if sig_id in s]
            if ids and sig_id in signal and ids.count(signal[sig_id]) > 0:
                [signals.remove(s) for s in signals
                 if s.get(sig_id) == signal[sig_id]]

            # Make sure all fields are set, since all are optional
            signal.setdefault(self.DATA, None)
            unique_id = signal.setdefault(sig_id, signal_num)
            reason = 'Signal %s received' % unique_id
            signal.setdefault(self.REASON, reason)
            signal.setdefault(self.STATUS, self.STATUS_SUCCESS)

            signals.append(signal)
            signal_num += 1

        return signals

    def get_status(self):
        """Return the list of status strings from all current signals."""
        return [s[self.STATUS] for s in self.get_signals()]

    def get_status_reason(self, status):
        """Return the reasons of all signals carrying the given status."""
        return [s[self.REASON] for s in self.get_signals()
                if s[self.STATUS] == status]

    def get_data(self):
        """Map each signal's unique id to its uploaded data payload."""
        signals = self.get_signals()
        if not signals:
            return None
        data = {}
        for signal in signals:
            data[signal[self.UNIQUE_ID]] = signal[self.DATA]
        return data

    def check_create_complete(self, create_data):
        """Poll for signals; succeed at COUNT successes, raise on failure.

        create_data is the (started_at, timeout) pair from handle_create.
        """
        if timeutils.is_older_than(*create_data):
            raise SwiftSignalTimeout(self)

        statuses = self.get_status()
        if not statuses:
            return False

        for status in statuses:
            if status == self.STATUS_FAILURE:
                failure = SwiftSignalFailure(self)
                LOG.info(_LI('%(name)s Failed (%(failure)s)'),
                         {'name': str(self), 'failure': str(failure)})
                raise failure
            elif status != self.STATUS_SUCCESS:
                raise exception.Error(_("Unknown status: %s") % status)

        if len(statuses) >= self.properties[self.COUNT]:
            LOG.info(_LI("%s Succeeded"), str(self))
            return True
        return False

    def _resolve_attribute(self, key):
        # Only the 'data' attribute is served; other keys fall through
        # and implicitly return None.
        if key == self.DATA:
            return six.text_type(jsonutils.dumps(self.get_data()))
class SwiftSignalHandle(resource.Resource):
    """Handle resource that exposes a Swift TempURL for signalling.

    The TempURL is stored in resource data under the ENDPOINT key; the
    companion SwiftSignal resource polls the underlying Swift object.
    """

    support_status = support.SupportStatus(version='2014.2')

    default_client_name = "swift"

    properties_schema = {}

    ATTRIBUTES = (
        TOKEN,
        ENDPOINT,
        CURL_CLI,
    ) = (
        'token',
        'endpoint',
        'curl_cli',
    )

    attributes_schema = {
        TOKEN: attributes.Schema(
            _('Tokens are not needed for Swift TempURLs. This attribute is '
              'being kept for compatibility with the '
              'OS::Heat::WaitConditionHandle resource'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING),
        ENDPOINT: attributes.Schema(
            _('Endpoint/url which can be used for signalling handle'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING),
        CURL_CLI: attributes.Schema(
            _('Convenience attribute, provides curl CLI command '
              'prefix, which can be used for signalling handle completion or '
              'failure. You can signal success by adding '
              '--data-binary \'{"status": "SUCCESS"}\' '
              ', or signal failure by adding '
              '--data-binary \'{"status": "FAILURE"}\''),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING),
    }

    def handle_create(self):
        """Generate the TempURL and persist it in resource data."""
        cplugin = self.client_plugin()
        url = cplugin.get_signal_url(self.stack.id,
                                     self.physical_resource_name())
        self.data_set(self.ENDPOINT, url)
        self.resource_id_set(self.physical_resource_name())

    def _resolve_attribute(self, key):
        # All attributes derive from the stored ENDPOINT; nothing is
        # resolvable until the resource id has been set.
        if self.resource_id:
            if key == self.TOKEN:
                return ''  # HeatWaitConditionHandle compatibility
            elif key == self.ENDPOINT:
                return self.data().get(self.ENDPOINT)
            elif key == self.CURL_CLI:
                return ("curl -i -X PUT '%s'" %
                        self.data().get(self.ENDPOINT))

    def handle_delete(self):
        """Remove every object version, then the container if empty."""
        cplugin = self.client_plugin()
        client = cplugin.client()

        # Delete all versioned objects: keep deleting until Swift reports
        # the object is gone (ignore_not_found re-raises anything else).
        while True:
            try:
                client.delete_object(self.stack.id,
                                     self.physical_resource_name())
            except Exception as exc:
                cplugin.ignore_not_found(exc)
                break

        # Delete the container if it is empty; a conflict means another
        # handle in the stack still has objects in it, which is fine.
        try:
            client.delete_container(self.stack.id)
        except Exception as exc:
            if cplugin.is_not_found(exc) or cplugin.is_conflict(exc):
                pass
            else:
                raise

        self.data_delete(self.ENDPOINT)

    def get_reference_id(self):
        # Referencing the handle yields its signalling URL.
        return self.data().get(self.ENDPOINT)
class HeatWaitCondition(resource.Resource):
    """Resource for handling signals received by WaitConditionHandle.

    Resource takes WaitConditionHandle and starts to create. Resource is in
    CREATE_IN_PROGRESS status until WaitConditionHandle doesn't receive
    sufficient number of successful signals (this number can be specified with
    count property) and successfully creates after that, or fails due to
    timeout.
    """

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        HANDLE, TIMEOUT, COUNT,
    ) = (
        'handle', 'timeout', 'count',
    )

    ATTRIBUTES = (
        DATA,
    ) = (
        'data',
    )

    properties_schema = {
        HANDLE: properties.Schema(
            properties.Schema.STRING,
            _('A reference to the wait condition handle used to signal this '
              'wait condition.'),
            required=True
        ),
        TIMEOUT: properties.Schema(
            properties.Schema.NUMBER,
            _('The number of seconds to wait for the correct number of '
              'signals to arrive.'),
            required=True,
            constraints=[
                constraints.Range(1, 43200),
            ]
        ),
        COUNT: properties.Schema(
            properties.Schema.INTEGER,
            _('The number of success signals that must be received before '
              'the stack creation process continues.'),
            constraints=[
                constraints.Range(min=1),
            ],
            default=1,
            update_allowed=True
        ),
    }

    attributes_schema = {
        DATA: attributes.Schema(
            _('JSON string containing data associated with wait '
              'condition signals sent to the handle.'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING
        ),
    }

    def _get_handle_resource(self):
        # HANDLE holds the handle's reference id; look the resource up by it.
        return self.stack.resource_by_refid(self.properties[self.HANDLE])

    def _validate_handle_resource(self, handle):
        """Raise ValueError unless handle is a BaseWaitConditionHandle."""
        if handle is not None and isinstance(
                handle, wc_base.BaseWaitConditionHandle):
            return
        hn = handle.name if handle else self.properties[self.HANDLE]
        msg = _('%s is not a valid wait condition handle.') % hn
        raise ValueError(msg)

    def _wait(self, handle, started_at, timeout_in):
        """One polling step: timeout check, failure check, count check.

        Returns True once COUNT successful signals have arrived; raises
        WaitConditionTimeout / WaitConditionFailure otherwise.
        """
        if timeutils.is_older_than(started_at, timeout_in):
            exc = wc_base.WaitConditionTimeout(self, handle)
            LOG.info('%(name)s Timed out (%(timeout)s)',
                     {'name': str(self), 'timeout': str(exc)})
            raise exc

        handle_status = handle.get_status()
        if any(s != handle.STATUS_SUCCESS for s in handle_status):
            failure = wc_base.WaitConditionFailure(self, handle)
            LOG.info('%(name)s Failed (%(failure)s)',
                     {'name': str(self), 'failure': str(failure)})
            raise failure

        if len(handle_status) >= self.properties[self.COUNT]:
            LOG.info("%s Succeeded", str(self))
            return True
        return False

    def handle_create(self):
        """Validate the handle and hand polling state to the checker."""
        handle = self._get_handle_resource()
        self._validate_handle_resource(handle)
        started_at = timeutils.utcnow()
        return handle, started_at, float(self.properties[self.TIMEOUT])

    def check_create_complete(self, data):
        return self._wait(*data)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Restart the wait with fresh properties (COUNT is updatable)."""
        if prop_diff:
            self.properties = json_snippet.properties(self.properties_schema,
                                                      self.context)

        handle = self._get_handle_resource()
        started_at = timeutils.utcnow()
        return handle, started_at, float(self.properties[self.TIMEOUT])

    def check_update_complete(self, data):
        return self._wait(*data)

    def handle_delete(self):
        # Clear accumulated signals from the handle's metadata.
        handle = self._get_handle_resource()
        if handle:
            handle.metadata_set({})

    def _resolve_attribute(self, key):
        handle = self._get_handle_resource()
        if handle is None:
            return ''
        if key == self.DATA:
            # Collapse handle metadata to {signal_id: data} and serialize.
            meta = handle.metadata_get(refresh=True)
            res = {k: meta[k][handle.DATA] for k in meta}
            LOG.debug('%(name)s.GetAtt(%(key)s) == %(res)s'
                      % {'name': self.name,
                         'key': key,
                         'res': res})
            return six.text_type(jsonutils.dumps(res))
class NetworkGateway(neutron.NeutronResource):
    """Network Gateway resource in Neutron Network Gateway.

    The gateway itself is created from NAME/DEVICES; each entry of
    CONNECTIONS is then connected via connect_network_gateway. Legacy
    ``network_id`` connection keys are folded into ``network`` by a
    translation rule and by explicit handling in create/delete/update.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        NAME, DEVICES, CONNECTIONS,
    ) = (
        'name', 'devices', 'connections',
    )

    ATTRIBUTES = (
        DEFAULT,
    ) = (
        'default',
    )

    _DEVICES_KEYS = (
        ID, INTERFACE_NAME,
    ) = (
        'id', 'interface_name',
    )

    _CONNECTIONS_KEYS = (
        NETWORK_ID, NETWORK, SEGMENTATION_TYPE, SEGMENTATION_ID,
    ) = (
        'network_id', 'network', 'segmentation_type', 'segmentation_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            description=_('The name of the network gateway.'),
            update_allowed=True),
        DEVICES: properties.Schema(
            properties.Schema.LIST,
            description=_('Device info for this network gateway.'),
            required=True,
            constraints=[constraints.Length(min=1)],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ID: properties.Schema(
                        properties.Schema.STRING,
                        description=_('The device id for the network '
                                      'gateway.'),
                        required=True),
                    INTERFACE_NAME: properties.Schema(
                        properties.Schema.STRING,
                        description=_('The interface name for the '
                                      'network gateway.'),
                        required=True)
                })),
        CONNECTIONS: properties.Schema(
            properties.Schema.LIST,
            description=_('Connection info for this network gateway.'),
            default={},
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    # Hidden legacy spelling of NETWORK (see translation
                    # rule below).
                    NETWORK_ID: properties.Schema(
                        properties.Schema.STRING,
                        support_status=support.SupportStatus(
                            status=support.HIDDEN,
                            message=_('Use property %s.') % NETWORK,
                            version='5.0.0',
                            previous_status=support.SupportStatus(
                                status=support.DEPRECATED,
                                version='2014.2')),
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ],
                    ),
                    NETWORK: properties.Schema(
                        properties.Schema.STRING,
                        description=_('The internal network to connect on '
                                      'the network gateway.'),
                        support_status=support.SupportStatus(version='2014.2'),
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ],
                    ),
                    SEGMENTATION_TYPE: properties.Schema(
                        properties.Schema.STRING,
                        description=_('L2 segmentation strategy on the '
                                      'external side of the network '
                                      'gateway.'),
                        default='flat',
                        constraints=[
                            constraints.AllowedValues(('flat', 'vlan'))
                        ]),
                    SEGMENTATION_ID: properties.Schema(
                        properties.Schema.INTEGER,
                        description=_('The id for L2 segment on the '
                                      'external side of the network '
                                      'gateway. Must be specified when '
                                      'using vlan.'),
                        constraints=[constraints.Range(0, 4094)])
                }))
    }

    attributes_schema = {
        DEFAULT: attributes.Schema(
            _("A boolean value of default flag."),
            type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        """Fold each connection's legacy network_id key into network.

        BUG FIX: this method previously took no ``props`` argument and
        built the rule from ``self.properties``, unlike every other
        resource in this codebase (e.g. Pool, Port, RouterGateway), whose
        ``translation_rules(self, props)`` receive the property set to
        translate from the caller. The old zero-argument signature raises
        TypeError when invoked with ``props``.
        """
        return [
            properties.TranslationRule(
                props,
                properties.TranslationRule.REPLACE,
                [self.CONNECTIONS, self.NETWORK],
                value_name=self.NETWORK_ID)
        ]

    def _show_resource(self):
        # Raw network-gateway dict as returned by Neutron.
        return self.client().show_network_gateway(
            self.resource_id)['network_gateway']

    def validate(self):
        """Validate any of the provided params.

        Each connection must name a network (new or legacy key), and
        segmentation_id is required for vlan but forbidden (except 0)
        for flat.
        """
        super(NetworkGateway, self).validate()
        connections = self.properties[self.CONNECTIONS]

        for connection in connections:
            self._validate_depr_property_required(
                connection, self.NETWORK, self.NETWORK_ID)
            segmentation_type = connection[self.SEGMENTATION_TYPE]
            segmentation_id = connection.get(self.SEGMENTATION_ID)

            if segmentation_type == 'vlan' and segmentation_id is None:
                msg = _("segmentation_id must be specified for using vlan")
                raise exception.StackValidationFailed(message=msg)

            if segmentation_type == 'flat' and segmentation_id:
                msg = _("segmentation_id cannot be specified except 0 for "
                        "using flat")
                raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the gateway, then connect each configured network."""
        props = self.prepare_properties(
            self.physical_resource_name()) if False else \
            self.prepare_properties(self.properties,
                                    self.physical_resource_name())
        connections = props.pop(self.CONNECTIONS)
        ret = self.client().create_network_gateway(
            {'network_gateway': props})['network_gateway']
        self.resource_id_set(ret['id'])

        for connection in connections:
            # Normalize to the 'network_id' key the Neutron API expects.
            self.client_plugin().resolve_network(
                connection, self.NETWORK, 'network_id')
            if self.NETWORK in six.iterkeys(connection):
                connection.pop(self.NETWORK)
            self.client().connect_network_gateway(ret['id'], connection)

    def handle_delete(self):
        """Disconnect every connection, then delete the gateway."""
        if not self.resource_id:
            return

        connections = self.properties[self.CONNECTIONS]
        for connection in connections:
            # Best effort: a connection that is already gone is fine.
            try:
                self.client_plugin().resolve_network(
                    connection, self.NETWORK, 'network_id')
                if self.NETWORK in six.iterkeys(connection):
                    connection.pop(self.NETWORK)
                self.client().disconnect_network_gateway(
                    self.resource_id, connection)
            except Exception as ex:
                self.client_plugin().ignore_not_found(ex)

        try:
            self.client().delete_network_gateway(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Replace the gateway if DEVICES changed; else patch in place.

        A DEVICES change forces delete+recreate; NAME is pushed via
        update_network_gateway; CONNECTIONS are reconciled by
        disconnecting all old and connecting all new entries.
        """
        props = self.prepare_update_properties(json_snippet)
        connections = props.pop(self.CONNECTIONS)

        if self.DEVICES in prop_diff:
            # Devices cannot be updated in place: rebuild the gateway.
            self.handle_delete()
            self.properties.data.update(props)
            self.handle_create()
            return
        else:
            props.pop(self.DEVICES, None)

        if self.NAME in prop_diff:
            self.client().update_network_gateway(
                self.resource_id, {'network_gateway': props})

        if self.CONNECTIONS in prop_diff:
            # Drop every existing connection (ignoring ones already gone),
            # then establish the new set.
            for connection in self.properties[self.CONNECTIONS]:
                try:
                    self.client_plugin().resolve_network(
                        connection, self.NETWORK, 'network_id')
                    if self.NETWORK in six.iterkeys(connection):
                        connection.pop(self.NETWORK)
                    self.client().disconnect_network_gateway(
                        self.resource_id, connection)
                except Exception as ex:
                    self.client_plugin().ignore_not_found(ex)

            for connection in connections:
                self.client_plugin().resolve_network(
                    connection, self.NETWORK, 'network_id')
                if self.NETWORK in six.iterkeys(connection):
                    connection.pop(self.NETWORK)
                self.client().connect_network_gateway(
                    self.resource_id, connection)
class Port(neutron.NeutronResource): """A resource for managing Neutron ports. A port represents a virtual switch port on a logical network switch. Virtual instances attach their interfaces into ports. The logical port also defines the MAC address and the IP address(es) to be assigned to the interfaces plugged into them. When IP addresses are associated to a port, this also implies the port is associated with a subnet, as the IP address was taken from the allocation pool for a specific subnet. """ entity = 'port' PROPERTIES = ( NAME, NETWORK_ID, NETWORK, FIXED_IPS, SECURITY_GROUPS, REPLACEMENT_POLICY, DEVICE_ID, DEVICE_OWNER, DNS_NAME, ) = ( 'name', 'network_id', 'network', 'fixed_ips', 'security_groups', 'replacement_policy', 'device_id', 'device_owner', 'dns_name', ) EXTRA_PROPERTIES = ( VALUE_SPECS, ADMIN_STATE_UP, MAC_ADDRESS, ALLOWED_ADDRESS_PAIRS, VNIC_TYPE, QOS_POLICY, PORT_SECURITY_ENABLED, ) = ( 'value_specs', 'admin_state_up', 'mac_address', 'allowed_address_pairs', 'binding:vnic_type', 'qos_policy', 'port_security_enabled', ) _FIXED_IP_KEYS = ( FIXED_IP_SUBNET_ID, FIXED_IP_SUBNET, FIXED_IP_IP_ADDRESS, ) = ( 'subnet_id', 'subnet', 'ip_address', ) _ALLOWED_ADDRESS_PAIR_KEYS = ( ALLOWED_ADDRESS_PAIR_MAC_ADDRESS, ALLOWED_ADDRESS_PAIR_IP_ADDRESS, ) = ( 'mac_address', 'ip_address', ) ATTRIBUTES = ( ADMIN_STATE_UP_ATTR, DEVICE_ID_ATTR, DEVICE_OWNER_ATTR, FIXED_IPS_ATTR, MAC_ADDRESS_ATTR, NAME_ATTR, NETWORK_ID_ATTR, SECURITY_GROUPS_ATTR, STATUS, TENANT_ID, ALLOWED_ADDRESS_PAIRS_ATTR, SUBNETS_ATTR, PORT_SECURITY_ENABLED_ATTR, QOS_POLICY_ATTR, DNS_ASSIGNMENT, ) = ( 'admin_state_up', 'device_id', 'device_owner', 'fixed_ips', 'mac_address', 'name', 'network_id', 'security_groups', 'status', 'tenant_id', 'allowed_address_pairs', 'subnets', 'port_security_enabled', 'qos_policy_id', 'dns_assignment', ) properties_schema = { NAME: properties.Schema(properties.Schema.STRING, _('A symbolic name for this port.'), update_allowed=True), NETWORK_ID: properties.Schema( 
properties.Schema.STRING, support_status=support.SupportStatus( status=support.HIDDEN, version='5.0.0', message=_('Use property %s.') % NETWORK, previous_status=support.SupportStatus( status=support.DEPRECATED, version='2014.2')), constraints=[constraints.CustomConstraint('neutron.network')], ), NETWORK: properties.Schema( properties.Schema.STRING, _('Network this port belongs to. If you plan to use current port ' 'to assign Floating IP, you should specify %(fixed_ips)s ' 'with %(subnet)s. Note if this changes to a different network ' 'update, the port will be replaced.') % { 'fixed_ips': FIXED_IPS, 'subnet': FIXED_IP_SUBNET }, support_status=support.SupportStatus(version='2014.2'), required=True, constraints=[constraints.CustomConstraint('neutron.network')], ), DEVICE_ID: properties.Schema(properties.Schema.STRING, _('Device ID of this port.'), update_allowed=True), DEVICE_OWNER: properties.Schema(properties.Schema.STRING, _('Name of the network owning the port. ' 'The value is typically network:floatingip ' 'or network:router_interface or network:dhcp.'), update_allowed=True), FIXED_IPS: properties.Schema( properties.Schema.LIST, _('Desired IPs for this port.'), schema=properties.Schema( properties.Schema.MAP, schema={ FIXED_IP_SUBNET_ID: properties.Schema( properties.Schema.STRING, support_status=support.SupportStatus( status=support.HIDDEN, version='5.0.0', message=_('Use property %s.') % FIXED_IP_SUBNET, previous_status=support.SupportStatus( status=support.DEPRECATED, version='2014.2 ')), constraints=[ constraints.CustomConstraint('neutron.subnet') ]), FIXED_IP_SUBNET: properties.Schema( properties.Schema.STRING, _('Subnet in which to allocate the IP address for ' 'this port.'), support_status=support.SupportStatus(version='2014.2'), constraints=[ constraints.CustomConstraint('neutron.subnet') ]), FIXED_IP_IP_ADDRESS: properties.Schema( properties.Schema.STRING, _('IP address desired in the subnet for this port.'), 
constraints=[constraints.CustomConstraint('ip_addr')]), }, ), update_allowed=True), SECURITY_GROUPS: properties.Schema(properties.Schema.LIST, _('Security group IDs to associate with this port.'), update_allowed=True), REPLACEMENT_POLICY: properties.Schema( properties.Schema.STRING, _('Policy on how to respond to a stack-update for this resource. ' 'REPLACE_ALWAYS will replace the port regardless of any ' 'property changes. AUTO will update the existing port for any ' 'changed update-allowed property.'), default='AUTO', constraints=[ constraints.AllowedValues(['REPLACE_ALWAYS', 'AUTO']), ], update_allowed=True, support_status=support.SupportStatus( status=support.HIDDEN, version='9.0.0', previous_status=support.SupportStatus( status=support.DEPRECATED, version='6.0.0', message=_('Replacement policy used to work around flawed ' 'nova/neutron port interaction which has been ' 'fixed since Liberty.'), previous_status=support.SupportStatus(version='2014.2')))), DNS_NAME: properties.Schema( properties.Schema.STRING, _('DNS name associated with the port.'), update_allowed=True, constraints=[constraints.CustomConstraint('dns_name')], support_status=support.SupportStatus(version='7.0.0'), ), } # NOTE(prazumovsky): properties_schema has been separated because some # properties used in server for creating internal port. extra_properties_schema = { VALUE_SPECS: properties.Schema(properties.Schema.MAP, _('Extra parameters to include in the request.'), default={}, update_allowed=True), ADMIN_STATE_UP: properties.Schema(properties.Schema.BOOLEAN, _('The administrative state of this port.'), default=True, update_allowed=True), MAC_ADDRESS: properties.Schema( properties.Schema.STRING, _('MAC address to give to this port. 
The default update policy ' 'of this property in neutron is that allow admin role only.'), constraints=[constraints.CustomConstraint('mac_addr')], update_allowed=True, ), ALLOWED_ADDRESS_PAIRS: properties.Schema( properties.Schema.LIST, _('Additional MAC/IP address pairs allowed to pass through the ' 'port.'), schema=properties.Schema( properties.Schema.MAP, schema={ ALLOWED_ADDRESS_PAIR_MAC_ADDRESS: properties.Schema( properties.Schema.STRING, _('MAC address to allow through this port.'), constraints=[constraints.CustomConstraint('mac_addr') ]), ALLOWED_ADDRESS_PAIR_IP_ADDRESS: properties.Schema( properties.Schema.STRING, _('IP address to allow through this port.'), required=True, constraints=[constraints.CustomConstraint('net_cidr') ]), }, ), update_allowed=True, ), VNIC_TYPE: properties.Schema( properties.Schema.STRING, _('The vnic type to be bound on the neutron port. ' 'To support SR-IOV PCI passthrough networking, you can request ' 'that the neutron port to be realized as normal (virtual nic), ' 'direct (pci passthrough), or macvtap ' '(virtual interface with a tap-like software interface). Note ' 'that this only works for Neutron deployments that support ' 'the bindings extension.'), constraints=[ constraints.AllowedValues([ 'normal', 'direct', 'macvtap', 'direct-physical', 'baremetal' ]), ], support_status=support.SupportStatus(version='2015.1'), update_allowed=True), PORT_SECURITY_ENABLED: properties.Schema( properties.Schema.BOOLEAN, _('Flag to enable/disable port security on the port. 
' 'When disable this feature(set it to False), there will be no ' 'packages filtering, like security-group and address-pairs.'), update_allowed=True, support_status=support.SupportStatus(version='5.0.0')), QOS_POLICY: properties.Schema( properties.Schema.STRING, _('The name or ID of QoS policy to attach to this port.'), constraints=[constraints.CustomConstraint('neutron.qos_policy')], update_allowed=True, support_status=support.SupportStatus(version='6.0.0')), } # Need to update properties_schema with other properties before # initialisation, because resource should contain all properties before # creating. Also, documentation should correctly resolves resource # properties schema. properties_schema.update(extra_properties_schema) attributes_schema = { ADMIN_STATE_UP_ATTR: attributes.Schema(_("The administrative state of this port."), type=attributes.Schema.STRING), DEVICE_ID_ATTR: attributes.Schema(_("Unique identifier for the device."), type=attributes.Schema.STRING), DEVICE_OWNER: attributes.Schema(_("Name of the network owning the port."), type=attributes.Schema.STRING), FIXED_IPS_ATTR: attributes.Schema(_("Fixed IP addresses."), type=attributes.Schema.LIST), MAC_ADDRESS_ATTR: attributes.Schema(_("MAC address of the port."), type=attributes.Schema.STRING), NAME_ATTR: attributes.Schema(_("Friendly name of the port."), type=attributes.Schema.STRING), NETWORK_ID_ATTR: attributes.Schema( _("Unique identifier for the network owning the port."), type=attributes.Schema.STRING), SECURITY_GROUPS_ATTR: attributes.Schema(_("A list of security groups for the port."), type=attributes.Schema.LIST), STATUS: attributes.Schema(_("The status of the port."), type=attributes.Schema.STRING), TENANT_ID: attributes.Schema(_("Tenant owning the port."), type=attributes.Schema.STRING), ALLOWED_ADDRESS_PAIRS_ATTR: attributes.Schema(_( "Additional MAC/IP address pairs allowed to pass through " "a port."), type=attributes.Schema.LIST), SUBNETS_ATTR: attributes.Schema(_("A list of all 
subnet attributes for the port."), type=attributes.Schema.LIST), PORT_SECURITY_ENABLED_ATTR: attributes.Schema( _("Port security enabled of the port."), support_status=support.SupportStatus(version='5.0.0'), type=attributes.Schema.BOOLEAN), QOS_POLICY_ATTR: attributes.Schema( _("The QoS policy ID attached to this port."), type=attributes.Schema.STRING, support_status=support.SupportStatus(version='6.0.0'), ), DNS_ASSIGNMENT: attributes.Schema( _("The DNS assigned to this port."), type=attributes.Schema.MAP, support_status=support.SupportStatus(version='7.0.0'), ), } def translation_rules(self, props): return [ translation.TranslationRule(props, translation.TranslationRule.REPLACE, [self.NETWORK], value_path=[self.NETWORK_ID]), translation.TranslationRule(props, translation.TranslationRule.REPLACE, [self.FIXED_IPS, self.FIXED_IP_SUBNET], value_name=self.FIXED_IP_SUBNET_ID), translation.TranslationRule(props, translation.TranslationRule.RESOLVE, [self.NETWORK], client_plugin=self.client_plugin(), finder='find_resourceid_by_name_or_id', entity='network'), translation.TranslationRule(props, translation.TranslationRule.RESOLVE, [self.FIXED_IPS, self.FIXED_IP_SUBNET], client_plugin=self.client_plugin(), finder='find_resourceid_by_name_or_id', entity='subnet') ] def add_dependencies(self, deps): super(Port, self).add_dependencies(deps) # Depend on any Subnet in this template with the same # network_id as this network_id. # It is not known which subnet a port might be assigned # to so all subnets in a network should be created before # the ports in that network. 
for res in six.itervalues(self.stack): if res.has_interface('OS::Neutron::Subnet'): dep_network = res.properties.get(subnet.Subnet.NETWORK) network = self.properties[self.NETWORK] if dep_network == network: deps += (self, res) def handle_create(self): props = self.prepare_properties(self.properties, self.physical_resource_name()) props['network_id'] = props.pop(self.NETWORK) self._prepare_port_properties(props) qos_policy = props.pop(self.QOS_POLICY, None) if qos_policy: props['qos_policy_id'] = self.client_plugin().get_qos_policy_id( qos_policy) port = self.client().create_port({'port': props})['port'] self.resource_id_set(port['id']) def _prepare_port_properties(self, props, prepare_for_update=False): if self.FIXED_IPS in props: fixed_ips = props[self.FIXED_IPS] if fixed_ips: for fixed_ip in fixed_ips: for key, value in list(fixed_ip.items()): if value is None: fixed_ip.pop(key) if self.FIXED_IP_SUBNET in fixed_ip: fixed_ip['subnet_id'] = fixed_ip.pop( self.FIXED_IP_SUBNET) else: # Passing empty list would have created a port without # fixed_ips during CREATE and released the existing # fixed_ips during UPDATE (default neutron behaviour). # However, for backward compatibility we will let neutron # assign ip for CREATE and leave the assigned ips during # UPDATE by not passing it. ref bug #1538473. del props[self.FIXED_IPS] # delete empty MAC addresses so that Neutron validation code # wouldn't fail as it not accepts Nones if self.ALLOWED_ADDRESS_PAIRS in props: address_pairs = props[self.ALLOWED_ADDRESS_PAIRS] if address_pairs: for pair in address_pairs: if (self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS in pair and pair[self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS] is None): del pair[self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS] else: props[self.ALLOWED_ADDRESS_PAIRS] = [] # if without 'security_groups', don't set the 'security_groups' # property when creating, neutron will create the port with the # 'default' securityGroup. 
If has the 'security_groups' and the # value is [], which means to create the port without securityGroup. if self.SECURITY_GROUPS in props: if props.get(self.SECURITY_GROUPS) is not None: props[self.SECURITY_GROUPS] = self.client_plugin( ).get_secgroup_uuids(props.get(self.SECURITY_GROUPS)) else: # And the update should has the same behavior. if prepare_for_update: props[self.SECURITY_GROUPS] = self.client_plugin( ).get_secgroup_uuids(['default']) if self.REPLACEMENT_POLICY in props: del (props[self.REPLACEMENT_POLICY]) def check_create_complete(self, *args): attributes = self._show_resource() return self.is_built(attributes) def handle_delete(self): try: self.client().delete_port(self.resource_id) except Exception as ex: self.client_plugin().ignore_not_found(ex) else: return True def _resolve_attribute(self, name): if self.resource_id is None: return if name == self.SUBNETS_ATTR: subnets = [] try: fixed_ips = self._show_resource().get('fixed_ips', []) for fixed_ip in fixed_ips: subnet_id = fixed_ip.get('subnet_id') if subnet_id: subnets.append( self.client().show_subnet(subnet_id)['subnet']) except Exception as ex: LOG.warning("Failed to fetch resource attributes: %s", ex) return return subnets return super(Port, self)._resolve_attribute(name) def needs_replace(self, after_props): """Mandatory replace based on props.""" return after_props.get(self.REPLACEMENT_POLICY) == 'REPLACE_ALWAYS' def handle_update(self, json_snippet, tmpl_diff, prop_diff): if prop_diff: self.prepare_update_properties(prop_diff) if self.QOS_POLICY in prop_diff: qos_policy = prop_diff.pop(self.QOS_POLICY) prop_diff['qos_policy_id'] = self.client_plugin( ).get_qos_policy_id(qos_policy) if qos_policy else None self._prepare_port_properties(prop_diff, prepare_for_update=True) LOG.debug('updating port with %s', prop_diff) self.client().update_port(self.resource_id, {'port': prop_diff}) def check_update_complete(self, *args): attributes = self._show_resource() return self.is_built(attributes) def 
prepare_for_replace(self): # if the port has not been created yet, return directly if self.resource_id is None: return # store port fixed_ips for restoring after failed update fixed_ips = self._show_resource().get('fixed_ips', []) self.data_set('port_fip', jsonutils.dumps(fixed_ips)) # reset fixed_ips for this port by setting fixed_ips to [] props = {'fixed_ips': []} self.client().update_port(self.resource_id, {'port': props}) def restore_prev_rsrc(self, convergence=False): # In case of convergence, during rollback, the previous rsrc is # already selected and is being acted upon. backup_stack = self.stack._backup_stack() backup_res = backup_stack.resources.get(self.name) prev_port = self if convergence else backup_res fixed_ips = prev_port.data().get('port_fip', []) props = {'fixed_ips': []} if convergence: existing_port, rsrc_owning_stack, stack = resource.Resource.load( prev_port.context, prev_port.replaced_by, True, prev_port.stack.cache_data) existing_port_id = existing_port.resource_id else: existing_port_id = self.resource_id if existing_port_id: # reset fixed_ips to [] for new resource self.client().update_port(existing_port_id, {'port': props}) if fixed_ips and prev_port.resource_id: # restore ip for old port prev_port_props = {'fixed_ips': jsonutils.loads(fixed_ips)} self.client().update_port(prev_port.resource_id, {'port': prev_port_props})
class Firewall(neutron.NeutronResource):
    """A resource for the Firewall resource in Neutron FWaaS.

    Resource for using the Neutron firewall implementation. Firewall is a
    network security system that monitors and controls the incoming and
    outgoing network traffic based on predetermined security rules.
    """

    # This resource can only be used when the 'fwaas' service extension is
    # enabled in Neutron.
    required_service_extension = 'fwaas'

    PROPERTIES = (
        NAME, DESCRIPTION, ADMIN_STATE_UP, FIREWALL_POLICY_ID,
        VALUE_SPECS, SHARED,
    ) = (
        'name', 'description', 'admin_state_up', 'firewall_policy_id',
        'value_specs', 'shared',
    )

    ATTRIBUTES = (
        NAME_ATTR, DESCRIPTION_ATTR, ADMIN_STATE_UP_ATTR,
        FIREWALL_POLICY_ID_ATTR, SHARED_ATTR, STATUS, TENANT_ID,
    ) = (
        'name', 'description', 'admin_state_up', 'firewall_policy_id',
        'shared', 'status', 'tenant_id',
    )

    properties_schema = {
        NAME: properties.Schema(properties.Schema.STRING,
                                _('Name for the firewall.'),
                                update_allowed=True),
        DESCRIPTION: properties.Schema(properties.Schema.STRING,
                                       _('Description for the firewall.'),
                                       update_allowed=True),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Administrative state of the firewall. If false (down), '
              'firewall does not forward packets and will drop all '
              'traffic to/from VMs behind the firewall.'),
            default=True,
            update_allowed=True),
        FIREWALL_POLICY_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the firewall policy that this firewall is '
              'associated with.'),
            required=True,
            update_allowed=True),
        VALUE_SPECS: properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the request. Parameters '
              'are often specific to installed hardware or extensions.'),
            support_status=support.SupportStatus(version='5.0.0'),
            default={},
            update_allowed=True),
        SHARED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether this firewall should be shared across all tenants. '
              'NOTE: The default policy setting in Neutron restricts usage '
              'of this property to administrative users only.'),
            update_allowed=True,
            # Marked UNSUPPORTED: the backing Neutron option did not exist in
            # the 5.0.0 timeframe (see message below).
            support_status=support.SupportStatus(
                status=support.UNSUPPORTED,
                message=_('There is no such option during 5.0.0, so need to '
                          'make this property unsupported while it not used.'),
                version='6.0.0',
                previous_status=support.SupportStatus(version='2015.1'))),
    }

    attributes_schema = {
        NAME_ATTR: attributes.Schema(_('Name for the firewall.'),
                                     type=attributes.Schema.STRING),
        DESCRIPTION_ATTR: attributes.Schema(
            _('Description of the firewall.'),
            type=attributes.Schema.STRING),
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _('The administrative state of the firewall.'),
            type=attributes.Schema.STRING),
        FIREWALL_POLICY_ID_ATTR: attributes.Schema(
            _('Unique identifier of the firewall policy used to create '
              'the firewall.'),
            type=attributes.Schema.STRING),
        SHARED_ATTR: attributes.Schema(
            _('Shared status of this firewall.'),
            # Unsupported for the same reason as the SHARED property above;
            # _resolve_attribute short-circuits it with a static message.
            support_status=support.SupportStatus(
                status=support.UNSUPPORTED,
                message=_('There is no such option during 5.0.0, so need to '
                          'make this attribute unsupported, otherwise error '
                          'will raised.'),
                version='6.0.0'),
            type=attributes.Schema.STRING),
        STATUS: attributes.Schema(_('The status of the firewall.'),
                                  type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(
            _('Id of the tenant owning the firewall.'),
            type=attributes.Schema.STRING),
    }

    def _show_resource(self):
        """Return the current firewall dict as reported by Neutron."""
        return self.client().show_firewall(self.resource_id)['firewall']

    def check_create_complete(self, data):
        """Poll Neutron until the firewall leaves PENDING_CREATE.

        Returns False while still pending, True once ACTIVE. Raises
        ResourceInError on ERROR status and ResourceUnknownStatus for any
        other unexpected status value.
        """
        attributes = self._show_resource()
        status = attributes['status']
        if status == 'PENDING_CREATE':
            return False
        elif status == 'ACTIVE':
            return True
        elif status == 'ERROR':
            raise exception.ResourceInError(
                resource_status=status,
                status_reason=_('Error in Firewall'))
        else:
            raise exception.ResourceUnknownStatus(
                resource_status=status,
                result=_('Firewall creation failed'))

    def handle_create(self):
        """Create the firewall in Neutron and record its ID."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        firewall = self.client().create_firewall(
            {'firewall': props})['firewall']
        self.resource_id_set(firewall['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed properties (prop_diff) to Neutron."""
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_firewall(self.resource_id,
                                          {'firewall': prop_diff})

    def handle_delete(self):
        """Delete the firewall, ignoring not-found (already gone)."""
        try:
            self.client().delete_firewall(self.resource_id)
        except Exception as ex:
            # Re-raises unless the exception is a not-found error.
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def _resolve_attribute(self, name):
        # The 'shared' attribute has no Neutron backing (see SHARED_ATTR
        # schema note), so answer it with a static explanation instead of
        # querying the API.
        if name == self.SHARED_ATTR:
            return ('This attribute is currently unsupported in neutron '
                    'firewall resource.')
        return super(Firewall, self)._resolve_attribute(name)
class MultipartMime(software_config.SoftwareConfig):
    """Assembles a collection of software configurations as a multi-part mime.

    Parts in the message can be populated with inline configuration or
    references to other config resources. If the referenced resource is itself
    a valid multi-part mime message, that will be broken into parts and
    those parts appended to this message.

    The resulting multi-part mime message will be stored by the configs API
    and can be referenced in properties such as OS::Nova::Server user_data.

    This resource is generally used to build a list of cloud-init
    configuration elements including scripts and cloud-config. Since
    cloud-init is boot-only configuration, any changes to the definition
    will result in the replacement of all servers which reference it.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        PARTS, CONFIG, FILENAME, TYPE, SUBTYPE
    ) = (
        'parts', 'config', 'filename', 'type', 'subtype'
    )

    TYPES = (
        TEXT, MULTIPART
    ) = (
        'text', 'multipart'
    )

    properties_schema = {
        PARTS: properties.Schema(
            properties.Schema.LIST,
            _('Parts belonging to this message.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CONFIG: properties.Schema(
                        properties.Schema.STRING,
                        _('Content of part to attach, either inline or by '
                          'referencing the ID of another software config '
                          'resource.'),
                        required=True),
                    FILENAME: properties.Schema(
                        properties.Schema.STRING,
                        _('Optional filename to associate with part.')),
                    TYPE: properties.Schema(
                        properties.Schema.STRING,
                        _('Whether the part content is text or multipart.'),
                        default=TEXT,
                        constraints=[constraints.AllowedValues(TYPES)]),
                    SUBTYPE: properties.Schema(
                        properties.Schema.STRING,
                        _('Optional subtype to specify with the type.')),
                }))
    }

    # Per-instance cache for the rendered mime message; populated lazily by
    # get_message().
    message = None

    def handle_create(self):
        """Render the mime message and store it via the software-config API."""
        props = {
            rpc_api.SOFTWARE_CONFIG_NAME: self.physical_resource_name(),
            rpc_api.SOFTWARE_CONFIG_CONFIG: self.get_message(),
            rpc_api.SOFTWARE_CONFIG_GROUP: 'Heat::Ungrouped'
        }
        sc = self.rpc_client().create_software_config(self.context, **props)
        self.resource_id_set(sc[rpc_api.SOFTWARE_CONFIG_ID])

    def get_message(self):
        """Build (and cache) the multi-part mime message string.

        Each entry in the 'parts' property contributes either its inline
        config text or, when 'config' looks like a UUID, the stored config
        fetched over RPC. Parts of type 'multipart' are exploded and their
        sub-parts appended individually.
        """
        if self.message:
            return self.message
        subparts = []
        for item in self.properties[self.PARTS]:
            config = item.get(self.CONFIG)
            part_type = item.get(self.TYPE, self.TEXT)
            # Default to the raw config value; replaced below if it resolves
            # to a stored software config.
            part = config
            if uuidutils.is_uuid_like(config):
                # A missing referenced config is tolerated: on NotFound the
                # raw UUID string itself is used as the part content.
                with self.rpc_client().ignore_error_by_name('NotFound'):
                    sc = self.rpc_client().show_software_config(
                        self.context, config)
                    part = sc[rpc_api.SOFTWARE_CONFIG_CONFIG]
            if part_type == self.MULTIPART:
                self._append_multiparts(subparts, part)
            else:
                filename = item.get(self.FILENAME, '')
                subtype = item.get(self.SUBTYPE, '')
                self._append_part(subparts, part, subtype, filename)
        mime_blob = multipart.MIMEMultipart(_subparts=subparts)
        self.message = mime_blob.as_string()
        return self.message

    @staticmethod
    def _append_multiparts(subparts, multi_part):
        """Explode a multi-part message string and append each sub-part.

        Silently ignores content that does not parse as a multipart message.
        """
        multi_parts = email.message_from_string(multi_part)
        if not multi_parts or not multi_parts.is_multipart():
            return

        for part in multi_parts.get_payload():
            MultipartMime._append_part(
                subparts, part.get_payload(),
                part.get_content_subtype(),
                part.get_filename())

    @staticmethod
    def _append_part(subparts, part, subtype, filename):
        """Wrap one text part in a MIMEText message and append it."""
        if not subtype and filename:
            # Derive a subtype from the filename when none was given.
            # NOTE(review): splitext()[0] is the base name, not the
            # extension — presumably intentional upstream behavior; confirm
            # before changing.
            subtype = os.path.splitext(filename)[0]

        msg = MultipartMime._create_message(part, subtype, filename)
        subparts.append(msg)

    @staticmethod
    def _create_message(part, subtype, filename):
        """Create a MIMEText for *part*, choosing a charset that fits.

        us-ascii is tried first; utf-8 is used if the content cannot be
        encoded as ascii. A filename, when given, is attached via a
        Content-Disposition header.
        """
        charset = 'us-ascii'
        try:
            part.encode(charset)
        except UnicodeEncodeError:
            charset = 'utf-8'
        msg = (text.MIMEText(part, _subtype=subtype,
                             _charset=charset)
               if subtype else text.MIMEText(part, _charset=charset))

        if filename:
            msg.add_header('Content-Disposition', 'attachment',
                           filename=filename)
        return msg
class Router(neutron.NeutronResource):
    """A resource that implements Neutron router.

    Router is a physical or virtual network device that passes network
    traffic between different networks.
    """

    required_service_extension = 'router'

    # Default entity name used by the generic Neutron show/delete plumbing.
    entity = 'router'

    PROPERTIES = (
        NAME, EXTERNAL_GATEWAY, VALUE_SPECS, ADMIN_STATE_UP,
        L3_AGENT_ID, L3_AGENT_IDS, DISTRIBUTED, HA, TAGS,
    ) = (
        'name', 'external_gateway_info', 'value_specs', 'admin_state_up',
        'l3_agent_id', 'l3_agent_ids', 'distributed', 'ha', 'tags',
    )

    _EXTERNAL_GATEWAY_KEYS = (
        EXTERNAL_GATEWAY_NETWORK, EXTERNAL_GATEWAY_ENABLE_SNAT,
        EXTERNAL_GATEWAY_FIXED_IPS,
    ) = (
        'network', 'enable_snat', 'external_fixed_ips',
    )

    _EXTERNAL_GATEWAY_FIXED_IPS_KEYS = (
        IP_ADDRESS, SUBNET
    ) = (
        'ip_address', 'subnet'
    )

    ATTRIBUTES = (
        STATUS, EXTERNAL_GATEWAY_INFO_ATTR, NAME_ATTR, ADMIN_STATE_UP_ATTR,
        TENANT_ID,
    ) = (
        'status', 'external_gateway_info', 'name', 'admin_state_up',
        'tenant_id',
    )

    properties_schema = {
        NAME: properties.Schema(properties.Schema.STRING,
                                _('The name of the router.'),
                                update_allowed=True),
        EXTERNAL_GATEWAY: properties.Schema(
            properties.Schema.MAP,
            _('External network gateway configuration for a router.'),
            schema={
                EXTERNAL_GATEWAY_NETWORK: properties.Schema(
                    properties.Schema.STRING,
                    _('ID or name of the external network for the gateway.'),
                    required=True,
                    update_allowed=True),
                EXTERNAL_GATEWAY_ENABLE_SNAT: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('Enables Source NAT on the router gateway. NOTE: The '
                      'default policy setting in Neutron restricts usage of '
                      'this property to administrative users only.'),
                    update_allowed=True),
                EXTERNAL_GATEWAY_FIXED_IPS: properties.Schema(
                    properties.Schema.LIST,
                    _('External fixed IP addresses for the gateway.'),
                    schema=properties.Schema(
                        properties.Schema.MAP,
                        schema={
                            IP_ADDRESS: properties.Schema(
                                properties.Schema.STRING,
                                _('External fixed IP address.'),
                                constraints=[
                                    constraints.CustomConstraint('ip_addr'),
                                ]),
                            SUBNET: properties.Schema(
                                properties.Schema.STRING,
                                _('Subnet of external fixed IP address.'),
                                constraints=[
                                    constraints.CustomConstraint(
                                        'neutron.subnet')
                                ]),
                        }),
                    update_allowed=True,
                    support_status=support.SupportStatus(version='6.0.0')),
            },
            update_allowed=True),
        VALUE_SPECS: properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the creation request.'),
            default={},
            update_allowed=True),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the router.'),
            default=True,
            update_allowed=True),
        # Hidden/deprecated single-agent form; translation_rules() folds it
        # into L3_AGENT_IDS.
        L3_AGENT_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of the L3 agent. NOTE: The default policy setting in '
              'Neutron restricts usage of this property to administrative '
              'users only.'),
            update_allowed=True,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2015.1',
                    message=_('Use property %s.') % L3_AGENT_IDS,
                    previous_status=support.SupportStatus(version='2014.1'))),
        ),
        L3_AGENT_IDS: properties.Schema(
            properties.Schema.LIST,
            _('ID list of the L3 agent. User can specify multi-agents '
              'for highly available router. NOTE: The default policy '
              'setting in Neutron restricts usage of this property to '
              'administrative users only.'),
            schema=properties.Schema(properties.Schema.STRING, ),
            update_allowed=True,
            support_status=support.SupportStatus(version='2015.1')),
        DISTRIBUTED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Indicates whether or not to create a distributed router. '
              'NOTE: The default policy setting in Neutron restricts usage '
              'of this property to administrative users only. This property '
              'can not be used in conjunction with the L3 agent ID.'),
            support_status=support.SupportStatus(version='2015.1')),
        HA: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Indicates whether or not to create a highly available router. '
              'NOTE: The default policy setting in Neutron restricts usage '
              'of this property to administrative users only. And now neutron '
              'do not support distributed and ha at the same time.'),
            support_status=support.SupportStatus(version='2015.1')),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            _('The tags to be added to the router.'),
            schema=properties.Schema(properties.Schema.STRING),
            update_allowed=True,
            support_status=support.SupportStatus(version='9.0.0')),
    }

    attributes_schema = {
        STATUS: attributes.Schema(_("The status of the router."),
                                  type=attributes.Schema.STRING),
        EXTERNAL_GATEWAY_INFO_ATTR: attributes.Schema(
            _("Gateway network for the router."),
            type=attributes.Schema.MAP),
        NAME_ATTR: attributes.Schema(_("Friendly name of the router."),
                                     type=attributes.Schema.STRING),
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _("Administrative state of the router."),
            type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(_("Tenant owning the router."),
                                     type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        """Resolve names to IDs and migrate the legacy l3_agent_id property.

        The gateway network and any gateway fixed-ip subnets are resolved to
        UUIDs; if the deprecated L3_AGENT_ID is set it is moved into the
        L3_AGENT_IDS list and then deleted.
        """
        rules = [
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.EXTERNAL_GATEWAY, self.EXTERNAL_GATEWAY_NETWORK],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='network'),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.EXTERNAL_GATEWAY, self.EXTERNAL_GATEWAY_FIXED_IPS,
                 self.SUBNET],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='subnet')
        ]
        if props.get(self.L3_AGENT_ID):
            rules.extend([
                translation.TranslationRule(
                    props,
                    translation.TranslationRule.ADD,
                    [self.L3_AGENT_IDS],
                    [props.get(self.L3_AGENT_ID)]),
                translation.TranslationRule(
                    props,
                    translation.TranslationRule.DELETE,
                    [self.L3_AGENT_ID])
            ])
        return rules

    def validate(self):
        """Reject mutually exclusive property combinations.

        l3_agent_id vs l3_agent_ids, distributed vs explicit agents,
        distributed vs ha, and multiple agents on a non-HA router are all
        conflicts.
        """
        super(Router, self).validate()
        is_distributed = self.properties[self.DISTRIBUTED]
        l3_agent_id = self.properties[self.L3_AGENT_ID]
        l3_agent_ids = self.properties[self.L3_AGENT_IDS]
        is_ha = self.properties[self.HA]
        if l3_agent_id and l3_agent_ids:
            raise exception.ResourcePropertyConflict(self.L3_AGENT_ID,
                                                     self.L3_AGENT_IDS)
        # do not specific l3 agent when creating a distributed router
        if is_distributed and (l3_agent_id or l3_agent_ids):
            raise exception.ResourcePropertyConflict(
                self.DISTRIBUTED,
                "/".join([self.L3_AGENT_ID, self.L3_AGENT_IDS]))
        if is_ha and is_distributed:
            raise exception.ResourcePropertyConflict(self.DISTRIBUTED,
                                                     self.HA)
        if not is_ha and l3_agent_ids and len(l3_agent_ids) > 1:
            msg = _('Non HA routers can only have one L3 agent.')
            raise exception.StackValidationFailed(message=msg)

    def add_dependencies(self, deps):
        """Depend on subnets of the external gateway network.

        Any OS::Neutron::Subnet in this stack on the same network as the
        router's external gateway must be created before this router.
        """
        super(Router, self).add_dependencies(deps)
        external_gw = self.properties[self.EXTERNAL_GATEWAY]
        if external_gw:
            external_gw_net = external_gw.get(self.EXTERNAL_GATEWAY_NETWORK)
            for res in six.itervalues(self.stack):
                if res.has_interface('OS::Neutron::Subnet'):
                    try:
                        subnet_net = res.properties.get(subnet.Subnet.NETWORK)
                    except (ValueError, TypeError):
                        # Properties errors will be caught later in validation,
                        # where we can report them in their proper context.
                        continue
                    if subnet_net == external_gw_net:
                        deps += (self, res)

    def _resolve_gateway(self, props):
        """Convert the gateway property map to the Neutron request shape.

        Renames 'network' to 'network_id', drops unset optional keys, and
        renames fixed-ip 'subnet' entries to 'subnet_id'.
        """
        gateway = props.get(self.EXTERNAL_GATEWAY)
        if gateway:
            gateway['network_id'] = gateway.pop(self.EXTERNAL_GATEWAY_NETWORK)
            if gateway[self.EXTERNAL_GATEWAY_ENABLE_SNAT] is None:
                del gateway[self.EXTERNAL_GATEWAY_ENABLE_SNAT]
            if gateway[self.EXTERNAL_GATEWAY_FIXED_IPS] is None:
                del gateway[self.EXTERNAL_GATEWAY_FIXED_IPS]
            else:
                self._resolve_subnet(gateway)
        return props

    def _get_l3_agent_list(self, props):
        """Pop both agent properties and return the effective agent-ID list.

        The legacy single l3_agent_id is wrapped in a list when the plural
        form is not given.
        """
        l3_agent_id = props.pop(self.L3_AGENT_ID, None)
        l3_agent_ids = props.pop(self.L3_AGENT_IDS, None)
        if not l3_agent_ids and l3_agent_id:
            l3_agent_ids = [l3_agent_id]

        return l3_agent_ids

    def _resolve_subnet(self, gateway):
        # Drop None-valued keys from each fixed-ip entry and rename
        # 'subnet' to the 'subnet_id' key Neutron expects.
        external_gw_fixed_ips = gateway[self.EXTERNAL_GATEWAY_FIXED_IPS]
        for fixed_ip in external_gw_fixed_ips:
            for key, value in fixed_ip.copy().items():
                if value is None:
                    fixed_ip.pop(key)
                if self.SUBNET in fixed_ip:
                    fixed_ip['subnet_id'] = fixed_ip.pop(self.SUBNET)

    def handle_create(self):
        """Create the router, then schedule agents and apply tags."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        self._resolve_gateway(props)
        # Agents and tags are not part of the create request body; they are
        # applied via separate API calls after creation.
        l3_agent_ids = self._get_l3_agent_list(props)
        tags = props.pop(self.TAGS, [])

        router = self.client().create_router({'router': props})['router']
        self.resource_id_set(router['id'])

        if l3_agent_ids:
            self._replace_agent(l3_agent_ids)
        if tags:
            self.set_tags(tags)

    def check_create_complete(self, *args):
        """Return True once Neutron reports the router as built."""
        attributes = self._show_resource()
        return self.is_built(attributes)

    def handle_delete(self):
        """Delete the router, ignoring not-found (already gone)."""
        try:
            self.client().delete_router(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply property changes: gateway, agent hosting, tags, the rest."""
        if self.EXTERNAL_GATEWAY in prop_diff:
            self._resolve_gateway(prop_diff)

        if self.L3_AGENT_IDS in prop_diff or self.L3_AGENT_ID in prop_diff:
            l3_agent_ids = self._get_l3_agent_list(prop_diff)
            self._replace_agent(l3_agent_ids)

        if self.TAGS in prop_diff:
            tags = prop_diff.pop(self.TAGS)
            self.set_tags(tags)

        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_router(
                self.resource_id, {'router': prop_diff})

    def _replace_agent(self, l3_agent_ids=None):
        """Rebind the router's L3 agents.

        Removes the router from every agent currently hosting it, then adds
        it to each agent in l3_agent_ids (when given).
        """
        ret = self.client().list_l3_agent_hosting_routers(self.resource_id)
        for agent in ret['agents']:
            self.client().remove_router_from_l3_agent(
                agent['id'], self.resource_id)
        if l3_agent_ids:
            for l3_agent_id in l3_agent_ids:
                self.client().add_router_to_l3_agent(
                    l3_agent_id, {'router_id': self.resource_id})

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Map live Neutron router data back onto resource properties.

        Adds hosting L3 agent IDs (skipped silently when the API forbids the
        listing call) and reshapes external_gateway_info into the property
        form.
        """
        result = super(Router, self).parse_live_resource_data(
            resource_properties, resource_data)
        try:
            ret = self.client().list_l3_agent_hosting_routers(
                self.resource_id)
            if ret:
                result[self.L3_AGENT_IDS] = list(
                    agent['id'] for agent in ret['agents'])
        except self.client_plugin().exceptions.Forbidden:
            # Just pass if forbidden
            pass
        gateway = resource_data.get(self.EXTERNAL_GATEWAY)
        if gateway is not None:
            result[self.EXTERNAL_GATEWAY] = {
                self.EXTERNAL_GATEWAY_NETWORK: gateway.get('network_id'),
                self.EXTERNAL_GATEWAY_ENABLE_SNAT: gateway.get('enable_snat')
            }
        return result
class SoftwareComponent(sc.SoftwareConfig):
    '''
    A resource for describing and storing a software component.

    This resource is similar to OS::Heat::SoftwareConfig. In contrast to
    SoftwareConfig which allows for storing only one configuration (e.g. one
    script), SoftwareComponent allows for storing multiple configurations to
    address handling of all lifecycle hooks (CREATE, UPDATE, SUSPEND, RESUME,
    DELETE) for a software component in one place.

    This resource is backed by the persistence layer and the API of the
    SoftwareConfig resource, and only adds handling for the additional
    'configs' property and attribute.
    '''

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        CONFIGS, INPUTS, OUTPUTS, OPTIONS,
    ) = (
        'configs', 'inputs', 'outputs', 'options'
    )

    CONFIG_PROPERTIES = (
        CONFIG_ACTIONS, CONFIG_CONFIG, CONFIG_TOOL,
    ) = (
        'actions', 'config', 'tool',
    )

    ATTRIBUTES = (
        CONFIGS_ATTR,
    ) = (
        'configs',
    )

    # properties schema for one entry in the 'configs' list
    config_schema = properties.Schema(
        properties.Schema.MAP,
        schema={
            CONFIG_ACTIONS: properties.Schema(
                # Note: This properties schema allows for custom actions to be
                # specified, which will however require special handling in
                # in-instance hooks. By default, only the standard actions
                # stated below will be handled.
                properties.Schema.LIST,
                _('Lifecycle actions to which the configuration applies. '
                  'The string values provided for this property can include '
                  'the standard resource actions CREATE, DELETE, UPDATE, '
                  'SUSPEND and RESUME supported by Heat.'),
                default=[resource.Resource.CREATE, resource.Resource.UPDATE],
                schema=properties.Schema(properties.Schema.STRING),
                constraints=[
                    constr.Length(min=1),
                ],
                required=True),
            # Reuse the parent SoftwareConfig schema entry for the actual
            # config payload.
            CONFIG_CONFIG:
                sc.SoftwareConfig.properties_schema[sc.SoftwareConfig.CONFIG],
            CONFIG_TOOL: properties.Schema(
                properties.Schema.STRING,
                _('The configuration tool used to actually apply the '
                  'configuration on a server. This string property has '
                  'to be understood by in-instance tools running inside '
                  'deployed servers.'),
                required=True)
        })

    properties_schema = {
        CONFIGS: properties.Schema(
            properties.Schema.LIST,
            _('The list of configurations for the different lifecycle actions '
              'of the represented software component.'),
            schema=config_schema,
            constraints=[constr.Length(min=1)],
            required=True),
        # inputs/outputs/options are inherited unchanged from SoftwareConfig.
        INPUTS: sc.SoftwareConfig.properties_schema[sc.SoftwareConfig.INPUTS],
        OUTPUTS:
            sc.SoftwareConfig.properties_schema[sc.SoftwareConfig.OUTPUTS],
        OPTIONS:
            sc.SoftwareConfig.properties_schema[sc.SoftwareConfig.OPTIONS],
    }

    def handle_create(self):
        """Store the component via the software-config API.

        The configs list is tucked into the parent class's 'config' field,
        and 'group' is set to 'component' so in-instance hooks recognize it.
        """
        props = dict(self.properties)
        props[self.NAME] = self.physical_resource_name()
        # use config property of SoftwareConfig to store configs list
        configs = self.properties.get(self.CONFIGS)
        props[self.CONFIG] = {self.CONFIGS: configs}
        # set 'group' to enable component processing by in-instance hook
        props[self.GROUP] = 'component'

        # NOTE: local 'sc' shadows the module alias 'sc' used above; safe
        # here because the alias is not referenced again in this method.
        sc = self.rpc_client().create_software_config(self.context, **props)
        self.resource_id_set(sc[rpc_api.SOFTWARE_CONFIG_ID])

    def _resolve_attribute(self, name):
        '''
        Retrieve attributes of the SoftwareComponent resource.

        'configs' returns the list of configurations for the software
        component's lifecycle actions. If the attribute does not exist, an
        empty list is returned.
        '''
        if name == self.CONFIGS_ATTR and self.resource_id:
            try:
                sc = self.rpc_client().show_software_config(
                    self.context, self.resource_id)
                # configs list is stored in 'config' property of parent class
                # (see handle_create)
                return sc[rpc_api.SOFTWARE_CONFIG_CONFIG].get(self.CONFIGS)
            except Exception as ex:
                # Swallows only NotFound; any other error propagates.
                self.rpc_client().ignore_error_named(ex, 'NotFound')

    def validate(self):
        '''Validate SoftwareComponent properties consistency.'''
        super(SoftwareComponent, self).validate()

        # One lifecycle action (e.g. CREATE) can only be associated with one
        # config; otherwise a way to define ordering would be required.
        configs = self.properties.get(self.CONFIGS, [])
        config_actions = set()
        for config in configs:
            actions = config.get(self.CONFIG_ACTIONS)
            if any(action in config_actions for action in actions):
                msg = _('Defining more than one configuration for the same '
                        'action in SoftwareComponent "%s" is not allowed.')\
                    % self.name
                raise exception.StackValidationFailed(message=msg)
            config_actions.update(actions)
class RouterInterface(neutron.NeutronResource):
    """A resource for managing Neutron router interfaces.

    Router interfaces associate routers with existing subnets or ports.
    """

    # Only usable against clouds exposing the Neutron 'router' extension.
    required_service_extension = 'router'

    PROPERTIES = (
        ROUTER, ROUTER_ID, SUBNET_ID, SUBNET, PORT_ID, PORT
    ) = (
        'router', 'router_id', 'subnet_id', 'subnet', 'port_id', 'port'
    )

    properties_schema = {
        ROUTER: properties.Schema(
            properties.Schema.STRING,
            _('The router.'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.router')],
        ),
        # Hidden/deprecated alias of ROUTER, kept for old templates.
        ROUTER_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of the router.'),
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_('Use property %s.') % ROUTER,
                    version='2015.1',
                    previous_status=support.SupportStatus(version='2013.1'))),
            constraints=[constraints.CustomConstraint('neutron.router')],
        ),
        # Hidden/deprecated alias of SUBNET.
        SUBNET_ID: properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                message=_('Use property %s.') % SUBNET,
                version='5.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.subnet')]),
        SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('The subnet, either subnet or port should be '
              'specified.'),
            constraints=[constraints.CustomConstraint('neutron.subnet')]),
        # Hidden/deprecated alias of PORT.
        PORT_ID: properties.Schema(
            properties.Schema.STRING,
            _('The port id, either subnet or port_id should be specified.'),
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_('Use property %s.') % PORT,
                    version='2015.1',
                    previous_status=support.SupportStatus(version='2014.1'))),
            constraints=[constraints.CustomConstraint('neutron.port')]),
        PORT: properties.Schema(
            properties.Schema.STRING,
            _('The port, either subnet or port should be specified.'),
            support_status=support.SupportStatus(version='2015.1'),
            constraints=[constraints.CustomConstraint('neutron.port')])
    }

    def translation_rules(self, props):
        """Map deprecated ``*_id`` properties onto their replacements.

        The REPLACE rules run before the RESOLVE rules so a value supplied
        through a deprecated property is first moved onto the current
        property and only then resolved from a name to a UUID.
        """
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.PORT],
                value_path=[self.PORT_ID]),
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.ROUTER],
                value_path=[self.ROUTER_ID]),
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.SUBNET],
                value_path=[self.SUBNET_ID]),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.PORT],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='port'),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.ROUTER],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='router'),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.SUBNET],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='subnet')
        ]

    def validate(self):
        """Validate any of the provided params.

        After translation, exactly one of SUBNET/PORT must be present.
        """
        super(RouterInterface, self).validate()
        prop_subnet_exists = self.properties.get(self.SUBNET) is not None
        prop_port_exists = self.properties.get(self.PORT) is not None
        if prop_subnet_exists and prop_port_exists:
            raise exception.ResourcePropertyConflict(self.SUBNET, self.PORT)
        if not prop_subnet_exists and not prop_port_exists:
            raise exception.PropertyUnspecifiedError(self.SUBNET, self.PORT)

    def handle_create(self):
        router_id = dict(self.properties).get(self.ROUTER)
        key = 'subnet_id'
        value = dict(self.properties).get(self.SUBNET)
        if not value:
            # No subnet given, so a port must have been (see validate()).
            key = 'port_id'
            value = dict(self.properties).get(self.PORT)
        self.client().add_interface_router(router_id, {key: value})
        # Encode everything needed for deletion into the resource id:
        # '<router_id>:subnet_id=<id>' or '<router_id>:port_id=<id>'.
        self.resource_id_set('%s:%s=%s' % (router_id, key, value))

    def handle_delete(self):
        if not self.resource_id:
            return
        # Decode '<router_id>:<key>=<value>' back into its parts.
        tokens = self.resource_id.replace('=', ':').split(':')
        if len(tokens) == 2:
            # Compatible with old data: two-token ids predate the explicit
            # key and always meant a subnet interface.
            tokens.insert(1, 'subnet_id')
        (router_id, key, value) = tokens
        with self.client_plugin().ignore_not_found:
            self.client().remove_interface_router(router_id, {key: value})
class ZaqarQueue(resource.Resource):
    """A resource for managing Zaqar queues.

    Queue is a logical entity that groups messages. Ideally a queue is
    created per work type. For example, if you want to compress files, you
    would create a queue dedicated for this job. Any application that reads
    from this queue would only compress files.
    """

    default_client_name = "zaqar"

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        NAME, METADATA,
    ) = (
        'name', 'metadata',
    )

    ATTRIBUTES = (
        QUEUE_ID, HREF,
    ) = (
        'queue_id', 'href',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _("Name of the queue instance to create."),
            required=True),
        METADATA: properties.Schema(
            properties.Schema.MAP,
            description=_("Arbitrary key/value metadata to store "
                          "contextual information about this queue."),
            update_allowed=True)
    }

    attributes_schema = {
        QUEUE_ID: attributes.Schema(
            _("ID of the queue."),
            cache_mode=attributes.Schema.CACHE_NONE,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_("Use get_resource|Ref command instead. "
                              "For example: { get_resource : "
                              "<resource_name> }"),
                    version='2015.1',
                    previous_status=support.SupportStatus(version='2014.1')
                )
            )
        ),
        HREF: attributes.Schema(
            _("The resource href of the queue.")
        ),
    }

    def physical_resource_name(self):
        # The queue name supplied by the user doubles as the physical
        # resource name (and ultimately the resource id).
        return self.properties[self.NAME]

    def handle_create(self):
        """Create a zaqar message queue.

        Returns the queue name, which check_create_complete polls on.
        """
        queue_name = self.physical_resource_name()
        queue = self.client().queue(queue_name, auto_create=False)
        # Zaqar client doesn't report an error if a queue with the same
        # id/name already exists, which can cause issue with stack update.
        if queue.exists():
            raise exception.Error(_('Message queue %s already exists.')
                                  % queue_name)
        queue.ensure_exists()
        self.resource_id_set(queue_name)
        return queue_name

    def check_create_complete(self, queue_name):
        """Set metadata of the newly created queue."""
        queue = self.client().queue(queue_name, auto_create=False)
        if queue.exists():
            # Use the declared property constant, consistent with the rest
            # of this resource (was a hard-coded 'metadata' literal).
            metadata = self.properties.get(self.METADATA)
            if metadata:
                queue.metadata(new_meta=metadata)
            return True
        else:
            raise exception.Error(_('Message queue %s creation failed.')
                                  % queue_name)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update queue metadata.

        METADATA is the only update_allowed property, so only it can appear
        in prop_diff.
        """
        if self.METADATA in prop_diff:
            queue = self.client().queue(self.resource_id, auto_create=False)
            queue.metadata(new_meta=prop_diff[self.METADATA])

    def handle_delete(self):
        """Delete a zaqar message queue."""
        if not self.resource_id:
            return
        with self.client_plugin().ignore_not_found:
            self.client().queue(self.resource_id, auto_create=False).delete()

    def href(self):
        # Build '<endpoint>/queues/<name>' without doubling the slash when
        # the endpoint already ends with one.
        api_endpoint = self.client().api_url
        queue_name = self.physical_resource_name()
        if api_endpoint.endswith('/'):
            return '%squeues/%s' % (api_endpoint, queue_name)
        else:
            return '%s/queues/%s' % (api_endpoint, queue_name)

    def _resolve_attribute(self, name):
        if name == self.QUEUE_ID:
            return self.resource_id
        elif name == self.HREF:
            return self.href()
class Subnet(neutron.NeutronResource):
    """A resource for managing Neutron subnets."""

    PROPERTIES = (
        NETWORK_ID, NETWORK, CIDR, VALUE_SPECS, NAME, IP_VERSION,
        DNS_NAMESERVERS, GATEWAY_IP, ENABLE_DHCP, ALLOCATION_POOLS,
        TENANT_ID, HOST_ROUTES,
    ) = (
        'network_id', 'network', 'cidr', 'value_specs', 'name', 'ip_version',
        'dns_nameservers', 'gateway_ip', 'enable_dhcp', 'allocation_pools',
        'tenant_id', 'host_routes',
    )

    _ALLOCATION_POOL_KEYS = (
        ALLOCATION_POOL_START, ALLOCATION_POOL_END,
    ) = (
        'start', 'end',
    )

    _HOST_ROUTES_KEYS = (
        ROUTE_DESTINATION, ROUTE_NEXTHOP,
    ) = (
        'destination', 'nexthop',
    )

    ATTRIBUTES = (
        NAME_ATTR, NETWORK_ID_ATTR, TENANT_ID_ATTR, ALLOCATION_POOLS_ATTR,
        GATEWAY_IP_ATTR, HOST_ROUTES_ATTR, IP_VERSION_ATTR, CIDR_ATTR,
        DNS_NAMESERVERS_ATTR, ENABLE_DHCP_ATTR, SHOW,
    ) = (
        'name', 'network_id', 'tenant_id', 'allocation_pools',
        'gateway_ip', 'host_routes', 'ip_version', 'cidr',
        'dns_nameservers', 'enable_dhcp', 'show',
    )

    properties_schema = {
        # Deprecated alias of NETWORK; exactly one of the two is required
        # (enforced in validate()).
        NETWORK_ID: properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                support.DEPRECATED,
                _('Use property %s.') % NETWORK),
            required=False),
        NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the attached network.'),
            required=False),
        CIDR: properties.Schema(
            properties.Schema.STRING,
            _('The CIDR.'),
            required=True),
        VALUE_SPECS: properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the creation request.'),
            default={},
            update_allowed=True),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name of the subnet.'),
            update_allowed=True),
        IP_VERSION: properties.Schema(
            properties.Schema.INTEGER,
            _('The IP version, which is 4 or 6.'),
            default=4,
            constraints=[
                constraints.AllowedValues([4, 6]),
            ]),
        DNS_NAMESERVERS: properties.Schema(
            properties.Schema.LIST,
            _('A specified set of DNS name servers to be used.'),
            default=[],
            update_allowed=True),
        GATEWAY_IP: properties.Schema(
            properties.Schema.STRING,
            _('The gateway IP address.'),
            update_allowed=True),
        ENABLE_DHCP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Set to true if DHCP is enabled and false if DHCP is disabled.'),
            default=True,
            update_allowed=True),
        ALLOCATION_POOLS: properties.Schema(
            properties.Schema.LIST,
            _('The start and end addresses for the allocation pools.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ALLOCATION_POOL_START: properties.Schema(
                        properties.Schema.STRING,
                        required=True),
                    ALLOCATION_POOL_END: properties.Schema(
                        properties.Schema.STRING,
                        required=True),
                },
            )),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the tenant who owns the network. Only administrative'
              ' users can specify a tenant ID other than their own.')),
        HOST_ROUTES: properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ROUTE_DESTINATION: properties.Schema(
                        properties.Schema.STRING,
                        required=True),
                    ROUTE_NEXTHOP: properties.Schema(
                        properties.Schema.STRING,
                        required=True),
                },
            )),
    }

    attributes_schema = {
        NAME_ATTR: attributes.Schema(
            _("Friendly name of the subnet.")),
        NETWORK_ID_ATTR: attributes.Schema(
            _("Parent network of the subnet.")),
        TENANT_ID_ATTR: attributes.Schema(
            _("Tenant owning the subnet.")),
        ALLOCATION_POOLS_ATTR: attributes.Schema(
            _("Ip allocation pools and their ranges.")),
        GATEWAY_IP_ATTR: attributes.Schema(
            _("Ip of the subnet's gateway.")),
        HOST_ROUTES_ATTR: attributes.Schema(
            _("Additional routes for this subnet.")),
        IP_VERSION_ATTR: attributes.Schema(
            _("Ip version for the subnet.")),
        CIDR_ATTR: attributes.Schema(
            _("CIDR block notation for this subnet.")),
        DNS_NAMESERVERS_ATTR: attributes.Schema(
            _("List of dns nameservers.")),
        ENABLE_DHCP_ATTR: attributes.Schema(
            _("'true' if DHCP is enabled for this subnet; 'false' otherwise.")
        ),
        SHOW: attributes.Schema(
            _("All attributes.")),
    }

    @classmethod
    def _null_gateway_ip(cls, props):
        """Convert an empty-string gateway_ip back to None in *props*.

        Specifying null in the gateway_ip will result in a property
        containing an empty string. A null gateway_ip has special meaning
        in the API so this needs to be set back to None.
        See bug https://bugs.launchpad.net/heat/+bug/1226666
        """
        if cls.GATEWAY_IP not in props:
            return
        if props.get(cls.GATEWAY_IP) == '':
            props[cls.GATEWAY_IP] = None

    def validate(self):
        super(Subnet, self).validate()
        # Require exactly one of NETWORK / deprecated NETWORK_ID.
        self._validate_depr_property_required(self.properties,
                                              self.NETWORK, self.NETWORK_ID)

    def handle_create(self):
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        # Fold NETWORK / NETWORK_ID into the single 'network_id' key the
        # Neutron API expects.
        self.client_plugin().resolve_network(props, self.NETWORK,
                                             'network_id')
        self._null_gateway_ip(props)
        subnet = self.neutron().create_subnet({'subnet': props})['subnet']
        self.resource_id_set(subnet['id'])

    def handle_delete(self):
        client = self.neutron()
        try:
            client.delete_subnet(self.resource_id)
        except Exception as ex:
            # A missing subnet counts as successfully deleted.
            self.client_plugin().ignore_not_found(ex)
        else:
            return self._delete_task()

    def _show_resource(self):
        return self.neutron().show_subnet(self.resource_id)['subnet']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        props = self.prepare_update_properties(json_snippet)
        self.neutron().update_subnet(self.resource_id, {'subnet': props})
class GlanceImage(resource.Resource):
    """A resource managing images in Glance.

    A resource provides managing images that are meant to be used with other
    services.
    """

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        NAME, IMAGE_ID, IS_PUBLIC, MIN_DISK, MIN_RAM, PROTECTED,
        DISK_FORMAT, CONTAINER_FORMAT, LOCATION
    ) = (
        'name', 'id', 'is_public', 'min_disk', 'min_ram', 'protected',
        'disk_format', 'container_format', 'location'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the image. The name of an image is not '
              'unique to a Image Service node.')
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _('The image ID. Glance will generate a UUID if not specified.')
        ),
        IS_PUBLIC: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Scope of image accessibility. Public or private. '
              'Default value is False means private.'),
            default=False,
        ),
        MIN_DISK: properties.Schema(
            properties.Schema.INTEGER,
            _('Amount of disk space (in GB) required to boot image. '
              'Default value is 0 if not specified '
              'and means no limit on the disk size.'),
            constraints=[
                constraints.Range(min=0),
            ]
        ),
        MIN_RAM: properties.Schema(
            properties.Schema.INTEGER,
            _('Amount of ram (in MB) required to boot image. Default value '
              'is 0 if not specified and means no limit on the ram size.'),
            constraints=[
                constraints.Range(min=0),
            ]
        ),
        PROTECTED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether the image can be deleted. If the value is True, '
              'the image is protected and cannot be deleted.')
        ),
        DISK_FORMAT: properties.Schema(
            properties.Schema.STRING,
            _('Disk format of image.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ami', 'ari', 'aki',
                                           'vhd', 'vmdk', 'raw',
                                           'qcow2', 'vdi', 'iso'])
            ]
        ),
        CONTAINER_FORMAT: properties.Schema(
            properties.Schema.STRING,
            _('Container format of image.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ami', 'ari', 'aki',
                                           'bare', 'ova', 'ovf'])
            ]
        ),
        LOCATION: properties.Schema(
            properties.Schema.STRING,
            _('URL where the data for this image already resides. For '
              'example, if the image data is stored in swift, you could '
              'specify "swift://example.com/container/obj".'),
            required=True,
        ),
    }

    default_client_name = 'glance'

    entity = 'images'

    def handle_create(self):
        """Register the image with Glance.

        Only properties the user actually set are passed through, so Glance
        applies its own defaults for the rest. Returns the new image id for
        check_create_complete to poll on.
        """
        args = {k: v for k, v in self.properties.items() if v is not None}
        image_id = self.client().images.create(**args).id
        self.resource_id_set(image_id)
        return image_id

    def check_create_complete(self, image_id):
        # Creation is done once Glance has finished importing the data.
        image = self.client().images.get(image_id)
        return image.status == 'active'

    def _show_resource(self):
        # Use self.client() (resolved via default_client_name) for
        # consistency with the rest of this class, rather than the legacy
        # self.glance() accessor.
        if self.client().version == 1.0:
            # The base implementation handles the v1 API.
            return super(GlanceImage, self)._show_resource()
        else:
            # v2 returns a dict-like object; normalize it to a plain dict.
            image = self.client().images.get(self.resource_id)
            return dict(image)
class PoolMember(neutron.NeutronResource):
    """A resource to handle loadbalancer members."""

    required_service_extension = 'lbaas'

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        POOL_ID, ADDRESS, PROTOCOL_PORT, WEIGHT, ADMIN_STATE_UP,
    ) = (
        'pool_id', 'address', 'protocol_port', 'weight', 'admin_state_up',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, TENANT_ID, WEIGHT_ATTR, ADDRESS_ATTR,
        POOL_ID_ATTR, PROTOCOL_PORT_ATTR,
    ) = (
        'admin_state_up', 'tenant_id', 'weight', 'address',
        'pool_id', 'protocol_port',
    )

    properties_schema = {
        POOL_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the load balancing pool.'),
            required=True,
            update_allowed=True),
        ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address of the pool member on the pool network.'),
            required=True,
            constraints=[constraints.CustomConstraint('ip_addr')]),
        PROTOCOL_PORT: properties.Schema(
            properties.Schema.INTEGER,
            _('TCP port on which the pool member listens for requests or '
              'connections.'),
            required=True,
            constraints=[
                constraints.Range(0, 65535),
            ]),
        WEIGHT: properties.Schema(
            properties.Schema.INTEGER,
            _('Weight of pool member in the pool (default to 1).'),
            constraints=[
                constraints.Range(0, 256),
            ],
            update_allowed=True),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the pool member.'),
            default=True),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _('The administrative state of this pool member.'),
            type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(
            _('Tenant owning the pool member.'),
            type=attributes.Schema.STRING),
        WEIGHT_ATTR: attributes.Schema(
            _('Weight of the pool member in the pool.'),
            type=attributes.Schema.STRING),
        ADDRESS_ATTR: attributes.Schema(
            _('IP address of the pool member.'),
            type=attributes.Schema.STRING),
        POOL_ID_ATTR: attributes.Schema(
            _('The ID of the load balancing pool.'),
            type=attributes.Schema.STRING),
        PROTOCOL_PORT_ATTR: attributes.Schema(
            _('TCP port on which the pool member listens for requests or '
              'connections.'),
            type=attributes.Schema.STRING),
    }

    def handle_create(self):
        """Register this member with its pool in Neutron LBaaS."""
        member_body = {
            'pool_id': self.properties[self.POOL_ID],
            'address': self.properties[self.ADDRESS],
            'protocol_port': self.properties[self.PROTOCOL_PORT],
            'admin_state_up': self.properties[self.ADMIN_STATE_UP],
        }
        # Weight is optional; omit it so Neutron applies its own default.
        weight = self.properties[self.WEIGHT]
        if weight is not None:
            member_body['weight'] = weight
        created = self.client().create_member(
            {'member': member_body})['member']
        self.resource_id_set(created['id'])

    def _show_resource(self):
        """Fetch this member's current representation from Neutron."""
        return self.client().show_member(self.resource_id)['member']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed properties to Neutron."""
        if prop_diff:
            self.client().update_member(self.resource_id,
                                        {'member': prop_diff})

    def handle_delete(self):
        """Remove the member, treating a missing one as already deleted."""
        try:
            self.client().delete_member(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
class CloudServer(server.Server):
    """Resource for Rackspace Cloud Servers.

    This resource overloads existent integrated OS::Nova::Server resource and
    is used for Rackspace Cloud Servers.
    """

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    # Rackspace Cloud automation statuses (values of the
    # 'rax_service_level_automation' server metadata tag).
    SM_STATUS_IN_PROGRESS = 'In Progress'
    SM_STATUS_COMPLETE = 'Complete'
    SM_STATUS_BUILD_ERROR = 'Build Error'

    # RackConnect automation statuses (values of the
    # 'rackconnect_automation_status' server metadata tag).
    RC_STATUS_DEPLOYING = 'DEPLOYING'
    RC_STATUS_DEPLOYED = 'DEPLOYED'
    RC_STATUS_FAILED = 'FAILED'
    RC_STATUS_UNPROCESSABLE = 'UNPROCESSABLE'

    # Start from the base OS::Nova::Server schema and override two
    # properties with Rackspace-specific constraints.
    properties_schema = copy.deepcopy(server.Server.properties_schema)
    properties_schema.update(
        {
            server.Server.USER_DATA_FORMAT: properties.Schema(
                properties.Schema.STRING,
                _('How the user_data should be formatted for the server. '
                  'For RAW the user_data is passed to Nova unmodified. '
                  'For SOFTWARE_CONFIG user_data is bundled as part of the '
                  'software config data, and metadata is derived from any '
                  'associated SoftwareDeployment resources.'),
                default=server.Server.RAW,
                constraints=[
                    constraints.AllowedValues(
                        [server.Server.RAW, server.Server.SOFTWARE_CONFIG])
                ]),
        }
    )
    properties_schema.update(
        {
            server.Server.SOFTWARE_CONFIG_TRANSPORT: properties.Schema(
                properties.Schema.STRING,
                _('How the server should receive the metadata required for '
                  'software configuration. POLL_TEMP_URL is the only '
                  'supported transport on Rackspace Cloud. This property is '
                  'retained for compatability.'),
                default=server.Server.POLL_TEMP_URL,
                update_allowed=True,
                constraints=[
                    constraints.AllowedValues([server.Server.POLL_TEMP_URL])
                ]),
        }
    )

    def __init__(self, name, json_snippet, stack):
        super(CloudServer, self).__init__(name, json_snippet, stack)
        # One-shot flags so the "waiting for automation" events below are
        # emitted only once per resource instance while polling.
        self._managed_cloud_started_event_sent = False
        self._rack_connect_started_event_sent = False

    def _config_drive(self):
        # The config drive is needed whenever user_data or software-config
        # metadata has to reach the server.
        user_data_format = self.properties.get(self.USER_DATA_FORMAT, "")
        is_sw_config = user_data_format == self.SOFTWARE_CONFIG
        user_data = self.properties.get(self.USER_DATA)
        config_drive = self.properties.get(self.CONFIG_DRIVE)
        if config_drive or is_sw_config or user_data:
            return True
        else:
            return False

    def _check_rax_automation_complete(self, server):
        """Poll Rackspace service-level automation status on *server*.

        Returns False while still in progress, True on completion, and
        raises on failure or an unrecognized status.
        """
        if not self._managed_cloud_started_event_sent:
            msg = _("Waiting for Rackspace Cloud automation to complete")
            self._add_event(self.action, self.status, msg)
            self._managed_cloud_started_event_sent = True

        if 'rax_service_level_automation' not in server.metadata:
            # The tag appears only once automation has started.
            LOG.debug("Cloud server does not have the "
                      "rax_service_level_automation metadata tag yet")
            return False

        mc_status = server.metadata['rax_service_level_automation']
        LOG.debug("Rackspace Cloud automation status: %s" % mc_status)

        if mc_status == self.SM_STATUS_IN_PROGRESS:
            return False
        elif mc_status == self.SM_STATUS_COMPLETE:
            msg = _("Rackspace Cloud automation has completed")
            self._add_event(self.action, self.status, msg)
            return True
        elif mc_status == self.SM_STATUS_BUILD_ERROR:
            raise exception.Error(_("Rackspace Cloud automation failed"))
        else:
            raise exception.Error(_("Unknown Rackspace Cloud automation "
                                    "status: %s") % mc_status)

    def _check_rack_connect_complete(self, server):
        """Poll RackConnect automation status on *server*.

        Returns False while deploying, True on success (or when RackConnect
        does not apply), and raises on failure or an unknown status.
        """
        if not self._rack_connect_started_event_sent:
            msg = _("Waiting for RackConnect automation to complete")
            self._add_event(self.action, self.status, msg)
            self._rack_connect_started_event_sent = True

        if 'rackconnect_automation_status' not in server.metadata:
            LOG.debug("RackConnect server does not have the "
                      "rackconnect_automation_status metadata tag yet")
            return False

        rc_status = server.metadata['rackconnect_automation_status']
        LOG.debug("RackConnect automation status: %s" % rc_status)

        if rc_status == self.RC_STATUS_DEPLOYING:
            return False
        elif rc_status == self.RC_STATUS_DEPLOYED:
            self._server = None  # The public IP changed, forget old one
            return True
        elif rc_status == self.RC_STATUS_UNPROCESSABLE:
            # UNPROCESSABLE means the RackConnect automation was not
            # attempted (eg. Cloud Server in a different DC than
            # dedicated gear, so RackConnect does not apply). It is
            # okay if we do not raise an exception.
            reason = server.metadata.get('rackconnect_unprocessable_reason',
                                         None)
            if reason is not None:
                LOG.warning(_LW("RackConnect unprocessable reason: %s"),
                            reason)

            msg = _("RackConnect automation has completed")
            self._add_event(self.action, self.status, msg)
            return True
        elif rc_status == self.RC_STATUS_FAILED:
            raise exception.Error(_("RackConnect automation FAILED"))
        else:
            msg = _("Unknown RackConnect automation status: %s") % rc_status
            raise exception.Error(msg)

    def check_create_complete(self, server_id):
        """Check if server creation is complete and handle server configs."""
        if not super(CloudServer, self).check_create_complete(server_id):
            return False
        server = self.client_plugin().fetch_server(server_id)
        if not server:
            return False
        # RackConnect automation only runs for accounts with the
        # 'rack_connect' role.
        if ('rack_connect' in self.context.roles and not
                self._check_rack_connect_complete(server)):
            return False
        if not self._check_rax_automation_complete(server):
            return False
        return True

    # Since rackspace compute service does not support 'os-interface'
    # endpoint, accessing addresses attribute of OS::Nova::Server results
    # in NotFound error. Here overriding '_add_port_for_address' method and
    # using different endpoint named 'os-virtual-interfacesv2' to get the
    # same information.
    def _add_port_for_address(self, server):
        # Resolve each address's port id by matching network label and IP
        # against the server's virtual interfaces.
        def get_port(net_name, address):
            for iface in ifaces:
                for ip_addr in iface.ip_addresses:
                    if ip_addr['network_label'] == net_name and ip_addr[
                            'address'] == address:
                        return iface.id

        nets = copy.deepcopy(server.addresses)
        nova_ext = self.client().os_virtual_interfacesv2_python_novaclient_ext
        ifaces = nova_ext.list(server.id)
        for net_name, addresses in nets.items():
            for address in addresses:
                address['port'] = get_port(net_name, address['addr'])

        return self._extend_networks(nets)
class RandomString(resource.Resource):
    '''
    A resource which generates a random string.

    This is useful for configuring passwords and secrets on services.
    '''

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        LENGTH, SEQUENCE, CHARACTER_CLASSES, CHARACTER_SEQUENCES,
        SALT,
    ) = (
        'length', 'sequence', 'character_classes', 'character_sequences',
        'salt',
    )

    _CHARACTER_CLASSES_KEYS = (
        CHARACTER_CLASSES_CLASS, CHARACTER_CLASSES_MIN,
    ) = (
        'class', 'min',
    )

    _CHARACTER_SEQUENCES = (
        CHARACTER_SEQUENCES_SEQUENCE, CHARACTER_SEQUENCES_MIN,
    ) = (
        'sequence', 'min',
    )

    ATTRIBUTES = (
        VALUE,
    ) = (
        'value',
    )

    properties_schema = {
        LENGTH: properties.Schema(
            properties.Schema.INTEGER,
            _('Length of the string to generate.'),
            default=32,
            constraints=[
                constraints.Range(1, 512),
            ]
        ),
        # Deprecated in favour of CHARACTER_CLASSES; translated away in
        # translation_rules().
        SEQUENCE: properties.Schema(
            properties.Schema.STRING,
            _('Sequence of characters to build the random string from.'),
            constraints=[
                constraints.AllowedValues(['lettersdigits', 'letters',
                                           'lowercase', 'uppercase',
                                           'digits', 'hexdigits',
                                           'octdigits']),
            ],
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_('Use property %s.') % CHARACTER_CLASSES,
                    version='2014.2'
                )
            )
        ),
        CHARACTER_CLASSES: properties.Schema(
            properties.Schema.LIST,
            _('A list of character class and their constraints to generate '
              'the random string from.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CHARACTER_CLASSES_CLASS: properties.Schema(
                        properties.Schema.STRING,
                        (_('A character class and its corresponding %(min)s '
                           'constraint to generate the random string from.')
                         % {'min': CHARACTER_CLASSES_MIN}),
                        constraints=[
                            constraints.AllowedValues(
                                ['lettersdigits', 'letters', 'lowercase',
                                 'uppercase', 'digits', 'hexdigits',
                                 'octdigits']),
                        ],
                        default='lettersdigits'),
                    CHARACTER_CLASSES_MIN: properties.Schema(
                        properties.Schema.INTEGER,
                        _('The minimum number of characters from this '
                          'character class that will be in the generated '
                          'string.'),
                        default=1,
                        constraints=[
                            constraints.Range(1, 512),
                        ]
                    )
                }
            )
        ),
        CHARACTER_SEQUENCES: properties.Schema(
            properties.Schema.LIST,
            _('A list of character sequences and their constraints to '
              'generate the random string from.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CHARACTER_SEQUENCES_SEQUENCE: properties.Schema(
                        properties.Schema.STRING,
                        _('A character sequence and its corresponding %(min)s '
                          'constraint to generate the random string '
                          'from.') % {'min': CHARACTER_SEQUENCES_MIN},
                        required=True),
                    CHARACTER_SEQUENCES_MIN: properties.Schema(
                        properties.Schema.INTEGER,
                        _('The minimum number of characters from this '
                          'sequence that will be in the generated '
                          'string.'),
                        default=1,
                        constraints=[
                            constraints.Range(1, 512),
                        ]
                    )
                }
            )
        ),
        SALT: properties.Schema(
            properties.Schema.STRING,
            _('Value which can be set or changed on stack update to trigger '
              'the resource for replacement with a new random string . The '
              'salt value itself is ignored by the random generator.')
        ),
    }

    attributes_schema = {
        VALUE: attributes.Schema(
            _('The random string generated by this resource. This value is '
              'also available by referencing the resource.'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING
        ),
    }

    # Named character pools for the 'class'/'sequence' shorthand values.
    _sequences = {
        'lettersdigits': string.ascii_letters + string.digits,
        'letters': string.ascii_letters,
        'lowercase': string.ascii_lowercase,
        'uppercase': string.ascii_uppercase,
        'digits': string.digits,
        'hexdigits': string.digits + 'ABCDEF',
        'octdigits': string.octdigits
    }

    def translation_rules(self):
        # Convert the deprecated SEQUENCE property into an equivalent
        # single-entry CHARACTER_CLASSES list, then drop SEQUENCE.
        if self.properties.get(self.SEQUENCE):
            return [
                properties.TranslationRule(
                    self.properties,
                    properties.TranslationRule.ADD,
                    [self.CHARACTER_CLASSES],
                    [{self.CHARACTER_CLASSES_CLASS: self.properties.get(
                        self.SEQUENCE),
                        self.CHARACTER_CLASSES_MIN: 1}]),
                properties.TranslationRule(
                    self.properties,
                    properties.TranslationRule.DELETE,
                    [self.SEQUENCE]
                )
            ]

    @staticmethod
    def _deprecated_random_string(sequence, length):
        # SystemRandom draws from the OS CSPRNG, which is required since
        # the generated strings are used as passwords/secrets.
        rand = random.SystemRandom()
        return ''.join(rand.choice(sequence)
                       for x in six.moves.xrange(length))

    def _generate_random_string(self, char_sequences, char_classes, length):
        """Generate a random string honouring per-pool minimums.

        At least 'min' characters are taken from every entry of
        char_sequences and char_classes; the remainder is filled from the
        same pools and the result is shuffled so the mandatory characters
        are not clustered at the front.
        """
        # Use the OS CSPRNG throughout: this resource produces passwords
        # and secrets, so the default module-level Mersenne Twister PRNG
        # is not appropriate (it previously was used here, while the
        # deprecated path already used SystemRandom).
        rand = random.SystemRandom()
        random_string = ""

        # Add the minimum number of chars from each char sequence & class.
        if char_sequences:
            for char_seq in char_sequences:
                seq = char_seq[self.CHARACTER_SEQUENCES_SEQUENCE]
                seq_min = char_seq[self.CHARACTER_SEQUENCES_MIN]
                for i in six.moves.xrange(seq_min):
                    random_string += rand.choice(seq)

        if char_classes:
            for char_class in char_classes:
                cclass_class = char_class[self.CHARACTER_CLASSES_CLASS]
                cclass_seq = self._sequences[cclass_class]
                cclass_min = char_class[self.CHARACTER_CLASSES_MIN]
                for i in six.moves.xrange(cclass_min):
                    random_string += rand.choice(cclass_seq)

        def random_class_char():
            cclass_dict = rand.choice(char_classes)
            cclass_class = cclass_dict[self.CHARACTER_CLASSES_CLASS]
            cclass_seq = self._sequences[cclass_class]
            return rand.choice(cclass_seq)

        def random_seq_char():
            seq_dict = rand.choice(char_sequences)
            seq = seq_dict[self.CHARACTER_SEQUENCES_SEQUENCE]
            return rand.choice(seq)

        # Fill up the rest with random chars from the provided pools,
        # weighting the pick by the number of entries of each kind.
        if char_sequences and char_classes:
            weighted_choices = ([True] * len(char_classes) +
                                [False] * len(char_sequences))
            while len(random_string) < length:
                if rand.choice(weighted_choices):
                    random_string += random_class_char()
                else:
                    random_string += random_seq_char()
        elif char_sequences:
            while len(random_string) < length:
                random_string += random_seq_char()
        else:
            while len(random_string) < length:
                random_string += random_class_char()

        # Shuffle so the mandatory prefix characters are distributed.
        random_string = ''.join(rand.sample(random_string,
                                            len(random_string)))
        return random_string

    def validate(self):
        super(RandomString, self).validate()
        sequence = self.properties[self.SEQUENCE]
        char_sequences = self.properties[self.CHARACTER_SEQUENCES]
        char_classes = self.properties[self.CHARACTER_CLASSES]

        if sequence and (char_sequences or char_classes):
            msg = (_("Cannot use deprecated '%(seq)s' property along with "
                     "'%(char_seqs)s' or '%(char_classes)s' properties")
                   % {'seq': self.SEQUENCE,
                      'char_seqs': self.CHARACTER_SEQUENCES,
                      'char_classes': self.CHARACTER_CLASSES})
            raise exception.StackValidationFailed(message=msg)

        def char_min(char_dicts, min_prop):
            if char_dicts:
                return sum(char_dict[min_prop] for char_dict in char_dicts)
            return 0

        # The requested length must be able to accommodate every minimum.
        length = self.properties[self.LENGTH]
        min_length = (char_min(char_sequences, self.CHARACTER_SEQUENCES_MIN) +
                      char_min(char_classes, self.CHARACTER_CLASSES_MIN))
        if min_length > length:
            msg = _("Length property cannot be smaller than combined "
                    "character class and character sequence minimums")
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        char_sequences = self.properties[self.CHARACTER_SEQUENCES]
        char_classes = self.properties[self.CHARACTER_CLASSES]
        length = self.properties[self.LENGTH]

        if char_sequences or char_classes:
            random_string = self._generate_random_string(char_sequences,
                                                         char_classes,
                                                         length)
        else:
            # Legacy path for the deprecated SEQUENCE property.
            sequence = self.properties[self.SEQUENCE]
            if not sequence:  # Deprecated property not provided, use a default
                sequence = "lettersdigits"
            char_seq = self._sequences[sequence]
            random_string = self._deprecated_random_string(char_seq, length)

        # Store redacted so the secret never appears in logs/events.
        self.data_set(self.VALUE, random_string, redact=True)
        self.resource_id_set(self.physical_resource_name())

    def _resolve_attribute(self, name):
        if name == self.VALUE:
            return self.data().get(self.VALUE)

    def FnGetRefId(self):
        # After creation, referencing the resource yields the secret value
        # itself rather than the resource name.
        if self.resource_id is not None:
            return self.data().get(self.VALUE)
        else:
            return six.text_type(self.name)
class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
    """A resource that implements a Cinder block-storage volume.

    Declares the user-facing properties (size, creation sources such as
    image/snapshot/backup/source volume, volume type, metadata, ...) and
    the attributes templates can read back.  Creation/update/delete
    workflows are implemented by the methods below together with the
    ``vb.BaseVolume`` parent class.
    """

    # Symbolic property names and the template-facing keys they map to.
    # Note 'imageRef' (deprecated/hidden, see IMAGE_REF schema below) vs
    # the preferred 'image' property.
    PROPERTIES = (
        AVAILABILITY_ZONE, SIZE, SNAPSHOT_ID, BACKUP_ID, NAME,
        DESCRIPTION, VOLUME_TYPE, METADATA, IMAGE_REF, IMAGE,
        SOURCE_VOLID, CINDER_SCHEDULER_HINTS, READ_ONLY, MULTI_ATTACH,
    ) = (
        'availability_zone', 'size', 'snapshot_id', 'backup_id', 'name',
        'description', 'volume_type', 'metadata', 'imageRef', 'image',
        'source_volid', 'scheduler_hints', 'read_only', 'multiattach',
    )

    # Attribute names resolvable via _resolve_attribute() below.
    ATTRIBUTES = (
        AVAILABILITY_ZONE_ATTR, SIZE_ATTR, SNAPSHOT_ID_ATTR,
        DISPLAY_NAME_ATTR, DISPLAY_DESCRIPTION_ATTR, VOLUME_TYPE_ATTR,
        METADATA_ATTR, SOURCE_VOLID_ATTR, STATUS, CREATED_AT, BOOTABLE,
        METADATA_VALUES_ATTR, ENCRYPTED_ATTR, ATTACHMENTS,
        MULTI_ATTACH_ATTR,
    ) = (
        'availability_zone', 'size', 'snapshot_id', 'display_name',
        'display_description', 'volume_type', 'metadata', 'source_volid',
        'status', 'created_at', 'bootable', 'metadata_values', 'encrypted',
        'attachments', 'multiattach',
    )

    properties_schema = {
        AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _('The availability zone in which the volume will be created.')),
        SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('The size of the volume in GB. '
              'On update only increase in size is supported.'),
            update_allowed=True,
            constraints=[
                constraints.Range(min=1),
            ]),
        SNAPSHOT_ID: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the snapshot to create the volume from.'),
            constraints=[constraints.CustomConstraint('cinder.snapshot')]),
        BACKUP_ID: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the backup to create the volume from.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('cinder.backup')]),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('A name used to distinguish the volume.'),
            update_allowed=True,
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('A description of the volume.'),
            update_allowed=True,
        ),
        VOLUME_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the type of volume to use, mapping to a '
              'specific backend.'),
            constraints=[constraints.CustomConstraint('cinder.vtype')],
            update_allowed=True),
        METADATA: properties.Schema(
            properties.Schema.MAP,
            _('Key/value pairs to associate with the volume.'),
            update_allowed=True,
        ),
        # Hidden legacy spelling; translation_rules() below maps IMAGE
        # onto this key for the actual API call.
        IMAGE_REF: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the image to create the volume from.'),
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                message=_('Use property %s.') % IMAGE,
                version='5.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.1'))),
        IMAGE: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the name or ID of the image to create the '
              'volume from.'),
            constraints=[constraints.CustomConstraint('glance.image')]),
        SOURCE_VOLID: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the volume to use as source.'),
            constraints=[constraints.CustomConstraint('cinder.volume')]),
        CINDER_SCHEDULER_HINTS: properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary key-value pairs specified by the client to help '
              'the Cinder scheduler creating a volume.'),
            support_status=support.SupportStatus(version='2015.1')),
        READ_ONLY: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Enables or disables read-only access mode of volume.'),
            support_status=support.SupportStatus(version='5.0.0'),
            update_allowed=True,
        ),
        MULTI_ATTACH: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether allow the volume to be attached more than once. '
              'This property is only supported from Cinder API v2.'),
            support_status=support.SupportStatus(version='6.0.0'),
        ),
    }

    attributes_schema = {
        AVAILABILITY_ZONE_ATTR: attributes.Schema(
            _('The availability zone in which the volume is located.'),
            type=attributes.Schema.STRING),
        SIZE_ATTR: attributes.Schema(
            _('The size of the volume in GB.'),
            type=attributes.Schema.STRING),
        SNAPSHOT_ID_ATTR: attributes.Schema(
            _('The snapshot the volume was created from, if any.'),
            type=attributes.Schema.STRING),
        DISPLAY_NAME_ATTR: attributes.Schema(
            _('Name of the volume.'),
            type=attributes.Schema.STRING),
        DISPLAY_DESCRIPTION_ATTR: attributes.Schema(
            _('Description of the volume.'),
            type=attributes.Schema.STRING),
        VOLUME_TYPE_ATTR: attributes.Schema(
            _('The type of the volume mapping to a backend, if any.'),
            type=attributes.Schema.STRING),
        # Serialized as a JSON string (see _resolve_attribute);
        # METADATA_VALUES_ATTR below returns the raw map instead.
        METADATA_ATTR: attributes.Schema(
            _('Key/value pairs associated with the volume.'),
            type=attributes.Schema.STRING),
        SOURCE_VOLID_ATTR: attributes.Schema(
            _('The volume used as source, if any.'),
            type=attributes.Schema.STRING),
        STATUS: attributes.Schema(
            _('The current status of the volume.'),
            type=attributes.Schema.STRING),
        CREATED_AT: attributes.Schema(
            _('The timestamp indicating volume creation.'),
            type=attributes.Schema.STRING),
        BOOTABLE: attributes.Schema(
            _('Boolean indicating if the volume can be booted or not.'),
            type=attributes.Schema.STRING),
        METADATA_VALUES_ATTR: attributes.Schema(
            _('Key/value pairs associated with the volume in raw dict form.'),
            type=attributes.Schema.MAP),
        ENCRYPTED_ATTR: attributes.Schema(
            _('Boolean indicating if the volume is encrypted or not.'),
            type=attributes.Schema.STRING),
        ATTACHMENTS: attributes.Schema(
            _('The list of attachments of the volume.'),
            type=attributes.Schema.STRING),
        MULTI_ATTACH_ATTR: attributes.Schema(
            _('Boolean indicating whether allow the volume to be attached '
              'more than once.'),
            type=attributes.Schema.BOOLEAN,
            support_status=support.SupportStatus(version='6.0.0'),
        ),
    }

    # Transient Cinder states that mean "still being created".
    _volume_creating_status = ['creating', 'restoring-backup', 'downloading']

    entity = 'volumes'

    def translation_rules(self):
        """Map the hidden IMAGE_REF property onto the preferred IMAGE."""
        return [
            properties.TranslationRule(self.properties,
                                       properties.TranslationRule.REPLACE,
                                       [self.IMAGE],
                                       value_path=[self.IMAGE_REF])
        ]

    def _name(self):
        """Return the user-supplied name, or the parent's generated one."""
        name = self.properties[self.NAME]
        if name:
            return name
        return super(CinderVolume, self)._name()

    def _description(self):
        """Return the volume description property (may be None)."""
        return self.properties[self.DESCRIPTION]

    def _create_arguments(self):
        """Build the kwargs dict for the Cinder volume-create call."""
        arguments = {
            'size': self.properties[self.SIZE],
            'availability_zone': self.properties[self.AVAILABILITY_ZONE],
        }

        scheduler_hints = self._scheduler_hints(
            self.properties[self.CINDER_SCHEDULER_HINTS])
        if scheduler_hints:
            arguments[self.CINDER_SCHEDULER_HINTS] = scheduler_hints

        # IMAGE takes precedence over the deprecated IMAGE_REF; names are
        # resolved to IDs via the glance client plugin.
        if self.properties[self.IMAGE]:
            arguments['imageRef'] = self.client_plugin('glance').get_image_id(
                self.properties[self.IMAGE])
        elif self.properties[self.IMAGE_REF]:
            arguments['imageRef'] = self.properties[self.IMAGE_REF]

        optionals = (self.SNAPSHOT_ID, self.VOLUME_TYPE, self.SOURCE_VOLID,
                     self.METADATA, self.MULTI_ATTACH)
        arguments.update((prop, self.properties[prop]) for prop in optionals
                         if self.properties[prop] is not None)

        return arguments

    def _resolve_attribute(self, name):
        """Fetch the live volume and resolve the requested attribute."""
        cinder = self.client()
        vol = cinder.volumes.get(self.resource_id)
        if name == self.METADATA_ATTR:
            return six.text_type(jsonutils.dumps(vol.metadata))
        elif name == self.METADATA_VALUES_ATTR:
            return vol.metadata
        if cinder.volume_api_version >= 2:
            # v2 renamed display_name/display_description to
            # name/description on the volume object.
            if name == self.DISPLAY_NAME_ATTR:
                return vol.name
            elif name == self.DISPLAY_DESCRIPTION_ATTR:
                return vol.description
        return six.text_type(getattr(vol, name))

    # TODO(huangtianhua): remove this method when bug #1479641 is fixed.
def _show_resource(self):
    """Return the raw volume info dict for resource-show."""
    volume = self.client().volumes.get(self.resource_id)
    return volume._info

def handle_create(self):
    """Create the volume, then apply the read_only flag if requested."""
    vol_id = super(CinderVolume, self).handle_create()
    read_only_flag = self.properties.get(self.READ_ONLY)
    if read_only_flag is not None:
        self.client().volumes.update_readonly_flag(vol_id, read_only_flag)
    return vol_id

def _extend_volume(self, new_size):
    """Ask Cinder to grow the volume to new_size GB.

    Client-side API errors are re-raised as heat exception.Error;
    anything else propagates unchanged.  Returns True so the caller can
    record the request as issued.
    """
    try:
        self.client().volumes.extend(self.resource_id, new_size)
    except Exception as ex:
        if self.client_plugin().is_client_exception(ex):
            raise exception.Error(
                _("Failed to extend volume %(vol)s - %(err)s") % {
                    'vol': self.resource_id,
                    'err': str(ex)
                })
        else:
            raise
    return True

def _check_extend_volume_complete(self):
    """Poll the resize: True when 'available', False while 'extending'.

    Raises ResourceUnknownStatus for any other state.
    """
    vol = self.client().volumes.get(self.resource_id)
    if vol.status == 'extending':
        # NOTE(review): lazy %-args (LOG.debug("...", vol.id)) would be
        # preferred over eager % formatting here.
        LOG.debug("Volume %s is being extended" % vol.id)
        return False

    if vol.status != 'available':
        LOG.info(
            _LI("Resize failed: Volume %(vol)s "
                "is in %(status)s state."), {
                'vol': vol.id,
                'status': vol.status
            })
        raise exception.ResourceUnknownStatus(
            resource_status=vol.status, result=_('Volume resize failed'))

    LOG.info(_LI('Volume %(id)s resize complete'), {'id': vol.id})
    return True

def _backup_restore(self, vol_id, backup_id):
    """Start restoring backup_id onto vol_id; True when the call is made.

    Client-side errors become heat exception.Error, mirroring
    _extend_volume above.
    """
    try:
        self.client().restores.restore(backup_id, vol_id)
    except Exception as ex:
        if self.client_plugin().is_client_exception(ex):
            raise exception.Error(
                _("Failed to restore volume %(vol)s from backup %(backup)s "
                  "- %(err)s") % {
                    'vol': vol_id,
                    'backup': backup_id,
                    'err': ex
                })
        else:
            raise
    return True

def _check_backup_restore_complete(self):
    """Poll the restore: True when 'available', False while in progress."""
    vol = self.client().volumes.get(self.resource_id)
    if vol.status == 'restoring-backup':
        LOG.debug("Volume %s is being restoring from backup" % vol.id)
        return False

    if vol.status != 'available':
        LOG.info(
            _LI("Restore failed: Volume %(vol)s is in %(status)s "
                "state."), {
                'vol': vol.id,
                'status': vol.status
            })
        raise exception.ResourceUnknownStatus(
            resource_status=vol.status,
            result=_('Volume backup restore failed'))

    LOG.info(_LI('Volume %(id)s backup restore complete'), {'id': vol.id})
    return True

def handle_update(self, json_snippet, tmpl_diff, prop_diff):
    """Apply in-place property updates and stage long-running ones.

    Name/description, metadata, volume type and read_only changes are
    applied synchronously here.  Backup-restore, resize and the
    detach/attach pair needed for resizing an in-use volume are returned
    as progress objects, consumed positionally by
    check_update_complete().
    """
    vol = None
    cinder = self.client()
    prg_resize = None
    prg_attach = None
    prg_detach = None
    prg_backup_restore = None

    # Update the name and/or description of the cinder volume.
    if self.NAME in prop_diff or self.DESCRIPTION in prop_diff:
        vol = cinder.volumes.get(self.resource_id)
        update_name = (prop_diff.get(self.NAME) or
                       self.properties[self.NAME])
        update_description = (prop_diff.get(self.DESCRIPTION) or
                              self.properties[self.DESCRIPTION])
        kwargs = self._fetch_name_and_description(
            cinder.volume_api_version, update_name, update_description)
        cinder.volumes.update(vol, **kwargs)

    # Update the metadata of the cinder volume.
    if self.METADATA in prop_diff:
        if not vol:
            vol = cinder.volumes.get(self.resource_id)
        metadata = prop_diff.get(self.METADATA)
        cinder.volumes.update_all_metadata(vol, metadata)

    # Retype to a new volume type (v2 only).
    if self.VOLUME_TYPE in prop_diff:
        if cinder.volume_api_version == 1:
            LOG.info(
                _LI('Volume type update not supported '
                    'by Cinder API V1.'))
            raise exception.NotSupported(
                feature=_('Using Cinder API V1, volume_type update'))
        else:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)
            new_vol_type = prop_diff.get(self.VOLUME_TYPE)
            cinder.volumes.retype(vol, new_vol_type, 'never')

    # Update the read_only access mode.
    if self.READ_ONLY in prop_diff:
        flag = prop_diff.get(self.READ_ONLY)
        cinder.volumes.update_readonly_flag(self.resource_id, flag)

    # Stage a restore of the volume from a backup.
    if self.BACKUP_ID in prop_diff:
        prg_backup_restore = progress.VolumeBackupRestoreProgress(
            vol_id=self.resource_id,
            backup_id=prop_diff.get(self.BACKUP_ID))

    # Stage a volume size extension (shrinking is rejected outright).
    if self.SIZE in prop_diff:
        if not vol:
            vol = cinder.volumes.get(self.resource_id)

        new_size = prop_diff[self.SIZE]
        if new_size < vol.size:
            raise exception.NotSupported(feature=_("Shrinking volume"))

        elif new_size > vol.size:
            prg_resize = progress.VolumeResizeProgress(size=new_size)
            if vol.attachments:
                # NOTE(pshchelo):
                # this relies on current behavior of cinder attachments,
                # i.e. volume attachments is a list with len<=1,
                # so the volume can be attached only to single instance,
                # and id of attachment is the same as id of the volume
                # it describes, so detach/attach the same volume
                # will not change volume attachment id.
                server_id = vol.attachments[0]['server_id']
                device = vol.attachments[0]['device']
                attachment_id = vol.attachments[0]['id']
                prg_detach = progress.VolumeDetachProgress(
                    server_id, vol.id, attachment_id)
                prg_attach = progress.VolumeAttachProgress(
                    server_id, vol.id, device)

    return prg_backup_restore, prg_detach, prg_resize, prg_attach

def _detach_volume_to_complete(self, prg_detach):
    """Step the detach state machine: nova detach, cinder poll, nova poll.

    Returns False after each step; the caller re-checks
    prg_detach.nova_complete rather than this return value.
    """
    if not prg_detach.called:
        self.client_plugin('nova').detach_volume(prg_detach.srv_id,
                                                 prg_detach.attach_id)
        prg_detach.called = True
        return False
    if not prg_detach.cinder_complete:
        cinder_complete_res = self.client_plugin(
        ).check_detach_volume_complete(prg_detach.vol_id)
        prg_detach.cinder_complete = cinder_complete_res
        return False
    if not prg_detach.nova_complete:
        prg_detach.nova_complete = self.client_plugin(
            'nova').check_detach_volume_complete(prg_detach.srv_id,
                                                 prg_detach.attach_id)
        return False

def _attach_volume_to_complete(self, prg_attach):
    """Step the attach state machine; True once cinder reports attached."""
    if not prg_attach.called:
        prg_attach.called = self.client_plugin('nova').attach_volume(
            prg_attach.srv_id, prg_attach.vol_id, prg_attach.device)
        return False
    if not prg_attach.complete:
        prg_attach.complete = self.client_plugin(
        ).check_attach_volume_complete(prg_attach.vol_id)
        return prg_attach.complete

def check_update_complete(self, checkers):
    """Drive the staged operations from handle_update to completion.

    Order: backup restore, then detach (if in use), resize, re-attach.
    Each phase returns False until done; True only when all staged
    progress objects have completed.
    """
    prg_backup_restore, prg_detach, prg_resize, prg_attach = checkers
    # Restore the volume from backup first, if staged.
    if prg_backup_restore:
        if not prg_backup_restore.called:
            prg_backup_restore.called = self._backup_restore(
                prg_backup_restore.vol_id, prg_backup_restore.backup_id)
            return False
        if not prg_backup_restore.complete:
            prg_backup_restore.complete = \
                self._check_backup_restore_complete()
            return prg_backup_restore.complete and not prg_resize
    if not prg_resize:
        return True
    # Detach the volume before resizing, if it was attached.
    if prg_detach:
        if not prg_detach.nova_complete:
            self._detach_volume_to_complete(prg_detach)
            return False
    # Resize the volume.
    if not prg_resize.called:
        prg_resize.called = self._extend_volume(prg_resize.size)
        return False
    if not prg_resize.complete:
        prg_resize.complete = self._check_extend_volume_complete()
        return prg_resize.complete and not prg_attach
    # Reattach the volume back to its server.
    if prg_attach:
        return self._attach_volume_to_complete(prg_attach)
    return True

def handle_snapshot(self):
    """Snapshot the volume as a Cinder backup; returns the backup id."""
    backup = self.client().backups.create(self.resource_id)
    self.data_set('backup_id', backup.id)
    return backup.id

def check_snapshot_complete(self, backup_id):
    """Poll the backup: True when 'available', False while 'creating'.

    Any other state raises with the backend's failure reason.
    """
    backup = self.client().backups.get(backup_id)
    if backup.status == 'creating':
        return False
    if backup.status == 'available':
        return True
    raise exception.Error(backup.fail_reason)

def handle_delete_snapshot(self, snapshot):
    """Delete the backup recorded in the snapshot data, ignoring 404."""
    backup_id = snapshot['resource_data'].get('backup_id')
    if not backup_id:
        return
    try:
        self.client().backups.delete(backup_id)
    except Exception as ex:
        self.client_plugin().ignore_not_found(ex)
        return
    else:
        return backup_id

def check_delete_snapshot_complete(self, backup_id):
    """True once the backup can no longer be fetched (i.e. is gone)."""
    if not backup_id:
        return True
    try:
        self.client().backups.get(backup_id)
    except Exception as ex:
        self.client_plugin().ignore_not_found(ex)
        return True
    else:
        return False

def _build_exclusive_options(self):
    """List which mutually-exclusive creation sources were specified."""
    exclusive_options = []
    if self.properties.get(self.SNAPSHOT_ID):
        exclusive_options.append(self.SNAPSHOT_ID)
    if self.properties.get(self.SOURCE_VOLID):
        exclusive_options.append(self.SOURCE_VOLID)
    if self.properties.get(self.IMAGE):
        exclusive_options.append(self.IMAGE)
    if self.properties.get(self.IMAGE_REF):
        exclusive_options.append(self.IMAGE_REF)
    return exclusive_options

def _validate_create_sources(self):
    """Check size vs. creation-source combinations.

    Without a size, exactly one source is required; with a size, at
    most one source may be given.
    """
    exclusive_options = self._build_exclusive_options()
    size = self.properties.get(self.SIZE)
    if size is None and len(exclusive_options) != 1:
        msg = (_('If neither "%(backup_id)s" nor "%(size)s" is '
                 'provided, one and only one of '
                 '"%(image)s", "%(image_ref)s", "%(source_vol)s", '
                 '"%(snapshot_id)s" must be specified, but currently '
                 'specified options: %(exclusive_options)s.') % {
                     'backup_id': self.BACKUP_ID,
                     'size': self.SIZE,
                     'image': self.IMAGE,
                     'image_ref': self.IMAGE_REF,
                     'source_vol': self.SOURCE_VOLID,
                     'snapshot_id': self.SNAPSHOT_ID,
                     'exclusive_options': exclusive_options
                 })
        raise exception.StackValidationFailed(message=msg)
    elif size and len(exclusive_options) > 1:
        msg = (_('If "%(size)s" is provided, only one of '
                 '"%(image)s", "%(image_ref)s", "%(source_vol)s", '
                 '"%(snapshot_id)s" can be specified, but currently '
                 'specified options: %(exclusive_options)s.') % {
                     'size': self.SIZE,
                     'image': self.IMAGE,
                     'image_ref': self.IMAGE_REF,
                     'source_vol': self.SOURCE_VOLID,
                     'snapshot_id': self.SNAPSHOT_ID,
                     'exclusive_options': exclusive_options
                 })
        raise exception.StackValidationFailed(message=msg)

def validate(self):
    """Validate provided params."""
    res = super(CinderVolume, self).validate()
    if res is not None:
        return res

    # Scheduler hints are only supported from Cinder API v2.
    if (self.properties[self.CINDER_SCHEDULER_HINTS] and
            self.client().volume_api_version == 1):
        raise exception.StackValidationFailed(
            message=_('Scheduler hints are not supported by the current '
                      'volume API.'))
    # Multi attach is only supported from Cinder API v2.
    if (self.properties[self.MULTI_ATTACH] and
            self.client().volume_api_version == 1):
        raise exception.StackValidationFailed(
            message=_('Multiple attach is not supported by the current '
                      'volume API. Use this property since '
                      'Cinder API v2.'))
    # Cannot specify both image and the deprecated imageRef.
    image = self.properties.get(self.IMAGE)
    imageRef = self.properties.get(self.IMAGE_REF)
    if image and imageRef:
        raise exception.ResourcePropertyConflict(self.IMAGE,
                                                 self.IMAGE_REF)
    # If not creating from a backup, check the other creation sources.
    if not self.properties.get(self.BACKUP_ID):
        self._validate_create_sources()

def handle_restore(self, defn, restore_data):
    """Build a definition that restores this volume from its backup."""
    backup_id = restore_data['resource_data']['backup_id']
    # We can't ignore the 'size' property: if the user updated the size
    # of the volume after the snapshot, we need to change back to the
    # old size when restoring the volume.
    ignore_props = (self.IMAGE_REF, self.IMAGE, self.SOURCE_VOLID)
    props = dict((key, value) for (
        key,
        value) in six.iteritems(defn.properties(self.properties_schema))
        if key not in ignore_props and value is not None)
    props[self.BACKUP_ID] = backup_id
    return defn.freeze(properties=props)
class KeyPair(resource.Resource):
    """A resource for creating Nova key pairs.

    **Note** that when a new key pair is generated, setting
    `save_private_key` to `True` makes the system store the generated
    private key, which is then readable through this resource's
    `private_key` attribute.

    If the `public_key` property is supplied, the key pair is imported
    rather than generated, so there is no private key material to store
    and the `private_key` attribute always resolves to an empty string,
    whatever `save_private_key` is set to.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        NAME,
        SAVE_PRIVATE_KEY,
        PUBLIC_KEY,
    ) = (
        'name',
        'save_private_key',
        'public_key',
    )

    ATTRIBUTES = (
        PUBLIC_KEY_ATTR,
        PRIVATE_KEY_ATTR,
    ) = (
        'public_key',
        'private_key',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name of the key pair.'),
            required=True,
            constraints=[constraints.Length(min=1, max=255)],
        ),
        SAVE_PRIVATE_KEY: properties.Schema(
            properties.Schema.BOOLEAN,
            _('True if the system should remember a generated private key; '
              'False otherwise.'),
            default=False,
        ),
        PUBLIC_KEY: properties.Schema(
            properties.Schema.STRING,
            _('The optional public key. This allows users to supply the '
              'public key from a pre-existing key pair. If not supplied, '
              'a new key pair will be generated.'),
        ),
    }

    attributes_schema = {
        PUBLIC_KEY_ATTR: attributes.Schema(
            _('The public key.'),
            type=attributes.Schema.STRING,
        ),
        PRIVATE_KEY_ATTR: attributes.Schema(
            _('The private key if it has been saved.'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING,
        ),
    }

    default_client_name = 'nova'

    def __init__(self, name, json_snippet, stack):
        super(KeyPair, self).__init__(name, json_snippet, stack)
        # Lazily-populated cache for the public_key property below.
        self._public_key = None

    @property
    def private_key(self):
        """Return the private SSH key for the resource."""
        if not self.properties[self.SAVE_PRIVATE_KEY]:
            return ''
        return self.data().get('private_key', '')

    @property
    def public_key(self):
        """Return the public SSH key for the resource."""
        if not self._public_key:
            supplied = self.properties[self.PUBLIC_KEY]
            if supplied:
                # User-provided key takes precedence.
                self._public_key = supplied
            elif self.resource_id:
                # Fall back to the key Nova generated for us.
                keypair = self.client_plugin().get_keypair(self.resource_id)
                self._public_key = keypair.public_key
        return self._public_key

    def handle_create(self):
        """Create (or import) the Nova key pair."""
        supplied_key = self.properties[self.PUBLIC_KEY] or None
        created = self.nova().keypairs.create(self.properties[self.NAME],
                                              public_key=supplied_key)
        wants_save = self.properties[self.SAVE_PRIVATE_KEY]
        if wants_save and hasattr(created, 'private_key'):
            # Redacted (third arg) so the key is not exposed in plain text.
            self.data_set('private_key', created.private_key, True)
        self.resource_id_set(created.id)

    def handle_delete(self):
        """Delete the key pair, tolerating an already-missing one."""
        if not self.resource_id:
            return
        try:
            self.nova().keypairs.delete(self.resource_id)
        except Exception as exc:
            self.client_plugin().ignore_not_found(exc)

    def handle_check(self):
        """Verify the key pair still exists in Nova."""
        self.nova().keypairs.get(self.resource_id)

    def _resolve_attribute(self, key):
        """Resolve the public_key / private_key attributes."""
        if key == self.PRIVATE_KEY_ATTR:
            return six.text_type(self.private_key)
        elif key == self.PUBLIC_KEY_ATTR:
            return six.text_type(self.public_key)
        raise KeyError(key)

    def FnGetRefId(self):
        return self.resource_id
class Order(resource.Resource):
    """A resource allowing for the generation secret material by Barbican.

    The resource allows to generate some secret material. It can be, for
    example, some key or certificate. The order encapsulates the workflow
    and history for the creation of a secret. The time to generate a secret
    can vary depending on the type of secret.
    """

    support_status = support.SupportStatus(version='2014.2')

    default_client_name = 'barbican'

    entity = 'orders'

    # Template-facing property keys.
    PROPERTIES = (NAME, PAYLOAD_CONTENT_TYPE, MODE, EXPIRATION, ALGORITHM,
                  BIT_LENGTH, TYPE, REQUEST_TYPE, SUBJECT_DN,
                  SOURCE_CONTAINER_REF, CA_ID, PROFILE, REQUEST_DATA,
                  PASS_PHRASE) = ('name', 'payload_content_type', 'mode',
                                  'expiration', 'algorithm', 'bit_length',
                                  'type', 'request_type', 'subject_dn',
                                  'source_container_ref', 'ca_id', 'profile',
                                  'request_data', 'pass_phrase')

    # Attributes resolvable via _resolve_attribute() below.  Note that
    # CERTIFICATE is rebound by ORDER_TYPES just after, to the same
    # 'certificate' string.
    ATTRIBUTES = (STATUS, ORDER_REF, SECRET_REF, PUBLIC_KEY, PRIVATE_KEY,
                  CERTIFICATE, INTERMEDIATES,
                  CONTAINER_REF) = ('status', 'order_ref', 'secret_ref',
                                    'public_key', 'private_key',
                                    'certificate', 'intermediates',
                                    'container_ref')

    ORDER_TYPES = (KEY, ASYMMETRIC, CERTIFICATE) = ('key', 'asymmetric',
                                                    'certificate')

    # full-cmc is declared but not yet supported in barbican
    REQUEST_TYPES = (STORED_KEY, SIMPLE_CMC, CUSTOM) = ('stored-key',
                                                        'simple-cmc',
                                                        'custom')

    # Which properties make sense for each order type; enforced by
    # validate() below.
    ALLOWED_PROPERTIES_FOR_TYPE = {
        KEY: [NAME, ALGORITHM, BIT_LENGTH, MODE, PAYLOAD_CONTENT_TYPE,
              EXPIRATION],
        ASYMMETRIC: [
            NAME, ALGORITHM, BIT_LENGTH, MODE, PASS_PHRASE,
            PAYLOAD_CONTENT_TYPE, EXPIRATION
        ],
        CERTIFICATE: [
            NAME, REQUEST_TYPE, SUBJECT_DN, SOURCE_CONTAINER_REF, CA_ID,
            PROFILE, REQUEST_DATA
        ]
    }

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Human readable name for the secret.'),
        ),
        PAYLOAD_CONTENT_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('The type/format the secret data is provided in.'),
        ),
        EXPIRATION: properties.Schema(
            properties.Schema.STRING,
            _('The expiration date for the secret in ISO-8601 format.'),
            constraints=[
                constraints.CustomConstraint('expiration'),
            ],
        ),
        ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('The algorithm type used to generate the secret. '
              'Required for key and asymmetric types of order.'),
        ),
        BIT_LENGTH: properties.Schema(
            properties.Schema.INTEGER,
            _('The bit-length of the secret. Required for key and '
              'asymmetric types of order.'),
        ),
        MODE: properties.Schema(
            properties.Schema.STRING,
            _('The type/mode of the algorithm associated with the secret '
              'information.'),
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('The type of the order.'),
            constraints=[
                constraints.AllowedValues(ORDER_TYPES),
            ],
            required=True,
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        REQUEST_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('The type of the certificate request.'),
            support_status=support.SupportStatus(version='5.0.0'),
            constraints=[constraints.AllowedValues(REQUEST_TYPES)]),
        SUBJECT_DN: properties.Schema(
            properties.Schema.STRING,
            _('The subject of the certificate request.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        SOURCE_CONTAINER_REF: properties.Schema(
            properties.Schema.STRING,
            _('The source of certificate request.'),
            support_status=support.SupportStatus(version='5.0.0'),
            constraints=[constraints.CustomConstraint('barbican.container')],
        ),
        CA_ID: properties.Schema(
            properties.Schema.STRING,
            _('The identifier of the CA to use.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        PROFILE: properties.Schema(
            properties.Schema.STRING,
            _('The profile of certificate to use.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        REQUEST_DATA: properties.Schema(
            properties.Schema.STRING,
            _('The content of the CSR. Only for certificate orders.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        PASS_PHRASE: properties.Schema(
            properties.Schema.STRING,
            _('The passphrase of the created key. Can be set only '
              'for asymmetric type of order.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
    }

    attributes_schema = {
        STATUS: attributes.Schema(
            _('The status of the order.'),
            type=attributes.Schema.STRING),
        ORDER_REF: attributes.Schema(
            _('The URI to the order.'),
            type=attributes.Schema.STRING),
        SECRET_REF: attributes.Schema(
            _('The URI to the created secret.'),
            type=attributes.Schema.STRING),
        CONTAINER_REF: attributes.Schema(
            _('The URI to the created container.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
        PUBLIC_KEY: attributes.Schema(
            _('The payload of the created public key, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
        PRIVATE_KEY: attributes.Schema(
            _('The payload of the created private key, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
        CERTIFICATE: attributes.Schema(
            _('The payload of the created certificate, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
        INTERMEDIATES: attributes.Schema(
            _('The payload of the created intermediates, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
    }

    def handle_create(self):
        """Submit the order to Barbican from the non-None properties."""
        info = dict(
            (k, v) for k, v in self.properties.items() if v is not None)
        order = self.client().orders.create(**info)
        order_ref = order.submit()
        self.resource_id_set(order_ref)
        # NOTE(pshchelo): order_ref is a HATEOAS reference, i.e. a string;
        # it does not need to be fixed re LP bug #1393268
        return order_ref

    def validate(self):
        """Check property combinations against the chosen order type."""
        super(Order, self).validate()
        if self.properties[self.TYPE] != self.CERTIFICATE:
            # key/asymmetric orders need both algorithm and bit_length.
            if (self.properties[self.ALGORITHM] is None
                    or self.properties[self.BIT_LENGTH] is None):
                msg = _("Properties %(algorithm)s and %(bit_length)s are "
                        "required for %(type)s type of order.") % {
                    'algorithm': self.ALGORITHM,
                    'bit_length': self.BIT_LENGTH,
                    'type': self.properties[self.TYPE]
                }
                raise exception.StackValidationFailed(message=msg)
        else:
            # A certificate profile is meaningless without a CA to apply it.
            if (self.properties[self.PROFILE]
                    and not self.properties[self.CA_ID]):
                raise exception.ResourcePropertyDependency(prop1=self.PROFILE,
                                                           prop2=self.CA_ID)
        # Reject any property that is not valid for the chosen type.
        declared_props = sorted([
            k for k, v in self.properties.items()
            if k != self.TYPE and v is not None
        ])
        allowed_props = sorted(
            self.ALLOWED_PROPERTIES_FOR_TYPE[self.properties[self.TYPE]])
        diff = sorted(set(declared_props) - set(allowed_props))
        if diff:
            msg = _("Unexpected properties: %(unexpected)s. Only these "
                    "properties are allowed for %(type)s type of order: "
                    "%(allowed)s.") % {
                'unexpected': ', '.join(diff),
                'type': self.properties[self.TYPE],
                'allowed': ', '.join(allowed_props)
            }
            raise exception.StackValidationFailed(message=msg)

    def check_create_complete(self, order_href):
        """Poll the order; True when ACTIVE, raise on ERROR."""
        order = self.client().orders.get(order_href)

        if order.status == 'ERROR':
            reason = order.error_reason
            code = order.error_status_code
            msg = (_("Order '%(name)s' failed: %(code)s - %(reason)s") % {
                'name': self.name,
                'code': code,
                'reason': reason
            })
            raise exception.Error(msg)

        return order.status == 'ACTIVE'

    def _resolve_attribute(self, name):
        """Resolve attributes from the order, or from its container for
        the key/certificate payload attributes."""
        if self.resource_id is None:
            return
        client = self.client()
        order = client.orders.get(self.resource_id)
        if name in (self.PUBLIC_KEY, self.PRIVATE_KEY, self.CERTIFICATE,
                    self.INTERMEDIATES):
            container = client.containers.get(order.container_ref)
            secret = getattr(container, name)
            return secret.payload

        return getattr(order, name)
class FloatingIP(neutron.NeutronResource):
    """A resource for managing Neutron floating ips.

    Floating IP addresses can change their association between routers by
    action of the user. One of the most common use cases for floating IPs is
    to provide public IP addresses to a private cloud, where there are a
    limited number of IP addresses available. Another is for a public cloud
    user to have a "static" IP address that can be reassigned when an instance
    is upgraded or moved.
    """

    # Neutron entity name used by the generic show/list plumbing.
    entity = 'floatingip'

    # Template property names.
    PROPERTIES = (
        FLOATING_NETWORK_ID, FLOATING_NETWORK, FLOATING_SUBNET, VALUE_SPECS,
        PORT_ID, FIXED_IP_ADDRESS, FLOATING_IP_ADDRESS, DNS_NAME, DNS_DOMAIN,
    ) = (
        'floating_network_id', 'floating_network', 'floating_subnet',
        'value_specs', 'port_id', 'fixed_ip_address', 'floating_ip_address',
        'dns_name', 'dns_domain',
    )

    # Names resolvable via get_attr.
    ATTRIBUTES = (
        ROUTER_ID, TENANT_ID, FLOATING_NETWORK_ID_ATTR, FIXED_IP_ADDRESS_ATTR,
        FLOATING_IP_ADDRESS_ATTR, PORT_ID_ATTR,
    ) = (
        'router_id', 'tenant_id', 'floating_network_id', 'fixed_ip_address',
        'floating_ip_address', 'port_id',
    )

    properties_schema = {
        # Hidden since 5.0.0 in favour of FLOATING_NETWORK; kept so old
        # templates keep working (translation_rules maps it across).
        FLOATING_NETWORK_ID: properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                message=_('Use property %s.') % FLOATING_NETWORK,
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        FLOATING_NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('Network to allocate floating IP from.'),
            support_status=support.SupportStatus(version='2014.2'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        FLOATING_SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('Subnet to allocate floating IP from.'),
            support_status=support.SupportStatus(version='9.0.0'),
            constraints=[constraints.CustomConstraint('neutron.subnet')],
        ),
        # Passed through verbatim into the neutron create request body.
        VALUE_SPECS: properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the "floatingip" object in the '
              'creation request. Parameters are often specific to installed '
              'hardware or extensions.'),
            default={}),
        PORT_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of an existing port with at least one IP address to '
              'associate with this floating IP.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('neutron.port')]),
        FIXED_IP_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address to use if the port has multiple addresses.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('ip_addr')]),
        FLOATING_IP_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address of the floating IP. NOTE: The default policy '
              'setting in Neutron restricts usage of this property to '
              'administrative users only.'),
            constraints=[constraints.CustomConstraint('ip_addr')],
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        # NOTE(review): DNS_NAME/DNS_DOMAIN are update_allowed but
        # handle_update below only sends port_id/fixed_ip_address to
        # neutron -- confirm whether DNS updates are intentionally dropped.
        DNS_NAME: properties.Schema(
            properties.Schema.STRING,
            _('DNS name associated with floating ip.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('rel_dns_name')],
            support_status=support.SupportStatus(version='7.0.0'),
        ),
        DNS_DOMAIN: properties.Schema(
            properties.Schema.STRING,
            _('DNS domain associated with floating ip.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('dns_domain')],
            support_status=support.SupportStatus(version='7.0.0'),
        ),
    }

    attributes_schema = {
        ROUTER_ID: attributes.Schema(
            _('ID of the router used as gateway, set when associated with a '
              'port.'),
            type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(
            _('The tenant owning this floating IP.'),
            type=attributes.Schema.STRING),
        FLOATING_NETWORK_ID_ATTR: attributes.Schema(
            _('ID of the network in which this IP is allocated.'),
            type=attributes.Schema.STRING),
        # CACHE_NONE: the association can change out-of-band, so always
        # re-fetch rather than serving a cached value.
        FIXED_IP_ADDRESS_ATTR: attributes.Schema(
            _('IP address of the associated port, if specified.'),
            type=attributes.Schema.STRING,
            cache_mode=attributes.Schema.CACHE_NONE),
        FLOATING_IP_ADDRESS_ATTR: attributes.Schema(
            _('The allocated address of this IP.'),
            type=attributes.Schema.STRING),
        PORT_ID_ATTR: attributes.Schema(
            _('ID of the port associated with this IP.'),
            type=attributes.Schema.STRING,
            cache_mode=attributes.Schema.CACHE_NONE),
    }

    def translation_rules(self, props):
        """Map the hidden *_id property and resolve names to UUIDs.

        FLOATING_NETWORK_ID is replaced by FLOATING_NETWORK, then both the
        network and the optional subnet are resolved from name to ID via the
        neutron client plugin.
        """
        return [
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.FLOATING_NETWORK],
                                        value_path=[self.FLOATING_NETWORK_ID]),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.FLOATING_NETWORK],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='network'),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.FLOATING_SUBNET],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='subnet',
            )
        ]

    def _add_router_interface_dependencies(self, deps, resource):
        """Make this FIP depend on RouterInterfaces reachable from its port.

        ``resource`` is an OS::Neutron::RouterInterface; we add a dependency
        on it if any Port that this FIP (transitively) depends on sits on the
        same subnet the interface attaches.
        """
        def port_on_subnet(resource, subnet):
            # True if ``resource`` is a Port with an address on ``subnet``.
            if not resource.has_interface('OS::Neutron::Port'):
                return False
            fixed_ips = resource.properties.get(port.Port.FIXED_IPS)
            if not fixed_ips:
                # During create we have only unresolved value for
                # functions, so can not use None value for building
                # correct dependencies. Depend on all RouterInterfaces
                # when the port has no fixed IP specified, since we
                # can't safely assume that any are in different
                # networks.
                if subnet is None:
                    return True
                p_net = (resource.properties.get(port.Port.NETWORK)
                         or resource.properties.get(port.Port.NETWORK_ID))
                if p_net:
                    # Ask neutron whether the interface's subnet belongs to
                    # the port's network.
                    network = self.client().show_network(p_net)['network']
                    return subnet in network['subnets']
            else:
                for fixed_ip in resource.properties.get(
                        port.Port.FIXED_IPS):
                    port_subnet = (fixed_ip.get(port.Port.FIXED_IP_SUBNET)
                                   or fixed_ip.get(
                                       port.Port.FIXED_IP_SUBNET_ID))
                    if subnet == port_subnet:
                        return True
            return False

        interface_subnet = (
            resource.properties.get(router.RouterInterface.SUBNET) or
            resource.properties.get(router.RouterInterface.SUBNET_ID))
        # Only inspect resources this FIP already depends on.
        for d in deps.graph()[self]:
            if port_on_subnet(d, interface_subnet):
                deps += (self, resource)
                break

    def add_dependencies(self, deps):
        """Add implicit ordering against router resources in the stack.

        A floating IP is only routable once its external network is wired to
        a router, so depend on matching RouterGateway / RouterInterface /
        Router resources in addition to the explicit template dependencies.
        """
        super(FloatingIP, self).add_dependencies(deps)

        for resource in six.itervalues(self.stack):
            # depend on any RouterGateway in this template with the same
            # network_id as this floating_network_id
            if resource.has_interface('OS::Neutron::RouterGateway'):
                gateway_network = resource.properties.get(
                    router.RouterGateway.NETWORK) or resource.properties.get(
                        router.RouterGateway.NETWORK_ID)
                floating_network = self.properties[self.FLOATING_NETWORK]
                if gateway_network == floating_network:
                    deps += (self, resource)

            # depend on any RouterInterface in this template which interfaces
            # with the same subnet that this floating IP's port is assigned
            # to
            elif resource.has_interface('OS::Neutron::RouterInterface'):
                self._add_router_interface_dependencies(deps, resource)

            # depend on any Router with an EXTERNAL_GATEWAY_NETWORK property
            # in this template with the same network_id as this
            # floating_network_id
            elif resource.has_interface('OS::Neutron::Router'):
                gateway = resource.properties.get(
                    router.Router.EXTERNAL_GATEWAY)
                if gateway:
                    gateway_network = gateway.get(
                        router.Router.EXTERNAL_GATEWAY_NETWORK)
                    floating_network = self.properties[self.FLOATING_NETWORK]
                    if gateway_network == floating_network:
                        deps += (self, resource)

    def validate(self):
        """Reject fixed_ip_address without a port_id."""
        super(FloatingIP, self).validate()
        # fixed_ip_address cannot be specified without a port_id
        if self.properties[self.PORT_ID] is None and self.properties[
                self.FIXED_IP_ADDRESS] is not None:
            raise exception.ResourcePropertyDependency(
                prop1=self.FIXED_IP_ADDRESS, prop2=self.PORT_ID)

    def handle_create(self):
        """Create the floating IP in neutron and record its ID."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        # Rename template-facing keys to the API's expected field names.
        props['floating_network_id'] = props.pop(self.FLOATING_NETWORK)
        if self.FLOATING_SUBNET in props:
            props['subnet_id'] = props.pop(self.FLOATING_SUBNET)
        fip = self.client().create_floatingip({
            'floatingip': props})['floatingip']
        self.resource_id_set(fip['id'])

    def handle_delete(self):
        """Delete the floating IP, treating an already-gone FIP as success."""
        with self.client_plugin().ignore_not_found:
            self.client().delete_floatingip(self.resource_id)
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-associate the floating IP on property change.

        Sends both port_id and fixed_ip_address (falling back to the current
        property values for whichever did not change); a None port_id
        disassociates the floating IP.
        """
        if prop_diff:
            port_id = prop_diff.get(self.PORT_ID,
                                    self.properties[self.PORT_ID])
            fixed_ip_address = prop_diff.get(
                self.FIXED_IP_ADDRESS,
                self.properties[self.FIXED_IP_ADDRESS])
            request_body = {
                'floatingip': {
                    'port_id': port_id,
                    'fixed_ip_address': fixed_ip_address
                }
            }
            self.client().update_floatingip(self.resource_id, request_body)
class VenafiCertificate(resource.Resource):
    """A resource for creating Venafi certificates.

    Enrolls a certificate through Venafi Trust Protection Platform (TPP) or
    Venafi Cloud via the vcert library, and exposes the certificate, chain,
    CSR and (optionally) the private key as attributes. The enrolled
    material is persisted in resource data by handle_create.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        NAME, CN, KEY_PASSWORD, KEY_TYPE, KEY_LENGTH, KEY_CURVE, SANs,
        ZONE, SAVE_PRIVATE_KEY, VENAFI_URL, TPP_USER, TPP_PASSWORD,
        API_KEY, TRUST_BUNDLE, FAKE, ACCESS_TOKEN
    ) = (
        'name', 'common_name', 'key_password', 'key_type', 'key_length',
        'key_curve', 'sans', 'zone', 'save_private_key', 'venafi_url',
        'tpp_user', 'tpp_password', 'api_key', 'trust_bundle', 'fake',
        'access_token'
    )

    ATTRIBUTES = (
        CERTIFICATE_ATTR, PRIVATE_KEY_ATTR, CHAIN_ATTR, CSR_ATTR,
    ) = (
        'certificate', 'private_key', 'chain', 'csr',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name of certificate'),
            required=True,
            constraints=[
                constraints.Length(min=1, max=255)
            ]
        ),
        CN: properties.Schema(
            properties.Schema.STRING,
            _('The common name of certificate'),
            required=True,
            constraints=[
                constraints.Length(min=1, max=255)
            ]
        ),
        KEY_PASSWORD: properties.Schema(
            properties.Schema.STRING,
            _("Password for encrypting private key"),
            default=None,
        ),
        KEY_TYPE: properties.Schema(
            properties.Schema.STRING,
            _("Cryptographic key type"),
            default="RSA",
            constraints=[constraints.AllowedValues(("RSA", "ECDSA"))],
        ),
        KEY_LENGTH: properties.Schema(
            properties.Schema.INTEGER,
            _("Key length (only for RSA key_type)"),
            default=2048,
            constraints=[constraints.AllowedValues((1024, 2048, 4096, 8192))],
        ),
        KEY_CURVE: properties.Schema(
            properties.Schema.STRING,
            _("Key elliptic curve (only for ECDSA key_type)"),
            default="p521",
            constraints=[constraints.AllowedValues(("p521", "p256", "p224",
                                                    "p384"))],
        ),
        # Each entry is prefixed with its kind: "ip:", "dns:" or "email:".
        SANs: properties.Schema(
            properties.Schema.LIST,
            _("List of Subject Alternative Names"),
            default=tuple(),
        ),
        ZONE: properties.Schema(
            properties.Schema.STRING,
            _("Venafi Trust Platform or Cloud zone name"),
            required=True,
            constraints=[constraints.Length(min=1, max=255)]
        ),
        SAVE_PRIVATE_KEY: properties.Schema(
            properties.Schema.BOOLEAN,
            _('True if the system should remember a generated private key; '
              'False otherwise.'),
            default=False
        ),
        VENAFI_URL: properties.Schema(
            properties.Schema.STRING,
            _("Trust Platform or Venafi Cloud url (required for TPP "
              "connection and optional for Cloud)"),
        ),
        TPP_USER: properties.Schema(
            properties.Schema.STRING,
            _("Trust Platform user (required for TPP connection)"),
        ),
        TPP_PASSWORD: properties.Schema(
            properties.Schema.STRING,
            _("Trust Platform password (required for TPP connection)"),
        ),
        API_KEY: properties.Schema(
            properties.Schema.STRING,
            _("Venafi Cloud api key (required for Cloud connection)"),
        ),
        TRUST_BUNDLE: properties.Schema(
            properties.Schema.STRING,
            _("Path to server certificate trust bundle or base64 encoded "
              "trust bundle certificate.")
        ),
        # Typo fix: was "testong".
        FAKE: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Use fake testing connection if true"),
            default=False
        ),
        ACCESS_TOKEN: properties.Schema(
            properties.Schema.STRING,
            _("Access token for TPP, user should use this for authentication")
        )
    }

    attributes_schema = {
        CERTIFICATE_ATTR: attributes.Schema(
            _('Venafi certificate.'),
            type=attributes.Schema.STRING
        ),
        PRIVATE_KEY_ATTR: attributes.Schema(
            _('Venafi private key.'),
            type=attributes.Schema.STRING
        ),
        CHAIN_ATTR: attributes.Schema(
            _('Venafi certificate chain.'),
            type=attributes.Schema.STRING
        ),
        CSR_ATTR: attributes.Schema(
            _('Venafi certificate request.'),
            type=attributes.Schema.STRING
        ),
    }

    default_client_name = 'nova'

    entity = 'venafi_certificate'

    def __init__(self, name, json_snippet, stack):
        super(VenafiCertificate, self).__init__(name, json_snippet, stack)
        # Enrollment result dict, filled by handle_create.
        self._cache = None
        # NOTE(review): building the connection at __init__ time means a
        # bad endpoint fails resource instantiation, not just create.
        self.conn = self.get_connection()

    @property
    def certificate(self):
        """Return Venafi certificate for the resource."""
        return self.data().get('certificate', '')

    @property
    def chain(self):
        """Return Venafi certificate chain for the resource."""
        return self.data().get('chain', '')

    @property
    def csr(self):
        """Return Venafi certificate request for the resource."""
        return self.data().get('csr', '')

    @property
    def private_key(self):
        """Return Venafi certificate private key for the resource.

        Empty string unless save_private_key was requested.
        """
        if self.properties[self.SAVE_PRIVATE_KEY]:
            return self.data().get('private_key', '')
        else:
            return ''

    def get_connection(self):
        """Build a vcert connection from the resource properties.

        Chooses token-based TPP auth when access_token is set, otherwise the
        legacy Connection (api key / user+password). An optional trust
        bundle may be given either as a file path or as a base64 encoded
        certificate; the latter is decoded into a temp file.

        :raises IOError: if the trust bundle is neither decodable base64
                         nor an existing file.
        """
        url = self.properties[self.VENAFI_URL]
        user = self.properties[self.TPP_USER]
        password = self.properties[self.TPP_PASSWORD]
        token = self.properties[self.API_KEY]
        fake = self.properties[self.FAKE]
        access_token = self.properties[self.ACCESS_TOKEN]
        if fake:
            LOG.info("Fake is %s. Will use fake connection", fake)

        trust_bundle = self.properties[self.TRUST_BUNDLE]
        if not trust_bundle:
            if access_token and access_token != "":
                return venafi_connection(
                    url=url, user=None, password=None,
                    access_token=access_token, refresh_token=None,
                    http_request_kwargs=None, api_key=None, fake=fake)
            return Connection(url, token, user, password, fake=fake)

        # Heuristic: if the value decodes as base64 it is treated as an
        # inline bundle, otherwise as a file path. b64decode with default
        # validate=False is lenient, so some paths may decode "successfully"
        # -- TODO confirm this heuristic against real inputs.
        try:
            decoded_bundle = base64.b64decode(trust_bundle)
        except Exception:
            # Was a bare "except:"; narrowed so SystemExit/KeyboardInterrupt
            # are not swallowed.
            LOG.info("Trust bundle %s is not base64 encoded string. "
                     "Considering it's a file", trust_bundle)
            if not path.isfile(trust_bundle):
                raise IOError(ENOENT, 'Not a file', trust_bundle)
        else:
            bundle_path = path.join(tempfile.gettempdir(),
                                    'venafi-temp-trust-bundle.pem')
            LOG.info("Saving decoded trust bundle to temp file %s",
                     bundle_path)
            # Context manager guarantees the handle is closed even if the
            # write fails (original used open/write/close).
            with open(bundle_path, "wb") as f:
                f.write(decoded_bundle)
            trust_bundle = bundle_path

        if access_token and access_token != "":
            return venafi_connection(
                url=url, user=None, password=None, access_token=access_token,
                refresh_token=None,
                http_request_kwargs={"verify": trust_bundle},
                api_key=None, fake=fake)
        return Connection(url, token, user, password,
                          http_request_kwargs={"verify": trust_bundle},
                          fake=fake)

    def enroll(self):
        """Request a certificate and poll until it is issued.

        :returns: dict keyed by the *_ATTR constants with certificate,
                  chain, private key and CSR.
        :raises Exception: on an unrecognized SAN prefix or when no
                           certificate is retrieved within 10 minutes.
        """
        LOG.info("Running enroll")
        common_name = self.properties[self.CN]
        sans = self.properties[self.SANs]
        privatekey_passphrase = self.properties[self.KEY_PASSWORD]
        privatekey_type = self.properties[self.KEY_TYPE]
        curve = self.properties[self.KEY_CURVE]
        key_size = self.properties[self.KEY_LENGTH]
        zone = self.properties[self.ZONE]
        LOG.info("Reading zone config from %s", zone)
        zone_config = self.conn.read_zone_conf(zone)
        request = CertificateRequest(
            common_name=common_name,
            origin="OpenStack"
        )
        request.update_from_zone_config(zone_config)

        # Split the SAN list into the three kinds vcert understands.
        ip_addresses = []
        email_addresses = []
        san_dns = []
        if len(sans) > 0:
            LOG.info("Configuring SANs from list %s", sans)
            for n in sans:
                if n.lower().startswith(("ip:", "ip address:")):
                    ip = n.split(":", 1)[1]
                    LOG.info("Adding ip %s to ip_addresses", ip)
                    ip_addresses.append(ip)
                elif n.lower().startswith("dns:"):
                    ns = n.split(":", 1)[1]
                    LOG.info("Adding domain name %s to san_dns", ns)
                    san_dns.append(ns)
                elif n.lower().startswith("email:"):
                    mail = n.split(":", 1)[1]
                    LOG.info("Adding mail %s to email_addresses", mail)
                    email_addresses.append(mail)
                else:
                    raise Exception(
                        "Failed to determine extension type: %s" % n)
            request.ip_addresses = ip_addresses
            request.san_dns = san_dns
            request.email_addresses = email_addresses
            LOG.info("Request is %s, %s, %s", request.ip_addresses,
                     request.san_dns, request.email_addresses)

        if privatekey_passphrase is not None:
            request.key_password = privatekey_passphrase
        if privatekey_type:
            request.key_type = KeyType(privatekey_type, key_size or curve)

        self.conn.request_cert(request, zone)

        # Poll for up to 10 minutes, sleeping 5s between attempts.
        deadline = time.time() + 600
        while True:
            LOG.info("Trying to retrieve certificate")
            cert = self.conn.retrieve_cert(request)  # type: vcert.Certificate
            if cert or time.time() > deadline:
                break
            else:
                time.sleep(5)
        # Fix: previously a timeout fell through to cert.cert on None and
        # raised an opaque AttributeError.
        if not cert:
            raise Exception(
                "Timed out waiting for certificate %s" % common_name)

        LOG.info("Got certificate: %s", cert.cert)
        LOG.info("Got chain: %s", cert.chain)
        return {self.CHAIN_ATTR: cert.chain,
                self.CERTIFICATE_ATTR: cert.cert,
                self.PRIVATE_KEY_ATTR: request.private_key_pem,
                self.CSR_ATTR: request.csr}

    def handle_create(self):
        """Enroll the certificate and persist the result as resource data."""
        LOG.info("Creating Venafi certificate")
        self._cache = self.enroll()
        LOG.info("Saving to data certificate: %s",
                 self._cache[self.CERTIFICATE_ATTR])
        self.data_set('certificate', self._cache[self.CERTIFICATE_ATTR],
                      redact=False)
        chain = '\n'.join(self._cache[self.CHAIN_ATTR])
        if len(chain) > 0:
            LOG.info("Saving to data chain: %s", chain)
            self.data_set('chain', chain, redact=False)
        LOG.info("Saving to data private_key.")
        # NOTE(review): the private key is stored with redact=False, i.e.
        # unencrypted in the Heat DB -- consider redact=True; left as-is to
        # preserve behavior.
        self.data_set('private_key', self._cache[self.PRIVATE_KEY_ATTR],
                      redact=False)
        LOG.info("Saving CSR to data")
        self.data_set('csr', self._cache[self.CSR_ATTR], redact=False)

    def _resolve_attribute(self, name):
        """Map attribute names onto the corresponding data-backed property."""
        attr_fn = {self.CERTIFICATE_ATTR: self.certificate,
                   self.CHAIN_ATTR: self.chain,
                   self.PRIVATE_KEY_ATTR: self.private_key,
                   self.CSR_ATTR: self.csr}
        return six.text_type(attr_fn[name])

    def get_reference_id(self):
        """Return the physical resource id for get_resource references."""
        return self.resource_id
class TroveCluster(resource.Resource):
    """A resource for managing a Trove database cluster.

    Creates a cluster of database instances with a common datastore type
    and version, and exposes the member instance ids and cluster IP(s) as
    attributes.
    """

    support_status = support.SupportStatus(version='2015.1')

    TROVE_STATUS = (
        ERROR, FAILED, ACTIVE,
    ) = (
        'ERROR', 'FAILED', 'ACTIVE',
    )

    TROVE_STATUS_REASON = {
        FAILED: _('The database instance was created, but heat failed to set '
                  'up the datastore. If a database instance is in the FAILED '
                  'state, it should be deleted and a new one should be '
                  'created.'),
        ERROR: _('The last operation for the database instance failed due to '
                 'an error.'),
    }

    # Instance states that abort create with ResourceInError.
    BAD_STATUSES = (ERROR, FAILED)

    PROPERTIES = (
        NAME, DATASTORE_TYPE, DATASTORE_VERSION, INSTANCES,
    ) = (
        'name', 'datastore_type', 'datastore_version', 'instances',
    )

    _INSTANCE_KEYS = (
        FLAVOR, VOLUME_SIZE,
    ) = (
        'flavor', 'volume_size',
    )

    ATTRIBUTES = (
        INSTANCES_ATTR, IP
    ) = (
        'instances', 'ip'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the cluster to create.'),
            constraints=[
                constraints.Length(max=255),
            ]
        ),
        DATASTORE_TYPE: properties.Schema(
            properties.Schema.STRING,
            _("Name of registered datastore type."),
            required=True,
            constraints=[
                constraints.Length(max=255)
            ]
        ),
        DATASTORE_VERSION: properties.Schema(
            properties.Schema.STRING,
            _("Name of the registered datastore version. "
              "It must exist for provided datastore type. "
              "Defaults to using single active version. "
              "If several active versions exist for provided datastore type, "
              "explicit value for this parameter must be specified."),
            required=True,
            constraints=[constraints.Length(max=255)]
        ),
        INSTANCES: properties.Schema(
            properties.Schema.LIST,
            _("List of database instances."),
            required=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    FLAVOR: properties.Schema(
                        properties.Schema.STRING,
                        _('Flavor of the instance.'),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('trove.flavor')
                        ]
                    ),
                    VOLUME_SIZE: properties.Schema(
                        properties.Schema.INTEGER,
                        _('Size of the instance disk volume in GB.'),
                        required=True,
                        constraints=[
                            constraints.Range(1, 150),
                        ]
                    )
                }
            )
        )
    }

    # Consistency fix: key the schema by the attribute constant
    # INSTANCES_ATTR (same string value as the INSTANCES property constant,
    # so behavior is unchanged) to match ATTRIBUTES above.
    attributes_schema = {
        INSTANCES_ATTR: attributes.Schema(
            _("A list of instances ids."),
            type=attributes.Schema.LIST
        ),
        IP: attributes.Schema(
            _("A list of cluster instance IPs."),
            type=attributes.Schema.LIST
        )
    }

    default_client_name = 'trove'

    entity = 'clusters'

    def _cluster_name(self):
        # Fall back to the generated physical resource name when the
        # template does not supply one.
        return self.properties[self.NAME] or self.physical_resource_name()

    def handle_create(self):
        """Create the trove cluster; returns the new cluster id."""
        datastore_type = self.properties[self.DATASTORE_TYPE]
        datastore_version = self.properties[self.DATASTORE_VERSION]

        # convert instances to format required by troveclient
        instances = [
            {'flavorRef': self.client_plugin().find_flavor_by_name_or_id(
                instance[self.FLAVOR]),
             'volume': {'size': instance[self.VOLUME_SIZE]}}
            for instance in self.properties[self.INSTANCES]]

        args = {
            'name': self._cluster_name(),
            'datastore': datastore_type,
            'datastore_version': datastore_version,
            'instances': instances
        }
        cluster = self.client().clusters.create(**args)
        self.resource_id_set(cluster.id)
        return cluster.id

    def _refresh_cluster(self, cluster_id):
        """Fetch the cluster, returning None on an over-limit response.

        Over-limit is treated as transient so polling can retry; any other
        error propagates.
        """
        try:
            return self.client().clusters.get(cluster_id)
        except Exception as exc:
            if self.client_plugin().is_over_limit(exc):
                LOG.warning(_LW("Stack %(name)s (%(id)s) received an "
                                "OverLimit response during clusters.get():"
                                " %(exception)s"),
                            {'name': self.stack.name,
                             'id': self.stack.id,
                             'exception': exc})
                return None
            else:
                raise

    def check_create_complete(self, cluster_id):
        """Poll until every member instance is ACTIVE.

        :raises exception.ResourceInError: if any member enters a BAD_STATUS.
        """
        cluster = self._refresh_cluster(cluster_id)

        # Transient over-limit: try again on the next poll.
        if cluster is None:
            return False

        for instance in cluster.instances:
            if instance['status'] in self.BAD_STATUSES:
                raise exception.ResourceInError(
                    resource_status=instance['status'],
                    status_reason=self.TROVE_STATUS_REASON.get(
                        instance['status'], _("Unknown")))

            if instance['status'] != self.ACTIVE:
                return False

        LOG.info(_LI("Cluster '%s' has been created"), cluster.name)
        return True

    def handle_delete(self):
        """Request cluster deletion; a missing cluster counts as deleted."""
        if not self.resource_id:
            return

        try:
            cluster = self.client().clusters.get(self.resource_id)
        except Exception as ex:
            # Re-raises anything that is not a NotFound.
            self.client_plugin().ignore_not_found(ex)
        else:
            cluster.delete()
            return cluster.id

    def check_delete_complete(self, cluster_id):
        """Return True once the cluster can no longer be fetched."""
        if not cluster_id:
            return True

        try:
            # For some time trove cluster may continue to live
            self._refresh_cluster(cluster_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return True

        return False

    def validate(self):
        """Validate the datastore type/version pairing."""
        res = super(TroveCluster, self).validate()
        if res:
            return res

        datastore_type = self.properties[self.DATASTORE_TYPE]
        datastore_version = self.properties[self.DATASTORE_VERSION]
        self.client_plugin().validate_datastore(
            datastore_type, datastore_version,
            self.DATASTORE_TYPE, self.DATASTORE_VERSION)

    def _resolve_attribute(self, name):
        """Resolve the 'instances' and 'ip' attributes from the live cluster."""
        if name == self.INSTANCES_ATTR:
            cluster = self.client().clusters.get(self.resource_id)
            return [instance['id'] for instance in cluster.instances]
        elif name == self.IP:
            cluster = self.client().clusters.get(self.resource_id)
            return cluster.ip
class Resource(object): ACTIONS = ( INIT, CREATE, DELETE, UPDATE, ROLLBACK, SUSPEND, RESUME, ADOPT, SNAPSHOT, CHECK, ) = ( 'INIT', 'CREATE', 'DELETE', 'UPDATE', 'ROLLBACK', 'SUSPEND', 'RESUME', 'ADOPT', 'SNAPSHOT', 'CHECK', ) STATUSES = (IN_PROGRESS, FAILED, COMPLETE) = ('IN_PROGRESS', 'FAILED', 'COMPLETE') # If True, this resource must be created before it can be referenced. strict_dependency = True # Resource implementation set this to the subset of resource properties # supported for handle_update, used by update_template_diff_properties update_allowed_properties = () # Resource implementations set this to the name: description dictionary # that describes the appropriate resource attributes attributes_schema = {} # If True, this resource may perform authenticated API requests # throughout its lifecycle requires_deferred_auth = False # Limit to apply to physical_resource_name() size reduction algorithm. # If set to None no limit will be applied. physical_resource_name_limit = 255 support_status = support.SupportStatus() # Default name to use for calls to self.client() default_client_name = None def __new__(cls, name, definition, stack): '''Create a new Resource of the appropriate class for its type.''' assert isinstance(definition, rsrc_defn.ResourceDefinition) if cls != Resource: # Call is already for a subclass, so pass it through ResourceClass = cls else: from heat.engine.resources import template_resource # Select the correct subclass to instantiate. # Note: If the current stack is an implementation of # a resource type (a TemplateResource mapped in the environment) # then don't infinitely recurse by creating a child stack # of the same type. Instead get the next match which will get # us closer to a concrete class. 
def get_ancestor_template_resources(): """Return an ancestry list (TemplateResources only).""" parent = stack.parent_resource while parent is not None: if isinstance(parent, template_resource.TemplateResource): yield parent.template_name parent = parent.stack.parent_resource ancestor_list = set(get_ancestor_template_resources()) def accept_class(res_info): if not isinstance(res_info, environment.TemplateResourceInfo): return True return res_info.template_name not in ancestor_list registry = stack.env.registry try: ResourceClass = registry.get_class(definition.resource_type, resource_name=name, accept_fn=accept_class) except exception.NotFound: ResourceClass = template_resource.TemplateResource assert issubclass(ResourceClass, Resource) return super(Resource, cls).__new__(ResourceClass) def __init__(self, name, definition, stack): def _validate_name(res_name): if '/' in res_name: message = _('Resource name may not contain "/"') raise exception.StackValidationFailed(message=message) _validate_name(name) self.stack = stack self.context = stack.context self.name = name self.t = definition self.reparse() self.attributes = attributes.Attributes(self.name, self.attributes_schema, self._resolve_attribute) self.abandon_in_progress = False self.resource_id = None # if the stack is being deleted, assume we've already been deleted if stack.action == stack.DELETE: self.action = self.DELETE else: self.action = self.INIT self.status = self.COMPLETE self.status_reason = '' self.id = None self._data = {} self._rsrc_metadata = None self._stored_properties_data = None self.created_time = None self.updated_time = None self._rpc_client = None resource = stack.db_resource_get(name) if resource: self._load_data(resource) def rpc_client(self): '''Return a client for making engine RPC calls.''' if not self._rpc_client: self._rpc_client = rpc_client.EngineClient() return self._rpc_client def _load_data(self, resource): '''Load the resource state from its DB representation.''' 
self.resource_id = resource.nova_instance self.action = resource.action self.status = resource.status self.status_reason = resource.status_reason self.id = resource.id try: self._data = db_api.resource_data_get_all(self, resource.data) except exception.NotFound: self._data = {} self._rsrc_metadata = resource.rsrc_metadata self._stored_properties_data = resource.properties_data self.created_time = resource.created_at self.updated_time = resource.updated_at def reparse(self): self.properties = self.t.properties(self.properties_schema, self.context) def __eq__(self, other): '''Allow == comparison of two resources.''' # For the purposes of comparison, we declare two resource objects # equal if their names and parsed_templates are the same if isinstance(other, Resource): return (self.name == other.name) and (self.parsed_template() == other.parsed_template()) return NotImplemented def __ne__(self, other): '''Allow != comparison of two resources.''' result = self.__eq__(other) if result is NotImplemented: return result return not result @property def metadata(self): """DEPRECATED. use method metadata_get instead.""" warnings.warn( 'metadata attribute is deprecated, ' 'use method metadata_get instead', DeprecationWarning) return self.metadata_get(True) @metadata.setter def metadata(self, metadata): """DEPRECATED. 
use method metadata_set instead.""" warnings.warn( 'metadata attribute is deprecated, ' 'use method metadata_set instead', DeprecationWarning) self.metadata_set(metadata) def metadata_get(self, refresh=False): if refresh: self._rsrc_metadata = None if self.id is None: return self.t.metadata() if self._rsrc_metadata is not None: return self._rsrc_metadata rs = db_api.resource_get(self.stack.context, self.id) rs.refresh(attrs=['rsrc_metadata']) self._rsrc_metadata = rs.rsrc_metadata return rs.rsrc_metadata def metadata_set(self, metadata): if self.id is None: raise exception.ResourceNotAvailable(resource_name=self.name) rs = db_api.resource_get(self.stack.context, self.id) rs.update_and_save({'rsrc_metadata': metadata}) self._rsrc_metadata = metadata def type(self): return self.t.resource_type def has_interface(self, resource_type): """Check to see if this resource is either mapped to resource_type or is a "resource_type". """ if self.type() == resource_type: return True ri = self.stack.env.get_resource_info(self.type(), self.name) return ri.name == resource_type def implementation_signature(self): ''' Return a tuple defining the implementation. This should be broken down into a definition and an implementation version. ''' return (self.__class__.__name__, self.support_status.version) def identifier(self): '''Return an identifier for this resource.''' return identifier.ResourceIdentifier(resource_name=self.name, **self.stack.identifier()) def parsed_template(self, section=None, default=None): ''' Return the parsed template data for the resource. May be limited to only one section of the data, in which case a default value may also be supplied. 
''' default = default or {} if section is None: template = self.t else: template = self.t.get(section, default) return function.resolve(template) def frozen_definition(self): if self._stored_properties_data is not None: args = {'properties': self._stored_properties_data} else: args = {} return self.t.freeze(**args) def update_template_diff(self, after, before): ''' Returns the difference between the before and after json snippets. If something has been removed in after which exists in before we set it to None. ''' # Create a set containing the keys in both current and update template template_keys = set(before.keys()) template_keys.update(set(after.keys())) # Create a set of keys which differ (or are missing/added) changed_keys_set = set( [k for k in template_keys if before.get(k) != after.get(k)]) return dict((k, after.get(k)) for k in changed_keys_set) def update_template_diff_properties(self, after_props, before_props): ''' Returns the changed Properties between the before and after properties. If any property having immutable as True is updated, raises NotSupported error. If any properties have changed which are not in update_allowed_properties, raises UpdateReplace. 
''' update_allowed_set = set(self.update_allowed_properties) immutable_set = set() for (psk, psv) in six.iteritems(self.properties.props): if psv.update_allowed(): update_allowed_set.add(psk) if psv.immutable(): immutable_set.add(psk) # Create a set of keys which differ (or are missing/added) changed_properties_set = set( k for k in after_props if before_props.get(k) != after_props.get(k)) # Create a list of updated properties offending property immutability update_replace_forbidden = [ k for k in changed_properties_set if k in immutable_set ] if update_replace_forbidden: mesg = _( "Update to properties %(props)s of %(name)s (%(res)s)") % { 'props': ", ".join(sorted(update_replace_forbidden)), 'res': self.type(), 'name': self.name } raise exception.NotSupported(feature=mesg) if not changed_properties_set.issubset(update_allowed_set): raise UpdateReplace(self.name) return dict((k, after_props.get(k)) for k in changed_properties_set) def __str__(self): if self.stack.id: if self.resource_id: text = '%s "%s" [%s] %s' % (self.__class__.__name__, self.name, self.resource_id, str(self.stack)) else: text = '%s "%s" %s' % (self.__class__.__name__, self.name, str(self.stack)) else: text = '%s "%s"' % (self.__class__.__name__, self.name) return encodeutils.safe_encode(text) def __unicode__(self): if self.stack.id: if self.resource_id: text = '%s "%s" [%s] %s' % (self.__class__.__name__, self.name, self.resource_id, six.text_type(self.stack)) else: text = '%s "%s" %s' % (self.__class__.__name__, self.name, six.text_type(self.stack)) else: text = '%s "%s"' % (self.__class__.__name__, self.name) return encodeutils.safe_decode(text) def add_dependencies(self, deps): for dep in self.t.dependencies(self.stack): deps += (self, dep) deps += (self, None) def required_by(self): ''' Returns a list of names of resources which directly require this resource as a dependency. 
''' return list( [r.name for r in self.stack.dependencies.required_by(self)]) def client(self, name=None): client_name = name or self.default_client_name assert client_name, "Must specify client name" return self.stack.clients.client(client_name) def client_plugin(self, name=None): client_name = name or self.default_client_name assert client_name, "Must specify client name" return self.stack.clients.client_plugin(client_name) def keystone(self): return self.client('keystone') def nova(self): return self.client('nova') def swift(self): return self.client('swift') def neutron(self): return self.client('neutron') def cinder(self): return self.client('cinder') def trove(self): return self.client('trove') def ceilometer(self): return self.client('ceilometer') def heat(self): return self.client('heat') def glance(self): return self.client('glance') @contextlib.contextmanager def _action_recorder(self, action, expected_exceptions=tuple()): '''Return a context manager to record the progress of an action. Upon entering the context manager, the state is set to IN_PROGRESS. Upon exiting, the state will be set to COMPLETE if no exception was raised, or FAILED otherwise. Non-exit exceptions will be translated to ResourceFailure exceptions. Expected exceptions are re-raised, with the Resource left in the IN_PROGRESS state. 
def action_handler_task(self, action, args=None, action_prefix=None):
    '''A task to call the Resource subclass's handler methods for action.

    Calls the handle_<ACTION>() method for the given action and then
    calls the check_<ACTION>_complete() method with the result in a loop
    until it returns True. If the methods are not provided, the call is
    omitted.

    Any args provided are passed to the handler.

    If a prefix is supplied, the handler method
    handle_<PREFIX>_<ACTION>() is called instead.
    '''
    # FIX: the original used a mutable default (args=[]), which Python
    # shares between all calls of the function; use a None sentinel.
    if args is None:
        args = []
    handler_action = action.lower()
    check = getattr(self, 'check_%s_complete' % handler_action, None)
    if action_prefix:
        handler_action = '%s_%s' % (action_prefix.lower(), handler_action)
    handler = getattr(self, 'handle_%s' % handler_action, None)

    if callable(handler):
        handler_data = handler(*args)
        yield
        if callable(check):
            # Poll the completion check until it reports done.
            while not check(handler_data):
                yield
@scheduler.wrappertask
def _do_action(self, action, pre_func=None, resource_data=None):
    '''Perform a transition to a new state via a specified action.

    *action* should be e.g. self.CREATE, self.UPDATE etc.; we set status
    based on this, and the transition is handled by calling the
    corresponding handle_* and check_*_complete functions.

    *pre_func* is an optional callable invoked before the
    handle_<action> function. If the resource declares no
    check_<action>_complete, COMPLETE is declared as soon as
    handle_<action> finishes; with no handle_<action> either, nothing is
    done at all.
    '''
    assert action in self.ACTIONS, 'Invalid action %s' % action

    with self._action_recorder(action):
        if callable(pre_func):
            pre_func()

        handler_args = [resource_data] if resource_data is not None else []
        yield self.action_handler_task(action, args=handler_args)


def _update_stored_properties(self):
    # Snapshot fully-resolved property values for persistence.
    self._stored_properties_data = function.resolve(self.properties.data)


def preview(self):
    '''Default implementation of Resource.preview.

    Returns self; child classes override for specific behaviour.
    '''
    return self
@scheduler.wrappertask
def create(self):
    '''Create the resource.

    Subclasses should provide a handle_create() method to customise
    creation. Failed creates are retried (with a delete in between) up
    to cfg.CONF.action_retry_limit times when the failure wraps
    ResourceInError.
    '''
    action = self.CREATE
    if (self.action, self.status) != (self.INIT, self.COMPLETE):
        exc = exception.Error(_('State %s invalid for create')
                              % six.text_type(self.state))
        raise exception.ResourceFailure(exc, self, action)
    LOG.info(_LI('creating %s'), six.text_type(self))

    # Re-resolve the template, since if the resource Ref's the StackId
    # pseudo parameter, it will change after the parser.Stack is stored
    # (which is after the resources are __init__'d, but before they are
    # create()'d).
    self.reparse()
    self._update_stored_properties()

    def pause():
        # Idle until the enclosing TaskRunner times out the back-off
        # delay.
        try:
            while True:
                yield
        except scheduler.Timeout:
            return

    attempts = {self.CREATE: 0, self.DELETE: 0}
    retry_limit = max(cfg.CONF.action_retry_limit, 0)
    first_failure = None

    while (attempts[self.CREATE] <= retry_limit and
           attempts[self.DELETE] <= retry_limit):
        if attempts[action]:
            # Exponential back-off before retrying the action.
            delay = timeutils.retry_backoff_delay(attempts[action],
                                                  jitter_max=2.0)
            waiter = scheduler.TaskRunner(pause)
            waiter.start(timeout=delay)
            while not waiter.step():
                yield
        try:
            yield self._do_action(action, self.properties.validate)
            if action == self.CREATE:
                return
            else:
                # Delete succeeded; go back to trying the create.
                action = self.CREATE
        except exception.ResourceFailure as failure:
            if not isinstance(failure.exc, ResourceInError):
                raise failure

            attempts[action] += 1
            if action == self.CREATE:
                action = self.DELETE
                attempts[action] = 0

            if first_failure is None:
                # Save the first exception for the final re-raise.
                first_failure = failure

    if first_failure:
        raise first_failure


def prepare_abandon(self):
    '''Flag abandonment and return this resource's abandon payload.'''
    self.abandon_in_progress = True
    return {
        'name': self.name,
        'resource_id': self.resource_id,
        'type': self.type(),
        'action': self.action,
        'status': self.status,
        'metadata': self.metadata_get(refresh=True),
        'resource_data': self.data()
    }
def adopt(self, resource_data):
    '''Adopt the existing resource.

    Resource subclasses can provide a handle_adopt() method to customise
    adopt.
    '''
    self._update_stored_properties()
    return self._do_action(self.ADOPT, resource_data=resource_data)


def handle_adopt(self, resource_data=None):
    '''Default adopt handler.

    Restores the physical id, resource data and metadata from the
    abandoned-resource payload.

    :raises exception.ResourceFailure: if no resource id is supplied.
    '''
    resource_id, data, metadata = self._get_resource_info(resource_data)

    if not resource_id:
        exc = Exception(_('Resource ID was not provided.'))
        failure = exception.ResourceFailure(exc, self)
        raise failure

    # set resource id
    self.resource_id_set(resource_id)

    # save the resource data
    if data and isinstance(data, dict):
        for key, value in six.iteritems(data):
            self.data_set(key, value)

    # save the resource metadata
    self.metadata_set(metadata)


def _get_resource_info(self, resource_data):
    '''Split an abandon payload into (resource_id, data, metadata).

    Returns (None, None, None) when no payload is given.
    '''
    if not resource_data:
        return None, None, None

    return (resource_data.get('resource_id'),
            resource_data.get('resource_data'),
            resource_data.get('metadata'))


def _needs_update(self, after, before, after_props, before_props,
                  prev_resource):
    '''Return True when the resource must be updated in place.

    Raises UpdateReplace when the resource must be replaced instead.
    '''
    if self.status == self.FAILED:
        # FIX: pass the resource name, consistent with every other
        # UpdateReplace raise in this class (previously passed self).
        raise UpdateReplace(self.name)

    if prev_resource is not None:
        cur_class_def, cur_ver = self.implementation_signature()
        prev_class_def, prev_ver = prev_resource.implementation_signature()

        if prev_class_def != cur_class_def:
            # The plugin code itself changed: replacement required.
            raise UpdateReplace(self.name)
        if prev_ver != cur_ver:
            return True

    if before != after.freeze():
        return True

    try:
        return before_props != after_props
    except ValueError:
        # Properties that cannot be compared are treated as changed.
        return True
@scheduler.wrappertask
def update(self, after, before=None, prev_resource=None):
    '''Update the resource.

    Subclasses should provide a handle_update() method to customise
    update; the base-class handle_update will fail by default.
    '''
    action = self.UPDATE

    assert isinstance(after, rsrc_defn.ResourceDefinition)

    if before is None:
        before = self.frozen_definition()

    before_props = before.properties(self.properties_schema,
                                     self.context)
    after_props = after.properties(self.properties_schema,
                                   self.context)

    if not self._needs_update(after, before, after_props, before_props,
                              prev_resource):
        return

    # Refuse concurrent state transitions.
    if (self.action, self.status) in ((self.CREATE, self.IN_PROGRESS),
                                      (self.UPDATE, self.IN_PROGRESS),
                                      (self.ADOPT, self.IN_PROGRESS)):
        exc = Exception(_('Resource update already requested'))
        raise exception.ResourceFailure(exc, self, action)

    LOG.info(_LI('updating %s'), six.text_type(self))

    self.updated_time = datetime.utcnow()
    # UpdateReplace is an expected exception here: it leaves the resource
    # IN_PROGRESS so that replacement can proceed.
    with self._action_recorder(action, UpdateReplace):
        after_props.validate()
        tmpl_diff = self.update_template_diff(function.resolve(after),
                                              before)
        prop_diff = self.update_template_diff_properties(after_props,
                                                         before_props)
        yield self.action_handler_task(action,
                                       args=[after, tmpl_diff, prop_diff])

        self.t = after
        self.reparse()
        self._update_stored_properties()
""" action = self.CHECK LOG.info(_LI('Checking %s'), six.text_type(self)) if hasattr(self, 'handle_%s' % action.lower()): return self._do_action(action) else: reason = '%s not supported for %s' % (action, self.type()) self.state_set(action, self.COMPLETE, reason) def _verify_check_conditions(self, checks): def valid(check): if isinstance(check['expected'], list): return check['current'] in check['expected'] else: return check['current'] == check['expected'] msg = _("'%(attr)s': expected '%(expected)s', got '%(current)s'") invalid_checks = [msg % check for check in checks if not valid(check)] if invalid_checks: raise exception.Error('; '.join(invalid_checks)) def suspend(self): ''' Suspend the resource. Subclasses should provide a handle_suspend() method to implement suspend ''' action = self.SUSPEND # Don't try to suspend the resource unless it's in a stable state if (self.action == self.DELETE or self.status != self.COMPLETE): exc = exception.Error( _('State %s invalid for suspend') % six.text_type(self.state)) raise exception.ResourceFailure(exc, self, action) LOG.info(_LI('suspending %s'), six.text_type(self)) return self._do_action(action) def resume(self): ''' Resume the resource. 
def resume(self):
    '''Resume the resource.

    Subclasses should provide a handle_resume() method to implement
    resume.
    '''
    action = self.RESUME

    # Can't resume a resource unless it's SUSPEND_COMPLETE
    if self.state != (self.SUSPEND, self.COMPLETE):
        exc = exception.Error(_('State %s invalid for resume')
                              % six.text_type(self.state))
        raise exception.ResourceFailure(exc, self, action)

    LOG.info(_LI('resuming %s'), six.text_type(self))
    return self._do_action(action)


def snapshot(self):
    '''Snapshot the resource and return the created data, if any.'''
    LOG.info(_LI('snapshotting %s'), six.text_type(self))
    return self._do_action(self.SNAPSHOT)


@scheduler.wrappertask
def delete_snapshot(self, data):
    '''Run the delete_snapshot handler/checker pair for *data*.'''
    yield self.action_handler_task('delete_snapshot', args=[data])


def physical_resource_name(self):
    '''Build "<stack>-<resource>-<short id>", truncated to the limit.

    Returns None while the resource has no database id.
    '''
    if self.id is None:
        return None

    name = '%s-%s-%s' % (self.stack.name,
                         self.name,
                         short_id.get_id(self.id))
    if self.physical_resource_name_limit:
        name = self.reduce_physical_resource_name(
            name, self.physical_resource_name_limit)
    return name
@staticmethod
def reduce_physical_resource_name(name, limit):
    '''Reduce the length of a physical resource name to *limit*.

    The reduced name consists of the first 2 characters of the name, a
    hyphen, and the end of the name truncated on the left to bring the
    total length within the limit.

    :param name: The name to reduce the length of
    :param limit: The max length limit
    :returns: A name whose length is less than or equal to the limit
    :raises ValueError: if limit < 4 and the name exceeds it
    '''
    if len(name) <= limit:
        return name

    if limit < 4:
        raise ValueError(_('limit cannot be less than 4'))

    # 3 = the 2 leading characters plus the hyphen.
    postfix_length = limit - 3
    return name[0:2] + '-' + name[-postfix_length:]


def validate(self):
    '''Validate template functions, deletion policy and properties.'''
    LOG.info(_LI('Validating %s'), six.text_type(self))

    function.validate(self.t)
    self.validate_deletion_policy(self.t.deletion_policy())
    return self.properties.validate(
        with_value=self.stack.strict_validate)


@classmethod
def validate_deletion_policy(cls, policy):
    '''Reject unknown policies, and Snapshot without a delete handler.'''
    if policy not in rsrc_defn.ResourceDefinition.DELETION_POLICIES:
        msg = _('Invalid deletion policy "%s"') % policy
        raise exception.StackValidationFailed(message=msg)

    if policy == rsrc_defn.ResourceDefinition.SNAPSHOT:
        if not callable(getattr(cls, 'handle_snapshot_delete', None)):
            msg = _('"%s" deletion policy not supported') % policy
            raise exception.StackValidationFailed(message=msg)
@scheduler.wrappertask
def delete(self):
    '''Delete the resource.

    Subclasses should provide a handle_delete() method to customise
    deletion.
    '''
    action = self.DELETE

    if (self.action, self.status) == (self.DELETE, self.COMPLETE):
        return
    # No need to delete if the resource has never been created
    if self.action == self.INIT:
        return

    initial_state = self.state

    LOG.info(_LI('deleting %s'), six.text_type(self))

    with self._action_recorder(action):
        # Abandon always retains the physical resource.
        if self.abandon_in_progress:
            deletion_policy = self.t.RETAIN
        else:
            deletion_policy = self.t.deletion_policy()

        if deletion_policy != self.t.RETAIN:
            if deletion_policy == self.t.SNAPSHOT:
                action_args = [[initial_state], 'snapshot']
            else:
                action_args = []
            yield self.action_handler_task(action, *action_args)


@scheduler.wrappertask
def destroy(self):
    '''Delete the resource and remove it from the database.'''
    yield self.delete()

    if self.id is None:
        return

    try:
        db_api.resource_get(self.context, self.id).delete()
    except exception.NotFound:
        # Don't fail on delete if the db entry has
        # not been created yet.
        pass

    self.id = None


def resource_id_set(self, inst):
    '''Set the physical resource id, persisting it when stored.'''
    self.resource_id = inst
    if self.id is not None:
        try:
            rs = db_api.resource_get(self.context, self.id)
            rs.update_and_save({'nova_instance': self.resource_id})
        except Exception as ex:
            # FIX: Logger.warn is a deprecated alias of Logger.warning.
            LOG.warning(_LW('db error %s'), ex)


def _store(self):
    '''Create the resource in the database.'''
    metadata = self.metadata_get()
    try:
        rs = {'action': self.action,
              'status': self.status,
              'status_reason': self.status_reason,
              'stack_id': self.stack.id,
              'nova_instance': self.resource_id,
              'name': self.name,
              'rsrc_metadata': metadata,
              'properties_data': self._stored_properties_data,
              'stack_name': self.stack.name}

        new_rs = db_api.resource_create(self.context, rs)
        self.id = new_rs.id
        self.created_time = new_rs.created_at
        self._rsrc_metadata = metadata
    except Exception as ex:
        LOG.error(_LE('DB error %s'), ex)


def _add_event(self, action, status, reason):
    '''Add a state change event to the database.'''
    ev = event.Event(self.context, self.stack, action, status, reason,
                     self.resource_id, self.properties,
                     self.name, self.type())
    ev.store()
def _store_or_update(self, action, status, reason):
    '''Record a state transition, creating the DB row when needed.'''
    self.action = action
    self.status = status
    self.status_reason = reason

    if self.id is not None:
        try:
            db_res = db_api.resource_get(self.context, self.id)
            db_res.update_and_save(
                {'action': self.action,
                 'status': self.status,
                 'status_reason': reason,
                 'stack_id': self.stack.id,
                 'updated_at': self.updated_time,
                 'properties_data': self._stored_properties_data,
                 'nova_instance': self.resource_id})
        except Exception as ex:
            LOG.error(_LE('DB error %s'), ex)

    # store resource in DB on transition to CREATE_IN_PROGRESS
    # all other transitions (other than to DELETE_COMPLETE)
    # should be handled by the update_and_save above..
    elif (action, status) in [(self.CREATE, self.IN_PROGRESS),
                              (self.ADOPT, self.IN_PROGRESS)]:
        self._store()


def _resolve_attribute(self, name):
    '''Default implementation; should be overridden by resources that
    expose attributes.

    :param name: The attribute to resolve
    :returns: the resource attribute named key
    '''
    # By default, no attributes resolve
    pass


def state_reset(self):
    '''Reset state to (INIT, COMPLETE).'''
    self.action = self.INIT
    self.status = self.COMPLETE


def state_set(self, action, status, reason="state changed"):
    '''Validate, persist and (on change) log a state transition.'''
    if action not in self.ACTIONS:
        raise ValueError(_("Invalid action %s") % action)

    if status not in self.STATUSES:
        raise ValueError(_("Invalid status %s") % status)

    previous = (self.action, self.status)
    current = (action, status)
    self._store_or_update(action, status, reason)
    if current != previous:
        self._add_event(action, status, reason)
    self.stack.reset_resource_attributes()


@property
def state(self):
    '''Returns state, tuple of action, status.'''
    return (self.action, self.status)
def FnGetRefId(self):
    '''For the intrinsic function Ref.

    :results: the id or name of the resource.
    '''
    if self.resource_id is not None:
        return six.text_type(self.resource_id)
    return six.text_type(self.name)


def physical_resource_name_or_FnGetRefId(self):
    '''Prefer the physical name; fall back to the base FnGetRefId.'''
    res_name = self.physical_resource_name()
    if res_name is not None:
        return six.text_type(res_name)
    return Resource.FnGetRefId(self)


def FnGetAtt(self, key, *path):
    '''For the intrinsic function Fn::GetAtt.

    :param key: the attribute key.
    :param path: a list of path components to select from the attribute.
    :returns: the attribute value.
    :raises exception.InvalidTemplateAttribute: for an unknown key.
    '''
    try:
        attribute = self.attributes[key]
    except KeyError:
        raise exception.InvalidTemplateAttribute(resource=self.name,
                                                 key=key)
    return attributes.select_from_attribute(attribute, path)


def FnBase64(self, data):
    '''For the intrinsic function Fn::Base64.

    :param data: the input data.
    :returns: the Base64 representation of the input data.
    '''
    return base64.b64encode(data)
def signal(self, details=None):
    '''Signal the resource.

    Subclasses should provide a handle_signal() method to implement the
    signal; the base-class raises an exception if no handler is
    implemented.
    '''
    def get_string_details():
        # Build a human-readable reason string from the signal payload.
        if details is None:
            return 'No signal details provided'
        if isinstance(details, six.string_types):
            return details
        if isinstance(details, dict):
            if all(k in details for k in ('previous', 'current',
                                          'reason')):
                # this is from Ceilometer.
                auto = '%(previous)s to %(current)s (%(reason)s)' % details
                return 'alarm state changed from %s' % auto
            elif 'state' in details:
                # this is from watchrule
                return 'alarm state changed to %(state)s' % details

        return 'Unknown'

    if not callable(getattr(self, 'handle_signal', None)):
        raise exception.ResourceActionNotSupported(action='signal')

    try:
        signal_result = self.handle_signal(details)
        if signal_result:
            reason_string = "Signal: %s" % signal_result
        else:
            reason_string = get_string_details()
        self._add_event('signal', self.status, reason_string)
    except Exception as ex:
        LOG.exception(_LE('signal %(name)s : %(msg)s')
                      % {'name': six.text_type(self),
                         'msg': ex})
        failure = exception.ResourceFailure(ex, self)
        raise failure


def handle_update(self, json_snippet=None, tmpl_diff=None,
                  prop_diff=None):
    '''Default update handler: any property diff forces replacement.'''
    if prop_diff:
        raise UpdateReplace(self.name)


def metadata_update(self, new_metadata=None):
    '''No-op for resources which don't explicitly override this
    method.'''
    if new_metadata:
        LOG.warn(_LW("Resource %s does not implement metadata update"),
                 self.name)


@classmethod
def resource_to_template(cls, resource_type):
    '''Map this class's schema onto a minimal template.

    :param resource_type: The resource type to be displayed in the
        template
    :returns: A template where the resource's properties_schema is
        mapped as parameters, and the resource's attributes_schema is
        mapped as outputs
    '''
    schema = cls.properties_schema
    params, props = (properties.Properties
                     .schema_to_parameters_and_properties(schema))

    resource_name = cls.__name__
    return {
        'HeatTemplateFormatVersion': '2012-12-12',
        'Parameters': params,
        'Resources': {
            resource_name: {
                'Type': resource_type,
                'Properties': props
            }
        },
        'Outputs': attributes.Attributes.as_outputs(resource_name, cls)
    }
def data(self):
    '''Return (lazily loading) this resource's key/value data.

    Use methods data_set and data_delete to modify the resource data
    for this resource.

    :returns: a dict representing the resource data for this resource.
    '''
    if self._data is None and self.id:
        try:
            self._data = db_api.resource_data_get_all(self)
        except exception.NotFound:
            pass

    return self._data or {}


def data_set(self, key, value, redact=False):
    '''Save resource's key/value pair to database.'''
    db_api.resource_data_set(self, key, value, redact)
    # force fetch all resource data from the database again
    self._data = None


def data_delete(self, key):
    '''Remove a resource_data element associated to a resource.

    :returns: True if the key existed to delete
    '''
    try:
        db_api.resource_data_delete(self, key)
    except exception.NotFound:
        return False
    # force fetch all resource data from the database again
    self._data = None
    return True


def is_using_neutron(self):
    '''Best-effort probe: True when a neutron client is obtainable.'''
    try:
        self.client('neutron')
    except Exception:
        return False
    return True