class AWSScalingPolicy(heat_sp.AutoScalingPolicy):
    """CloudFormation-compatible auto-scaling policy resource."""

    PROPERTIES = (
        AUTO_SCALING_GROUP_NAME,
        SCALING_ADJUSTMENT,
        ADJUSTMENT_TYPE,
        COOLDOWN,
        MIN_ADJUSTMENT_STEP,
    ) = (
        'AutoScalingGroupName',
        'ScalingAdjustment',
        'AdjustmentType',
        'Cooldown',
        'MinAdjustmentStep',
    )

    ATTRIBUTES = (ALARM_URL, ) = ('AlarmUrl', )

    properties_schema = {
        AUTO_SCALING_GROUP_NAME: properties.Schema(
            properties.Schema.STRING,
            _('AutoScaling group name to apply policy to.'),
            required=True),
        SCALING_ADJUSTMENT: properties.Schema(
            properties.Schema.INTEGER,
            _('Size of adjustment.'),
            required=True,
            update_allowed=True),
        ADJUSTMENT_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Type of adjustment (absolute or percentage).'),
            required=True,
            constraints=[
                constraints.AllowedValues([
                    sc_util.CFN_CHANGE_IN_CAPACITY,
                    sc_util.CFN_EXACT_CAPACITY,
                    sc_util.CFN_PERCENT_CHANGE_IN_CAPACITY,
                ]),
            ],
            update_allowed=True),
        COOLDOWN: properties.Schema(
            properties.Schema.INTEGER,
            _('Cooldown period, in seconds.'),
            update_allowed=True),
        MIN_ADJUSTMENT_STEP: properties.Schema(
            properties.Schema.INTEGER,
            _('Minimum number of resources that are added or removed '
              'when the AutoScaling group scales up or down. This can '
              'be used only when specifying PercentChangeInCapacity '
              'for the AdjustmentType property.'),
            constraints=[
                constraints.Range(min=0),
            ],
            update_allowed=True),
    }

    attributes_schema = {
        ALARM_URL: attributes.Schema(
            _("A signed url to handle the alarm. (Heat extension)."),
            type=attributes.Schema.STRING),
    }

    def _validate_min_adjustment_step(self):
        """Reject MinAdjustmentStep unless AdjustmentType is percent-based."""
        step = self.properties.get(self.MIN_ADJUSTMENT_STEP)
        if step is None:
            return
        adjustment_type = self.properties.get(self.ADJUSTMENT_TYPE)
        if adjustment_type != sc_util.CFN_PERCENT_CHANGE_IN_CAPACITY:
            raise exception.ResourcePropertyValueDependency(
                prop1=self.MIN_ADJUSTMENT_STEP,
                prop2=self.ADJUSTMENT_TYPE,
                value=sc_util.CFN_PERCENT_CHANGE_IN_CAPACITY)

    def get_reference_id(self):
        """Return the signed alarm URL once created, else the resource name."""
        if self.resource_id is None:
            return six.text_type(self.name)
        return six.text_type(self._get_ec2_signed_url())
# Example #2
class Instance(resource.Resource):
    """OpenStack cloud database instance resource.

    Trove is Database as a Service for OpenStack. It's designed to run entirely
    on OpenStack, with the goal of allowing users to quickly and easily utilize
    the features of a relational or non-relational database without the burden
    of handling complex administrative tasks.
    """

    support_status = support.SupportStatus(version='2014.1')

    # Trove-reported instance states this resource reacts to while polling.
    TROVE_STATUS = (
        ERROR,
        FAILED,
        ACTIVE,
    ) = (
        'ERROR',
        'FAILED',
        'ACTIVE',
    )

    # Human-readable explanations attached to ResourceInError when an
    # instance lands in one of the failure states below.
    TROVE_STATUS_REASON = {
        FAILED:
        _('The database instance was created, but heat failed to set '
          'up the datastore. If a database instance is in the FAILED '
          'state, it should be deleted and a new one should be '
          'created.'),
        ERROR:
        _('The last operation for the database instance failed due to '
          'an error.'),
    }

    # States treated as unrecoverable during create/update polling.
    BAD_STATUSES = (ERROR, FAILED)
    PROPERTIES = (
        NAME,
        FLAVOR,
        SIZE,
        DATABASES,
        USERS,
        AVAILABILITY_ZONE,
        RESTORE_POINT,
        DATASTORE_TYPE,
        DATASTORE_VERSION,
        NICS,
        REPLICA_OF,
        REPLICA_COUNT,
    ) = ('name', 'flavor', 'size', 'databases', 'users', 'availability_zone',
         'restore_point', 'datastore_type', 'datastore_version', 'networks',
         'replica_of', 'replica_count')

    _DATABASE_KEYS = (
        DATABASE_CHARACTER_SET,
        DATABASE_COLLATE,
        DATABASE_NAME,
    ) = (
        'character_set',
        'collate',
        'name',
    )

    _USER_KEYS = (
        USER_NAME,
        USER_PASSWORD,
        USER_HOST,
        USER_DATABASES,
    ) = (
        'name',
        'password',
        'host',
        'databases',
    )

    _NICS_KEYS = (NET, PORT, V4_FIXED_IP) = ('network', 'port', 'fixed_ip')

    ATTRIBUTES = (
        HOSTNAME,
        HREF,
    ) = (
        'hostname',
        'href',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the DB instance to create.'),
                          update_allowed=True,
                          constraints=[
                              constraints.Length(max=255),
                          ]),
        FLAVOR:
        properties.Schema(
            properties.Schema.STRING,
            _('Reference to a flavor for creating DB instance.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.CustomConstraint('trove.flavor')]),
        DATASTORE_TYPE:
        properties.Schema(properties.Schema.STRING,
                          _("Name of registered datastore type."),
                          constraints=[constraints.Length(max=255)]),
        DATASTORE_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _("Name of the registered datastore version. "
              "It must exist for provided datastore type. "
              "Defaults to using single active version. "
              "If several active versions exist for provided datastore type, "
              "explicit value for this parameter must be specified."),
            constraints=[constraints.Length(max=255)]),
        SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Database volume size in GB.'),
                          required=True,
                          update_allowed=True,
                          constraints=[
                              constraints.Range(1, 150),
                          ]),
        NICS:
        properties.Schema(
            properties.Schema.LIST,
            _("List of network interfaces to create on instance."),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NET:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of the network to attach this NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT,
                              'net': NET
                          },
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ]),
                    PORT:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of Neutron port to attach this '
                          'NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT,
                              'net': NET
                          },
                        constraints=[
                            constraints.CustomConstraint('neutron.port')
                        ],
                    ),
                    V4_FIXED_IP:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Fixed IPv4 address for this NIC.'),
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                },
            ),
        ),
        DATABASES:
        properties.Schema(
            properties.Schema.LIST,
            _('List of databases to be created on DB instance creation.'),
            default=[],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    DATABASE_CHARACTER_SET:
                    properties.Schema(properties.Schema.STRING,
                                      _('Set of symbols and encodings.'),
                                      default='utf8'),
                    DATABASE_COLLATE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Set of rules for comparing characters in a '
                          'character set.'),
                        default='utf8_general_ci'),
                    DATABASE_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Specifies database names for creating '
                          'databases on instance creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=64),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_\-]+'
                                                       r'[a-zA-Z0-9_@?#\s\-]*'
                                                       r'[a-zA-Z0-9_\-]+'),
                        ]),
                },
            )),
        USERS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of users to be created on DB instance creation.'),
            default=[],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    USER_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('User name to create a user on instance '
                          'creation.'),
                        required=True,
                        update_allowed=True,
                        constraints=[
                            constraints.Length(max=16),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                    USER_PASSWORD:
                    properties.Schema(properties.Schema.STRING,
                                      _('Password for those users on instance '
                                        'creation.'),
                                      required=True,
                                      update_allowed=True,
                                      constraints=[
                                          constraints.AllowedPattern(
                                              r'[a-zA-Z0-9_]+'
                                              r'[a-zA-Z0-9_@?#\s]*'
                                              r'[a-zA-Z0-9_]+'),
                                      ]),
                    USER_HOST:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The host from which a user is allowed to '
                          'connect to the database.'),
                        default='%',
                        update_allowed=True),
                    USER_DATABASES:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('Names of databases that those users can '
                          'access on instance creation.'),
                        schema=properties.Schema(properties.Schema.STRING, ),
                        required=True,
                        update_allowed=True,
                        constraints=[
                            constraints.Length(min=1),
                        ]),
                },
            )),
        AVAILABILITY_ZONE:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the availability zone for DB instance.')),
        RESTORE_POINT:
        properties.Schema(properties.Schema.STRING,
                          _('DB instance restore point.')),
        REPLICA_OF:
        properties.Schema(
            properties.Schema.STRING,
            _('Identifier of the source instance to replicate.'),
            support_status=support.SupportStatus(version='5.0.0')),
        REPLICA_COUNT:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The number of replicas to be created.'),
            support_status=support.SupportStatus(version='5.0.0')),
    }

    attributes_schema = {
        HOSTNAME:
        attributes.Schema(_("Hostname of the instance."),
                          type=attributes.Schema.STRING),
        HREF:
        attributes.Schema(_("Api endpoint reference of the instance."),
                          type=attributes.Schema.STRING),
    }

    default_client_name = 'trove'

    entity = 'instances'

    def __init__(self, name, json_snippet, stack):
        super(Instance, self).__init__(name, json_snippet, stack)
        # Lazily-resolved caches used by href() and the dbinstance property.
        self._href = None
        self._dbinstance = None

    @property
    def dbinstance(self):
        """Get the trove dbinstance."""
        # Fetched at most once per resource object and only after a
        # resource_id exists.
        if not self._dbinstance and self.resource_id:
            self._dbinstance = self.client().instances.get(self.resource_id)

        return self._dbinstance

    def _dbinstance_name(self):
        """Return the user-supplied name, or a generated physical name."""
        name = self.properties[self.NAME]
        if name:
            return name

        return self.physical_resource_name()

    def handle_create(self):
        """Create cloud database instance."""
        self.flavor = self.client_plugin().find_flavor_by_name_or_id(
            self.properties[self.FLAVOR])
        self.volume = {'size': self.properties[self.SIZE]}
        self.databases = self.properties[self.DATABASES]
        self.users = self.properties[self.USERS]
        restore_point = self.properties[self.RESTORE_POINT]
        if restore_point:
            restore_point = {"backupRef": restore_point}
        zone = self.properties[self.AVAILABILITY_ZONE]
        self.datastore_type = self.properties[self.DATASTORE_TYPE]
        self.datastore_version = self.properties[self.DATASTORE_VERSION]
        replica_of = self.properties[self.REPLICA_OF]
        replica_count = self.properties[self.REPLICA_COUNT]

        # convert user databases to format required for troveclient.
        # that is, list of database dictionaries
        for user in self.users:
            dbs = [{'name': db} for db in user.get(self.USER_DATABASES, [])]
            user[self.USER_DATABASES] = dbs

        # convert networks to format required by troveclient
        nics = []
        for nic in self.properties[self.NICS]:
            nic_dict = {}
            net = nic.get(self.NET)
            if net:
                if self.is_using_neutron():
                    net_id = self.client_plugin(
                        'neutron').find_resourceid_by_name_or_id(
                            'network', net)
                else:
                    net_id = (
                        self.client_plugin('nova').get_nova_network_id(net))
                nic_dict['net-id'] = net_id
            port = nic.get(self.PORT)
            if port:
                neutron = self.client_plugin('neutron')
                nic_dict['port-id'] = neutron.find_resourceid_by_name_or_id(
                    'port', port)
            ip = nic.get(self.V4_FIXED_IP)
            if ip:
                nic_dict['v4-fixed-ip'] = ip
            nics.append(nic_dict)

        # create db instance
        instance = self.client().instances.create(
            self._dbinstance_name(),
            self.flavor,
            volume=self.volume,
            databases=self.databases,
            users=self.users,
            restorePoint=restore_point,
            availability_zone=zone,
            datastore=self.datastore_type,
            datastore_version=self.datastore_version,
            nics=nics,
            replica_of=replica_of,
            replica_count=replica_count)
        # Record the id immediately so the instance is tracked even if the
        # create never completes.
        self.resource_id_set(instance.id)

        return instance.id

    def _refresh_instance(self, instance_id):
        """Fetch the trove instance, returning None on an over-limit reply.

        Returning None lets the polling caller retry later instead of
        failing; any other client exception is re-raised.
        """
        try:
            instance = self.client().instances.get(instance_id)
            return instance
        except Exception as exc:
            if self.client_plugin().is_over_limit(exc):
                LOG.warning(
                    _LW("Stack %(name)s (%(id)s) received an "
                        "OverLimit response during instance.get():"
                        " %(exception)s"), {
                            'name': self.stack.name,
                            'id': self.stack.id,
                            'exception': exc
                        })
                return None
            else:
                raise

    def check_create_complete(self, instance_id):
        """Check if cloud DB instance creation is complete."""
        instance = self._refresh_instance(instance_id)  # refresh attributes
        if instance is None:
            return False
        if instance.status in self.BAD_STATUSES:
            raise exception.ResourceInError(
                resource_status=instance.status,
                status_reason=self.TROVE_STATUS_REASON.get(
                    instance.status, _("Unknown")))

        if instance.status != self.ACTIVE:
            return False
        LOG.info(
            _LI("Database instance %(database)s created "
                "(flavor:%(flavor)s, volume:%(volume)s, "
                "datastore:%(datastore_type)s, "
                "datastore_version:%(datastore_version)s)"), {
                    'database': self._dbinstance_name(),
                    'flavor': self.flavor,
                    'volume': self.volume,
                    'datastore_type': self.datastore_type,
                    'datastore_version': self.datastore_version
                })
        return True

    def handle_check(self):
        """Verify that the instance is currently in the ACTIVE state."""
        instance = self.client().instances.get(self.resource_id)
        status = instance.status
        checks = [
            {
                'attr': 'status',
                'expected': self.ACTIVE,
                'current': status
            },
        ]
        self._verify_check_conditions(checks)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Collect the pending property changes into an updates dict.

        Databases/users that exist on the instance but are absent from the
        new property value are appended to prop_diff tagged with a DELETE
        action; new entries are tagged CREATE. check_update_complete()
        consumes the resulting dict.
        """
        updates = {}
        if prop_diff:
            instance = self.client().instances.get(self.resource_id)
            if self.NAME in prop_diff:
                updates.update({self.NAME: prop_diff[self.NAME]})
            if self.FLAVOR in prop_diff:
                flvid = prop_diff[self.FLAVOR]
                flv = self.client_plugin().get_flavor_id(flvid)
                updates.update({self.FLAVOR: flv})
            if self.SIZE in prop_diff:
                updates.update({self.SIZE: prop_diff[self.SIZE]})
            if self.DATABASES in prop_diff:
                current = [
                    d.name for d in self.client().databases.list(instance)
                ]
                desired = [
                    d[self.DATABASE_NAME] for d in prop_diff[self.DATABASES]
                ]
                for db in prop_diff[self.DATABASES]:
                    dbname = db[self.DATABASE_NAME]
                    if dbname not in current:
                        db['ACTION'] = self.CREATE
                for dbname in current:
                    if dbname not in desired:
                        deleted = {
                            self.DATABASE_NAME: dbname,
                            'ACTION': self.DELETE
                        }
                        prop_diff[self.DATABASES].append(deleted)
                updates.update({self.DATABASES: prop_diff[self.DATABASES]})
            if self.USERS in prop_diff:
                current = [u.name for u in self.client().users.list(instance)]
                desired = [u[self.USER_NAME] for u in prop_diff[self.USERS]]
                for usr in prop_diff[self.USERS]:
                    if usr[self.USER_NAME] not in current:
                        usr['ACTION'] = self.CREATE
                for usr in current:
                    if usr not in desired:
                        prop_diff[self.USERS].append({
                            self.USER_NAME: usr,
                            'ACTION': self.DELETE
                        })
                updates.update({self.USERS: prop_diff[self.USERS]})
        return updates

    def check_update_complete(self, updates):
        """Apply pending updates until every one reports convergence.

        Each _update_* helper returns False right after issuing an API
        call, so the short-circuiting `and` chain causes this method to be
        polled again; True is returned only once all changes have settled.
        """
        instance = self.client().instances.get(self.resource_id)
        if instance.status in self.BAD_STATUSES:
            raise exception.ResourceInError(
                resource_status=instance.status,
                status_reason=self.TROVE_STATUS_REASON.get(
                    instance.status, _("Unknown")))
        if updates:
            if instance.status != self.ACTIVE:
                dmsg = ("Instance is in status %(now)s. Waiting on status"
                        " %(stat)s")
                LOG.debug(dmsg % {"now": instance.status, "stat": self.ACTIVE})
                return False
            try:
                return (
                    self._update_name(instance, updates.get(self.NAME))
                    and self._update_flavor(instance, updates.get(self.FLAVOR))
                    and self._update_size(instance, updates.get(self.SIZE))
                    and self._update_databases(instance,
                                               updates.get(self.DATABASES))
                    and self._update_users(instance, updates.get(self.USERS)))
            except Exception as exc:
                if self.client_plugin().is_client_exception(exc):
                    # the instance could have updated between the time
                    # we retrieve it and try to update it so check again
                    if self.client_plugin().is_over_limit(exc):
                        LOG.debug("API rate limit: %(ex)s. Retrying." %
                                  {'ex': six.text_type(exc)})
                        return False
                    if "No change was requested" in six.text_type(exc):
                        LOG.warning(
                            _LW("Unexpected instance state change "
                                "during update. Retrying."))
                        return False
                raise
        return True

    def _update_name(self, instance, name):
        """Rename the instance; False means a change was just issued."""
        if name and instance.name != name:
            self.client().instances.edit(instance, name=name)
            return False
        return True

    def _update_flavor(self, instance, new_flavor):
        """Resize the instance flavor; False means a resize was issued."""
        if new_flavor:
            current_flav = six.text_type(instance.flavor['id'])
            new_flav = six.text_type(new_flavor)
            if new_flav != current_flav:
                dmsg = "Resizing instance flavor from %(old)s to %(new)s"
                LOG.debug(dmsg % {"old": current_flav, "new": new_flav})
                self.client().instances.resize_instance(instance, new_flavor)
                return False
        return True

    def _update_size(self, instance, new_size):
        """Resize the instance volume; False means a resize was issued."""
        if new_size and instance.volume['size'] != new_size:
            dmsg = "Resizing instance storage from %(old)s to %(new)s"
            LOG.debug(dmsg % {"old": instance.volume['size'], "new": new_size})
            self.client().instances.resize_volume(instance, new_size)
            return False
        return True

    def _update_databases(self, instance, databases):
        """Create/delete databases according to their tagged ACTION."""
        if databases:
            for db in databases:
                if db.get("ACTION") == self.CREATE:
                    db.pop("ACTION", None)
                    dmsg = "Adding new database %(db)s to instance"
                    LOG.debug(dmsg % {"db": db})
                    self.client().databases.create(instance, [db])
                elif db.get("ACTION") == self.DELETE:
                    dmsg = ("Deleting existing database %(db)s from "
                            "instance")
                    LOG.debug(dmsg % {"db": db['name']})
                    self.client().databases.delete(instance, db['name'])
        return True

    def _update_users(self, instance, users):
        """Create, delete or modify users according to their tagged ACTION.

        Entries without an ACTION tag are treated as modifications:
        host/password attributes are updated and database access grants are
        reconciled against the desired list.
        """
        if users:
            for usr in users:
                dbs = [{'name': db} for db in usr.get(self.USER_DATABASES, [])]
                usr[self.USER_DATABASES] = dbs
                if usr.get("ACTION") == self.CREATE:
                    usr.pop("ACTION", None)
                    dmsg = "Adding new user %(u)s to instance"
                    LOG.debug(dmsg % {"u": usr})
                    self.client().users.create(instance, [usr])
                elif usr.get("ACTION") == self.DELETE:
                    dmsg = ("Deleting existing user %(u)s from " "instance")
                    LOG.debug(dmsg % {"u": usr['name']})
                    self.client().users.delete(instance, usr['name'])
                else:
                    newattrs = {}
                    if usr.get(self.USER_HOST):
                        newattrs[self.USER_HOST] = usr[self.USER_HOST]
                    if usr.get(self.USER_PASSWORD):
                        newattrs[self.USER_PASSWORD] = usr[self.USER_PASSWORD]
                    if newattrs:
                        self.client().users.update_attributes(
                            instance,
                            usr['name'],
                            newuserattr=newattrs,
                            hostname=instance.hostname)
                    current = self.client().users.get(instance,
                                                      usr[self.USER_NAME])
                    dbs = [db['name'] for db in current.databases]
                    desired = [
                        db['name'] for db in usr.get(self.USER_DATABASES, [])
                    ]
                    grants = [db for db in desired if db not in dbs]
                    revokes = [db for db in dbs if db not in desired]
                    if grants:
                        self.client().users.grant(instance,
                                                  usr[self.USER_NAME], grants)
                    if revokes:
                        self.client().users.revoke(instance,
                                                   usr[self.USER_NAME],
                                                   revokes)
        return True

    def handle_delete(self):
        """Delete a cloud database instance."""
        if not self.resource_id:
            return

        try:
            instance = self.client().instances.get(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            instance.delete()
            return instance.id

    def check_delete_complete(self, instance_id):
        """Check for completion of cloud DB instance deletion."""
        if not instance_id:
            return True

        try:
            # For some time trove instance may continue to live
            self._refresh_instance(instance_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return True

        return False

    def validate(self):
        """Validate any of the provided params."""
        res = super(Instance, self).validate()
        if res:
            return res

        datastore_type = self.properties[self.DATASTORE_TYPE]
        datastore_version = self.properties[self.DATASTORE_VERSION]

        self.client_plugin().validate_datastore(datastore_type,
                                                datastore_version,
                                                self.DATASTORE_TYPE,
                                                self.DATASTORE_VERSION)

        # check validity of user and databases
        users = self.properties[self.USERS]
        if users:
            databases = self.properties[self.DATABASES]
            if not databases:
                msg = _('Databases property is required if users property '
                        'is provided for resource %s.') % self.name
                raise exception.StackValidationFailed(message=msg)

            db_names = set([db[self.DATABASE_NAME] for db in databases])
            for user in users:
                missing_db = [
                    db_name for db_name in user[self.USER_DATABASES]
                    if db_name not in db_names
                ]

                if missing_db:
                    msg = (_('Database %(dbs)s specified for user does '
                             'not exist in databases for resource %(name)s.') %
                           {
                               'dbs': missing_db,
                               'name': self.name
                           })
                    raise exception.StackValidationFailed(message=msg)

        # check validity of NICS
        is_neutron = self.is_using_neutron()
        nics = self.properties[self.NICS]
        for nic in nics:
            if not is_neutron and nic.get(self.PORT):
                msg = _("Can not use %s property on Nova-network.") % self.PORT
                raise exception.StackValidationFailed(message=msg)

            if bool(nic.get(self.NET)) == bool(nic.get(self.PORT)):
                msg = _("Either %(net)s or %(port)s must be provided.") % {
                    'net': self.NET,
                    'port': self.PORT
                }
                raise exception.StackValidationFailed(message=msg)

    def href(self):
        """Return the instance's 'self' link URL, cached after first lookup."""
        if not self._href and self.dbinstance:
            if not self.dbinstance.links:
                self._href = None
            else:
                for link in self.dbinstance.links:
                    if link['rel'] == 'self':
                        self._href = link[self.HREF]
                        break

        return self._href

    def _resolve_attribute(self, name):
        """Resolve the hostname/href attributes; None before creation."""
        if self.resource_id is None:
            return
        if name == self.HOSTNAME:
            return self.dbinstance.hostname
        elif name == self.HREF:
            return self.href()
# Example #3
class ElasticIp(resource.Resource):
    """An AWS-compatible elastic IP implemented via a Neutron floating IP.

    The floating IP is allocated on the external network resolved through
    the InternetGateway helper and may optionally be associated with the
    Nova server given by the InstanceId property.
    """

    PROPERTIES = (
        DOMAIN,
        INSTANCE_ID,
    ) = (
        'Domain',
        'InstanceId',
    )

    ATTRIBUTES = (ALLOCATION_ID, ) = ('AllocationId', )

    properties_schema = {
        DOMAIN:
        properties.Schema(
            properties.Schema.STRING,
            _('Set to "vpc" to have IP address allocation associated to your '
              'VPC.'),
            support_status=support.SupportStatus(
                status=support.DEPRECATED,
                message=_('Now we only allow vpc here, so no need to set up '
                          'this tag anymore.'),
                version='9.0.0'),
            constraints=[
                constraints.AllowedValues(['vpc']),
            ]),
        INSTANCE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('Instance ID to associate with EIP.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('nova.server')]),
    }

    attributes_schema = {
        ALLOCATION_ID:
        attributes.Schema(_(
            'ID that AWS assigns to represent the allocation of the address '
            'for use with Amazon VPC. Returned only for VPC elastic IP '
            'addresses.'),
                          type=attributes.Schema.STRING),
    }

    default_client_name = 'nova'

    def __init__(self, name, json_snippet, stack):
        super(ElasticIp, self).__init__(name, json_snippet, stack)
        # Cached floating IP address; lazily fetched in _ipaddress().
        self.ipaddress = None

    def _ipaddress(self):
        """Return the floating IP address, looking it up when not cached.

        Returns an empty string when the address cannot be determined
        (no resource yet, or the floating IP is gone).
        """
        if self.ipaddress is None and self.resource_id is not None:
            try:
                ips = self.neutron().show_floatingip(self.resource_id)
            except Exception as ex:
                self.client_plugin('neutron').ignore_not_found(ex)
            else:
                self.ipaddress = ips['floatingip']['floating_ip_address']
        return self.ipaddress or ''

    def handle_create(self):
        """Allocate a floating IP for the current tenant."""
        ext_net = internet_gateway.InternetGateway.get_external_network_id(
            self.neutron())
        props = {'floating_network_id': ext_net}
        ips = self.neutron().create_floatingip({'floatingip':
                                                props})['floatingip']
        self.ipaddress = ips['floating_ip_address']
        self.resource_id_set(ips['id'])
        LOG.info('ElasticIp create %s', str(ips))

        instance_id = self.properties[self.INSTANCE_ID]
        if instance_id:
            server = self.client().servers.get(instance_id)
            server.add_floating_ip(self._ipaddress())

    def handle_delete(self):
        """Disassociate (best-effort) and release the floating IP."""
        if self.resource_id is None:
            return
        # The association may never have been made (EIP-only template, or
        # the association failed during creation); disassociating then
        # raises, so ignore those errors and still release the floating IP.
        instance_id = self.properties[self.INSTANCE_ID]
        if instance_id:
            try:
                server = self.client().servers.get(instance_id)
                if server:
                    server.remove_floating_ip(self._ipaddress())
            except Exception as e:
                is_not_found = self.client_plugin('nova').is_not_found(e)
                is_unprocessable_entity = self.client_plugin(
                    'nova').is_unprocessable_entity(e)

                if (not is_not_found and not is_unprocessable_entity):
                    raise

        # deallocate the eip
        with self.client_plugin('neutron').ignore_not_found:
            self.neutron().delete_floatingip(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-associate the IP when the InstanceId property changes."""
        if prop_diff:
            if self.INSTANCE_ID in prop_diff:
                instance_id = prop_diff.get(self.INSTANCE_ID)
                if instance_id:
                    # no need to remove the floating ip from the old instance,
                    # nova does this automatically when calling
                    # add_floating_ip().
                    server = self.client().servers.get(instance_id)
                    server.add_floating_ip(self._ipaddress())
                else:
                    # InstanceId was cleared: remove the floating IP from
                    # the previously associated instance.
                    instance_id_old = self.properties[self.INSTANCE_ID]
                    if instance_id_old:
                        server = self.client().servers.get(instance_id_old)
                        server.remove_floating_ip(self._ipaddress())

    def get_reference_id(self):
        """Return the floating IP address, or the resource name if unset."""
        eip = self._ipaddress()
        if eip:
            return six.text_type(eip)
        else:
            return six.text_type(self.name)

    def _resolve_attribute(self, name):
        """Resolve AllocationId as the floating IP's Neutron UUID."""
        if name == self.ALLOCATION_ID:
            return six.text_type(self.resource_id)
# Example #4
class BGPVPNRouterAssoc(neutron.NeutronResource):
    """A resource for BGPVPNRouterAssoc in neutron.

    """

    PROPERTIES = (BGPVPN_ID, ROUTER_ID) = ('bgpvpn_id', 'router_id')

    ATTRIBUTES = (SHOW, STATUS) = ('show', 'status')

    properties_schema = {
        BGPVPN_ID: properties.Schema(
            properties.Schema.STRING,
            _('name or ID of the BGPVPN.'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.bgpvpn')]),
        ROUTER_ID: properties.Schema(
            properties.Schema.STRING,
            _('Router which shall be associated with the BGPVPN.'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.router')])
    }

    attributes_schema = {
        STATUS: attributes.Schema(_('Status of bgpvpn.'), ),
        SHOW: attributes.Schema(_('All attributes.')),
    }

    def validate(self):
        """Run the standard Neutron resource validation."""
        super(BGPVPNRouterAssoc, self).validate()

    def handle_create(self):
        """Create the router association on the referenced BGPVPN."""
        self.props = self.prepare_properties(self.properties,
                                             self.physical_resource_name())

        # bgpvpn_id addresses the parent BGPVPN, it is not part of the
        # association body itself.
        association = self.props.copy()
        del association['bgpvpn_id']

        bgpvpn_id = self.client_plugin().find_resourceid_by_name_or_id(
            'bgpvpn', self.props['bgpvpn_id'])

        created = self.neutron().create_bgpvpn_router_assoc(
            bgpvpn_id, {'router_association': association})
        self.resource_id_set(created['router_association']['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Updates are not supported for this resource."""
        raise NotImplementedError()

    def handle_delete(self):
        """Delete the association; a vanished resource is not an error."""
        try:
            self.neutron().delete_bgpvpn_router_assoc(
                self.properties['bgpvpn_id'], self.resource_id)
        except Exception as exc:
            self.client_plugin().ignore_not_found(exc)
        else:
            return True

    def _confirm_delete(self):
        """Poll the association until it can no longer be shown."""
        while True:
            try:
                self._show_resource()
            except exception.NotFound:
                return

    def _show_resource(self):
        """Fetch the association from Neutron by BGPVPN and resource id."""
        return self.neutron().show_bgpvpn_router_assoc(
            self.properties['bgpvpn_id'], self.resource_id)
class Workflow(signal_responder.SignalResponder, resource.Resource):
    """A resource that implements Mistral workflow.

    Workflow represents a process that can be described in a various number of
    ways and that can do some job interesting to the end user. Each workflow
    consists of tasks (at least one) describing what exact steps should be made
    during workflow execution.

    For detailed description how to use Workflow, read Mistral documentation.
    """

    support_status = support.SupportStatus(version='2015.1')

    default_client_name = 'mistral'

    entity = 'workflows'

    PROPERTIES = (NAME, TYPE, DESCRIPTION, INPUT, OUTPUT, TASKS, PARAMS,
                  TASK_DEFAULTS,
                  USE_REQUEST_BODY_AS_INPUT) = ('name', 'type', 'description',
                                                'input', 'output', 'tasks',
                                                'params', 'task_defaults',
                                                'use_request_body_as_input')

    _TASKS_KEYS = (TASK_NAME, TASK_DESCRIPTION, ON_ERROR, ON_COMPLETE,
                   ON_SUCCESS, POLICIES, ACTION, WORKFLOW, PUBLISH, TASK_INPUT,
                   REQUIRES, RETRY, WAIT_BEFORE, WAIT_AFTER, PAUSE_BEFORE,
                   TIMEOUT, WITH_ITEMS, KEEP_RESULT, TARGET,
                   JOIN) = ('name', 'description', 'on_error', 'on_complete',
                            'on_success', 'policies', 'action', 'workflow',
                            'publish', 'input', 'requires', 'retry',
                            'wait_before', 'wait_after', 'pause_before',
                            'timeout', 'with_items', 'keep_result', 'target',
                            'join')

    # Task keys that may also appear in the workflow-level task-defaults map.
    _TASKS_TASK_DEFAULTS = [
        ON_ERROR, ON_COMPLETE, ON_SUCCESS, REQUIRES, RETRY, WAIT_BEFORE,
        WAIT_AFTER, PAUSE_BEFORE, TIMEOUT
    ]

    _SIGNAL_DATA_KEYS = (SIGNAL_DATA_INPUT, SIGNAL_DATA_PARAMS) = ('input',
                                                                   'params')

    ATTRIBUTES = (WORKFLOW_DATA, ALARM_URL, EXECUTIONS) = ('data', 'alarm_url',
                                                           'executions')

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING, _('Workflow name.')),
        TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('Workflow type.'),
            constraints=[constraints.AllowedValues(['direct', 'reverse'])],
            required=True,
            update_allowed=True),
        USE_REQUEST_BODY_AS_INPUT:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Defines the method in which the request body for signaling a '
              'workflow would be parsed. In case this property is set to '
              'True, the body would be parsed as a simple json where each '
              'key is a workflow input, in other cases body would be parsed '
              'expecting a specific json format with two keys: "input" and '
              '"params"'),
            update_allowed=True,
            support_status=support.SupportStatus(version='6.0.0')),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Workflow description.'),
                          update_allowed=True),
        INPUT:
        properties.Schema(properties.Schema.MAP,
                          _('Dictionary which contains input for workflow.'),
                          update_allowed=True),
        OUTPUT:
        properties.Schema(properties.Schema.MAP,
                          _('Any data structure arbitrarily containing YAQL '
                            'expressions that defines workflow output. May be '
                            'nested.'),
                          update_allowed=True),
        PARAMS:
        properties.Schema(
            properties.Schema.MAP,
            _("Workflow additional parameters. If Workflow is reverse typed, "
              "params requires 'task_name', which defines initial task."),
            update_allowed=True),
        TASK_DEFAULTS:
        properties.Schema(
            properties.Schema.MAP,
            _("Default settings for some of task "
              "attributes defined "
              "at workflow level."),
            support_status=support.SupportStatus(version='5.0.0'),
            schema={
                ON_SUCCESS:
                properties.Schema(
                    properties.Schema.LIST,
                    _('List of tasks which will run after '
                      'the task has completed successfully.')),
                ON_ERROR:
                properties.Schema(
                    properties.Schema.LIST,
                    _('List of tasks which will run after '
                      'the task has completed with an error.')),
                ON_COMPLETE:
                properties.Schema(
                    properties.Schema.LIST,
                    _('List of tasks which will run after '
                      'the task has completed regardless of whether '
                      'it is successful or not.')),
                REQUIRES:
                properties.Schema(
                    properties.Schema.LIST,
                    _('List of tasks which should be executed before '
                      'this task. Used only in reverse workflows.')),
                RETRY:
                properties.Schema(
                    properties.Schema.MAP,
                    _('Defines a pattern how task should be repeated in '
                      'case of an error.')),
                WAIT_BEFORE:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('Defines a delay in seconds that Mistral Engine'
                      ' should wait before starting a task.')),
                WAIT_AFTER:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('Defines a delay in seconds that Mistral Engine'
                      ' should wait after a task has completed before'
                      ' starting next tasks defined in '
                      'on-success, on-error or on-complete.')),
                PAUSE_BEFORE:
                properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('Defines whether Mistral Engine should put the '
                      'workflow on hold or not before starting a task')),
                TIMEOUT:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('Defines a period of time in seconds after which '
                      'a task will be failed automatically '
                      'by engine if hasn\'t completed.')),
            },
            update_allowed=True),
        TASKS:
        properties.Schema(
            properties.Schema.LIST,
            _('Dictionary containing workflow tasks.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    TASK_NAME:
                    properties.Schema(properties.Schema.STRING,
                                      _('Task name.'),
                                      required=True),
                    TASK_DESCRIPTION:
                    properties.Schema(properties.Schema.STRING,
                                      _('Task description.')),
                    TASK_INPUT:
                    properties.Schema(
                        properties.Schema.MAP,
                        _('Actual input parameter values of the task.')),
                    ACTION:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name of the action associated with the task. '
                          'Either action or workflow may be defined in the '
                          'task.')),
                    WORKFLOW:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name of the workflow associated with the task. '
                          'Can be defined by intrinsic function get_resource '
                          'or by name of the referenced workflow, i.e. '
                          '{ workflow: wf_name } or '
                          '{ workflow: { get_resource: wf_name }}. Either '
                          'action or workflow may be defined in the task.')),
                    PUBLISH:
                    properties.Schema(
                        properties.Schema.MAP,
                        _('Dictionary of variables to publish to '
                          'the workflow context.')),
                    ON_SUCCESS:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('List of tasks which will run after '
                          'the task has completed successfully.')),
                    ON_ERROR:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('List of tasks which will run after '
                          'the task has completed with an error.')),
                    ON_COMPLETE:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('List of tasks which will run after '
                          'the task has completed regardless of whether '
                          'it is successful or not.')),
                    POLICIES:
                    properties.Schema(
                        properties.Schema.MAP,
                        _('Dictionary-like section defining task policies '
                          'that influence how Mistral Engine runs tasks. Must '
                          'satisfy Mistral DSL v2.'),
                        support_status=support.SupportStatus(
                            status=support.DEPRECATED,
                            version='5.0.0',
                            message=_('Add needed policies directly to '
                                      'the task, Policy keyword is not '
                                      'needed'),
                            previous_status=support.SupportStatus(
                                version='2015.1'))),
                    REQUIRES:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('List of tasks which should be executed before '
                          'this task. Used only in reverse workflows.')),
                    RETRY:
                    properties.Schema(
                        properties.Schema.MAP,
                        _('Defines a pattern how task should be repeated in '
                          'case of an error.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    WAIT_BEFORE:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('Defines a delay in seconds that Mistral Engine '
                          'should wait before starting a task.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    WAIT_AFTER:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('Defines a delay in seconds that Mistral '
                          'Engine should wait after '
                          'a task has completed before starting next tasks '
                          'defined in on-success, on-error or on-complete.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    PAUSE_BEFORE:
                    properties.Schema(
                        properties.Schema.BOOLEAN,
                        _('Defines whether Mistral Engine should '
                          'put the workflow on hold '
                          'or not before starting a task.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    TIMEOUT:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('Defines a period of time in seconds after which a '
                          'task will be failed automatically by engine '
                          'if hasn\'t completed.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    WITH_ITEMS:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('If configured, it allows to run action or workflow '
                          'associated with a task multiple times '
                          'on a provided list of items.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    KEEP_RESULT:
                    properties.Schema(
                        properties.Schema.BOOLEAN,
                        _('Allowing not to store action results '
                          'after task completion.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    TARGET:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('It defines an executor to which task action '
                          'should be sent to.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    JOIN:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Allows to synchronize multiple parallel workflow '
                          'branches and aggregate their data. '
                          'Valid inputs: all - the task will run only if '
                          'all upstream tasks are completed.'
                          ' Any numeric value - then the task will run once '
                          'at least this number of upstream tasks are '
                          'completed and corresponding conditions have '
                          'triggered.'),
                        support_status=support.SupportStatus(version='6.0.0')),
                },
            ),
            required=True,
            update_allowed=True)
    }

    attributes_schema = {
        WORKFLOW_DATA:
        attributes.Schema(
            _('A dictionary which contains name and input of the workflow.'),
            type=attributes.Schema.MAP),
        ALARM_URL:
        attributes.Schema(_(
            "A signed url to create executions for workflows specified in "
            "Workflow resource."),
                          type=attributes.Schema.STRING),
        EXECUTIONS:
        attributes.Schema(_(
            "List of workflows' executions, each of them is a dictionary "
            "with information about execution. Each dictionary returns "
            "values for next keys: id, workflow_name, created_at, "
            "updated_at, state for current execution state, input, output."),
                          type=attributes.Schema.LIST)
    }

    def get_reference_id(self):
        """Return the workflow name used as this resource's reference id."""
        return self._workflow_name()

    def _get_inputs_and_params(self, data):
        """Split signal payload into workflow inputs and execution params.

        When USE_REQUEST_BODY_AS_INPUT is set the whole body is treated as
        the input map; otherwise the body is expected to carry explicit
        'input' and 'params' keys.
        """
        inputs = None
        params = None
        if self.properties.get(self.USE_REQUEST_BODY_AS_INPUT):
            inputs = data
        else:
            if data is not None:
                inputs = data.get(self.SIGNAL_DATA_INPUT)
                params = data.get(self.SIGNAL_DATA_PARAMS)
        return inputs, params

    def _validate_signal_data(self, data):
        """Validate signal payload: maps only, inputs must be declared."""
        input_value, params_value = self._get_inputs_and_params(data)
        if input_value is not None:
            if not isinstance(input_value, dict):
                message = (_('Input in signal data must be a map, '
                             'find a %s') % type(input_value))
                raise exception.StackValidationFailed(
                    error=_('Signal data error'), message=message)
            for key in six.iterkeys(input_value):
                if (self.properties.get(self.INPUT) is None
                        or key not in self.properties.get(self.INPUT)):
                    message = _('Unknown input %s') % key
                    raise exception.StackValidationFailed(
                        error=_('Signal data error'), message=message)
        if params_value is not None and not isinstance(params_value, dict):
            message = (_('Params must be a map, find a '
                         '%s') % type(params_value))
            raise exception.StackValidationFailed(error=_('Signal data error'),
                                                  message=message)

    def validate(self):
        """Validate reverse/direct constraints and per-task exclusivity."""
        super(Workflow, self).validate()
        if self.properties.get(self.TYPE) == 'reverse':
            # reverse workflows need an initial task in params.
            params = self.properties.get(self.PARAMS)
            if params is None or not params.get('task_name'):
                raise exception.StackValidationFailed(
                    error=_('Mistral resource validation error'),
                    path=[
                        self.name,
                        ('properties' if self.stack.t.VERSION
                         == 'heat_template_version' else 'Properties'),
                        self.PARAMS
                    ],
                    message=_("'task_name' is not assigned in 'params' "
                              "in case of reverse type workflow."))
        for task in self.properties.get(self.TASKS):
            wf_value = task.get(self.WORKFLOW)
            action_value = task.get(self.ACTION)
            # Exactly one of action/workflow must be given per task.
            if wf_value and action_value:
                raise exception.ResourcePropertyConflict(
                    self.WORKFLOW, self.ACTION)
            if not wf_value and not action_value:
                raise exception.PropertyUnspecifiedError(
                    self.WORKFLOW, self.ACTION)
            # NOTE: previously the closing parenthesis was misplaced so that
            # the whole 'and' expression was compared to 'direct'; it only
            # worked because 'and' returns its second operand.
            if (task.get(self.REQUIRES) is not None
                    and self.properties.get(self.TYPE) == 'direct'):
                msg = _("task %(task)s contains property 'requires' "
                        "in case of direct workflow. Only reverse workflows "
                        "can contain property 'requires'.") % {
                            'name': self.name,
                            'task': task.get(self.TASK_NAME)
                        }
                raise exception.StackValidationFailed(
                    error=_('Mistral resource validation error'),
                    path=[
                        self.name,
                        ('properties' if self.stack.t.VERSION
                         == 'heat_template_version' else 'Properties'),
                        self.TASKS,
                        task.get(self.TASK_NAME), self.REQUIRES
                    ],
                    message=msg)

            if task.get(self.POLICIES) is not None:
                # A policy key must not also be given directly on the task.
                for task_item in task.get(self.POLICIES):
                    if task.get(task_item) is not None:
                        msg = _('Property %(policies)s and %(item)s cannot be '
                                'used both at one time.') % {
                                    'policies': self.POLICIES,
                                    'item': task_item
                                }
                        raise exception.StackValidationFailed(message=msg)

    def _workflow_name(self):
        """Return the explicit workflow name or the generated physical one."""
        return self.properties.get(self.NAME) or self.physical_resource_name()

    def build_tasks(self, props):
        """Yield {task_name: task_body} dicts in Mistral DSL v2 form."""
        for task in props[self.TASKS]:
            current_task = {}
            wf_value = task.get(self.WORKFLOW)
            if wf_value is not None:
                # The referenced workflow must resolve to a resource id of
                # some resource in this stack.
                if wf_value in [
                        res.resource_id for res in six.itervalues(self.stack)
                ]:
                    current_task.update({self.WORKFLOW: wf_value})
                else:
                    msg = _("No such workflow %s") % wf_value
                    raise ValueError(msg)

            # backward support for kilo: flatten deprecated 'policies' into
            # the task itself.
            if task.get(self.POLICIES) is not None:
                task.update(task.get(self.POLICIES))

            task_keys = [
                key for key in self._TASKS_KEYS
                if key not in [self.WORKFLOW, self.TASK_NAME, self.POLICIES]
            ]
            for task_prop in task_keys:
                if task.get(task_prop) is not None:
                    # Mistral DSL uses dashes where Heat properties use
                    # underscores.
                    current_task.update(
                        {task_prop.replace('_', '-'): task[task_prop]})

            yield {task[self.TASK_NAME]: current_task}

    def prepare_properties(self, props):
        """Prepare correct YAML-formatted definition for Mistral."""
        defn_name = self._workflow_name()
        definition = {
            'version': '2.0',
            defn_name: {
                self.TYPE: props.get(self.TYPE),
                self.DESCRIPTION: props.get(self.DESCRIPTION),
                self.OUTPUT: props.get(self.OUTPUT)
            }
        }
        # Drop unset keys so they do not appear as nulls in the YAML.
        for key in list(definition[defn_name].keys()):
            if definition[defn_name][key] is None:
                del definition[defn_name][key]
        if props.get(self.INPUT) is not None:
            definition[defn_name][self.INPUT] = list(
                props.get(self.INPUT).keys())
        definition[defn_name][self.TASKS] = {}
        for task in self.build_tasks(props):
            definition.get(defn_name).get(self.TASKS).update(task)

        if props.get(self.TASK_DEFAULTS) is not None:
            definition[defn_name][self.TASK_DEFAULTS.replace('_', '-')] = {
                k.replace('_', '-'): v
                for k, v in six.iteritems(props.get(self.TASK_DEFAULTS)) if v
            }

        return yaml.dump(definition,
                         Dumper=yaml.CSafeDumper if hasattr(
                             yaml, 'CSafeDumper') else yaml.SafeDumper)

    def handle_create(self):
        """Create the workflow in Mistral from the prepared definition."""
        super(Workflow, self).handle_create()
        props = self.prepare_properties(self.properties)
        try:
            workflow = self.client().workflows.create(props)
        except Exception as ex:
            raise exception.ResourceFailure(ex, self)
        # NOTE(prazumovsky): Mistral uses unique names for resource
        # identification.
        self.resource_id_set(workflow[0].name)

    def handle_signal(self, details=None):
        """Start a workflow execution in response to a signal."""
        self._validate_signal_data(details)

        result_input = {}
        result_params = {}
        inputs, params = self._get_inputs_and_params(details)
        if inputs is not None:
            # NOTE(prazumovsky): The signal payload may carry workflow
            # inputs; values provided there override the template-defined
            # ones, the rest keep their template values.
            for key, value in six.iteritems(self.properties.get(self.INPUT)):
                result_input.update({key: inputs.get(key) or value})
        if params is not None:
            if self.properties.get(self.PARAMS) is not None:
                result_params.update(self.properties.get(self.PARAMS))
            result_params.update(params)

        # Fall back entirely to template values when the signal gave none.
        if not result_input and self.properties.get(self.INPUT):
            result_input.update(self.properties.get(self.INPUT))
        if not result_params and self.properties.get(self.PARAMS):
            result_params.update(self.properties.get(self.PARAMS))

        try:
            execution = self.client().executions.create(
                self._workflow_name(), jsonutils.dumps(result_input),
                **result_params)
        except Exception as ex:
            raise exception.ResourceFailure(ex, self)
        # Track execution ids in resource data as a comma-separated list.
        executions = [execution.id]
        if self.EXECUTIONS in self.data():
            executions.extend(self.data().get(self.EXECUTIONS).split(','))
        self.data_set(self.EXECUTIONS, ','.join(executions))

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push an updated definition to Mistral when needed.

        Changes to input, params and description alone do not require a
        Mistral-side update.
        """
        update_allowed = [self.INPUT, self.PARAMS, self.DESCRIPTION]
        for prop in update_allowed:
            if prop in prop_diff:
                del prop_diff[prop]
        if prop_diff:
            new_props = self.prepare_properties(tmpl_diff['Properties'])
            try:
                workflow = self.client().workflows.update(new_props)
            except Exception as ex:
                raise exception.ResourceFailure(ex, self)
            self.data_set(self.NAME, workflow[0].name)
            self.resource_id_set(workflow[0].name)

    def _delete_executions(self):
        """Delete all executions recorded for this workflow, best-effort."""
        if self.data().get(self.EXECUTIONS):
            for execution_id in self.data().get(self.EXECUTIONS).split(','):
                with self.client_plugin().ignore_not_found:
                    self.client().executions.delete(execution_id)

            self.data_delete(self.EXECUTIONS)

    def handle_delete(self):
        """Delete the executions, then the workflow itself."""
        self._delete_executions()
        return super(Workflow, self).handle_delete()

    def _resolve_attribute(self, name):
        """Resolve the executions, data and alarm_url attributes."""
        if name == self.EXECUTIONS:
            if self.EXECUTIONS not in self.data():
                return []

            def parse_execution_response(execution):
                return {
                    'id': execution.id,
                    'workflow_name': execution.workflow_name,
                    'created_at': execution.created_at,
                    'updated_at': execution.updated_at,
                    'state': execution.state,
                    'input': jsonutils.loads(six.text_type(execution.input)),
                    'output': jsonutils.loads(six.text_type(execution.output))
                }

            return [
                parse_execution_response(self.client().executions.get(exec_id))
                for exec_id in self.data().get(self.EXECUTIONS).split(',')
            ]

        elif name == self.WORKFLOW_DATA:
            return {
                self.NAME: self.resource_id,
                self.INPUT: self.properties.get(self.INPUT)
            }

        elif name == self.ALARM_URL:
            return six.text_type(self._get_ec2_signed_url())

    # TODO(tlashchova): remove this method when mistralclient>1.0.0 is used.
    def _show_resource(self):
        workflow = self.client().workflows.get(self.resource_id)
        if hasattr(workflow, 'to_dict'):
            super(Workflow, self)._show_resource()
        return workflow._data
# Example #6
class AccessKey(resource.Resource):
    """An AWS-compatible resource providing an EC2-style access key.

    The keypair itself is created by (and owned by) the associated User
    resource; this resource records the access key ID as its resource_id
    and caches the secret, redacted, in resource data so later attribute
    reads do not require keystone admin rights.
    """
    # Property keys (AWS CloudFormation names).
    PROPERTIES = (
        SERIAL, USER_NAME, STATUS,
    ) = (
        'Serial', 'UserName', 'Status',
    )

    # Attribute keys exposed via Fn::GetAtt. NOTE: USER_NAME is rebound
    # here to the same string value as the property key above.
    ATTRIBUTES = (
        USER_NAME, SECRET_ACCESS_KEY,
    ) = (
        'UserName', 'SecretAccessKey',
    )

    properties_schema = {
        SERIAL: properties.Schema(
            properties.Schema.INTEGER,
            _('Not Implemented.'),
            implemented=False
        ),
        USER_NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name of the user that the new key will belong to.'),
            required=True
        ),
        STATUS: properties.Schema(
            properties.Schema.STRING,
            _('Not Implemented.'),
            constraints=[
                constraints.AllowedValues(['Active', 'Inactive']),
            ],
            implemented=False
        ),
    }

    attributes_schema = {
        USER_NAME: attributes.Schema(
            _('Username associated with the AccessKey.'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING
        ),
        SECRET_ACCESS_KEY: attributes.Schema(
            _('Keypair secret key.'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING
        ),
    }

    def __init__(self, name, json_snippet, stack):
        super(AccessKey, self).__init__(name, json_snippet, stack)
        # Secret is cached lazily; see _secret_accesskey().
        self._secret = None
        # Re-register the signal/access handler for pre-existing keys
        # (e.g. after an engine restart).
        if self.resource_id:
            self._register_access_key()

    def _get_user(self):
        """Derive the keystone userid, stored in the User resource_id.

        Helper function to derive the keystone userid, which is stored in the
        resource_id of the User associated with this key. We want to avoid
        looking the name up via listing keystone users, as this requires admin
        rights in keystone, so FnGetAtt which calls _secret_accesskey won't
        work for normal non-admin users.
        """
        # Lookup User resource by intrinsic reference (which is what is passed
        # into the UserName parameter.  Would be cleaner to just make the User
        # resource return resource_id for FnGetRefId but the AWS definition of
        # user does say it returns a user name not ID
        return self.stack.resource_by_refid(self.properties[self.USER_NAME])

    def handle_create(self):
        """Create the keypair via the associated User resource.

        Raises NotFound if the UserName does not resolve to a User
        resource in this stack.
        """
        user = self._get_user()
        if user is None:
            raise exception.NotFound(_('could not find user %s') %
                                     self.properties[self.USER_NAME])
        # The keypair is actually created and owned by the User resource
        kp = user._create_keypair()
        self.resource_id_set(kp.access)
        self._secret = kp.secret
        self._register_access_key()

        # Store the secret key, encrypted, in the DB so we don't have lookup
        # the user every time someone requests the SecretAccessKey attribute
        self.data_set('secret_key', kp.secret, redact=True)
        self.data_set('credential_id', kp.id, redact=True)

    def handle_delete(self):
        """Delete the keypair through the owning User resource.

        Best-effort: if the User resource cannot be found the deletion
        is logged and skipped rather than failing the stack delete.
        """
        self._secret = None
        if self.resource_id is None:
            return

        user = self._get_user()
        if user is None:
            LOG.debug('Error deleting %s - user not found', str(self))
            return
        user._delete_keypair()

    def _secret_accesskey(self):
        """Return the user's access key.

        Fetching it from keystone if necessary.
        """
        if self._secret is None:
            if not self.resource_id:
                LOG.info('could not get secret for %(username)s '
                         'Error:%(msg)s',
                         {'username': self.properties[self.USER_NAME],
                          'msg': "resource_id not yet set"})
            else:
                # First try to retrieve the secret from resource_data, but
                # for backwards compatibility, fall back to requesting from
                # keystone
                self._secret = self.data().get('secret_key')
                if self._secret is None:
                    try:
                        user_id = self._get_user().resource_id
                        kp = self.keystone().get_ec2_keypair(
                            user_id=user_id, access=self.resource_id)
                        self._secret = kp.secret
                        # Store the key in resource_data
                        self.data_set('secret_key', kp.secret, redact=True)
                        # And the ID of the v3 credential
                        self.data_set('credential_id', kp.id, redact=True)
                    except Exception as ex:
                        LOG.info('could not get secret for %(username)s '
                                 'Error:%(msg)s',
                                 {'username': self.properties[self.USER_NAME],
                                  'msg': ex})

        # Placeholder value keeps templates working when no secret exists.
        return self._secret or '000-000-000'

    def _resolve_attribute(self, name):
        """Resolve the UserName or SecretAccessKey attribute."""
        if name == self.USER_NAME:
            return self.properties[self.USER_NAME]
        elif name == self.SECRET_ACCESS_KEY:
            return self._secret_accesskey()

    def _register_access_key(self):
        """Register a handler mapping this key ID to its User's ACL check."""

        def access_allowed(resource_name):
            return self._get_user().access_allowed(resource_name)
        self.stack.register_access_allowed_handler(
            self.resource_id, access_allowed)
Example #7
0
class AutoScalingPolicy(signal_responder.SignalResponder,
                        cooldown.CooldownMixin):
    """A resource to manage scaling of `OS::Heat::AutoScalingGroup`.

    **Note** while it may incidentally support
    `AWS::AutoScaling::AutoScalingGroup` for now, please don't use it for that
    purpose and use `AWS::AutoScaling::ScalingPolicy` instead.
    """
    # Property keys. NOTE: the property is called auto_scaling_group_id
    # even though the constant says NAME (see TODO below).
    PROPERTIES = (
        AUTO_SCALING_GROUP_NAME, SCALING_ADJUSTMENT, ADJUSTMENT_TYPE,
        COOLDOWN,
    ) = (
        'auto_scaling_group_id', 'scaling_adjustment', 'adjustment_type',
        'cooldown',
    )

    # Allowed adjustment_type values.
    EXACT_CAPACITY, CHANGE_IN_CAPACITY, PERCENT_CHANGE_IN_CAPACITY = (
        'exact_capacity', 'change_in_capacity', 'percent_change_in_capacity')

    ATTRIBUTES = (
        ALARM_URL,
    ) = (
        'alarm_url',
    )

    properties_schema = {
        # TODO(Qiming): property name should be AUTO_SCALING_GROUP_ID
        AUTO_SCALING_GROUP_NAME: properties.Schema(
            properties.Schema.STRING,
            _('AutoScaling group ID to apply policy to.'),
            required=True
        ),
        SCALING_ADJUSTMENT: properties.Schema(
            properties.Schema.NUMBER,
            _('Size of adjustment.'),
            required=True,
            update_allowed=True
        ),
        ADJUSTMENT_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Type of adjustment (absolute or percentage).'),
            required=True,
            constraints=[
                constraints.AllowedValues([CHANGE_IN_CAPACITY,
                                           EXACT_CAPACITY,
                                           PERCENT_CHANGE_IN_CAPACITY]),
            ],
            update_allowed=True
        ),
        COOLDOWN: properties.Schema(
            properties.Schema.NUMBER,
            _('Cooldown period, in seconds.'),
            update_allowed=True
        ),
    }

    attributes_schema = {
        ALARM_URL: attributes.Schema(
            _("A signed url to handle the alarm.")
        ),
    }

    def handle_create(self):
        """Create the signal handler and record the signal user's ID."""
        super(AutoScalingPolicy, self).handle_create()
        self.resource_id_set(self._get_user_id())

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply property changes in place.

        If Properties has changed, update self.properties, so we get the new
        values during any subsequent adjustment.
        """
        if prop_diff:
            self.properties = json_snippet.properties(self.properties_schema,
                                                      self.context)

    def _get_adjustment_type(self):
        """Return the CamelCase form of the configured adjustment type.

        E.g. 'change_in_capacity' -> 'ChangeInCapacity'.
        """
        adjustment_type = self.properties[self.ADJUSTMENT_TYPE]
        return ''.join([t.capitalize() for t in adjustment_type.split('_')])

    # Backward-compatible alias: this method was previously published under
    # a misspelled name; keep it callable for any external users.
    _get_adjustement_type = _get_adjustment_type

    def handle_signal(self, details=None):
        """Adjust the target scaling group when an alarm signal arrives."""
        if self.action in (self.SUSPEND, self.DELETE):
            msg = _('Cannot signal resource during %s') % self.action
            raise Exception(msg)

        # ceilometer sends details like this:
        # {u'alarm_id': ID, u'previous': u'ok', u'current': u'alarm',
        #  u'reason': u'...'})
        # in this policy we currently assume that this gets called
        # only when there is an alarm. But the template writer can
        # put the policy in all the alarm notifiers (nodata, and ok).
        #
        # our watchrule has upper case states so lower() them all.
        if details is None:
            alarm_state = 'alarm'
        else:
            alarm_state = details.get('current',
                                      details.get('state', 'alarm')).lower()

        LOG.info(_LI('Alarm %(name)s, new state %(state)s'),
                 {'name': self.name, 'state': alarm_state})

        if alarm_state != 'alarm':
            return
        if self._cooldown_inprogress():
            LOG.info(_LI("%(name)s NOT performing scaling action, "
                         "cooldown %(cooldown)s"),
                     {'name': self.name,
                      'cooldown': self.properties[self.COOLDOWN]})
            return

        asgn_id = self.properties[self.AUTO_SCALING_GROUP_NAME]
        group = self.stack.resource_by_refid(asgn_id)
        if group is None:
            raise exception.NotFound(_('Alarm %(alarm)s could not find '
                                       'scaling group named "%(group)s"') % {
                                           'alarm': self.name,
                                           'group': asgn_id})

        LOG.info(_LI('%(name)s Alarm, adjusting Group %(group)s with id '
                     '%(asgn_id)s by %(filter)s'),
                 {'name': self.name, 'group': group.name, 'asgn_id': asgn_id,
                  'filter': self.properties[self.SCALING_ADJUSTMENT]})
        adjustment_type = self._get_adjustment_type()
        group.adjust(self.properties[self.SCALING_ADJUSTMENT], adjustment_type)

        # Record the timestamp so _cooldown_inprogress() can throttle
        # subsequent signals.
        self._cooldown_timestamp("%s : %s" %
                                 (self.properties[self.ADJUSTMENT_TYPE],
                                  self.properties[self.SCALING_ADJUSTMENT]))

    def _resolve_attribute(self, name):
        """Return the signed alarm URL once the resource exists."""
        if name == self.ALARM_URL and self.resource_id is not None:
            return six.text_type(self._get_signed_url())

    def FnGetRefId(self):
        """Use the plain resource reference, not the signal responder's."""
        return resource.Resource.FnGetRefId(self)
Example #8
0
class NovaFloatingIp(resource.Resource):
    """A resource for managing Nova floating IPs.

    Floating IP addresses can change their association between instances by
    action of the user.  One of the most common use cases for floating IPs is
    to provide public IP addresses to a private cloud, where there are a
    limited number of IP addresses available. Another is for a public cloud
    user to have a "static" IP address that can be reassigned when an instance
    is upgraded or moved.
    """
    support_status = support.SupportStatus(version='2014.1')

    required_service_extension = 'os-floating-ips'

    PROPERTIES = (POOL, ) = ('pool', )

    ATTRIBUTES = (
        POOL_ATTR,
        IP,
    ) = (
        'pool',
        'ip',
    )

    properties_schema = {
        POOL:
        properties.Schema(properties.Schema.STRING,
                          description=_('Allocate a floating IP from a given '
                                        'floating IP pool.')),
    }

    attributes_schema = {
        POOL_ATTR:
        attributes.Schema(_('Pool from which floating IP is allocated.'),
                          type=attributes.Schema.STRING),
        IP:
        attributes.Schema(_('Allocated floating IP address.'),
                          type=attributes.Schema.STRING),
    }

    default_client_name = 'nova'

    entity = 'floating_ips'

    def __init__(self, name, json_snippet, stack):
        super(NovaFloatingIp, self).__init__(name, json_snippet, stack)
        # Lazily-fetched floating IP object; see _get_resource().
        self._floating_ip = None

    def _get_resource(self):
        """Fetch and cache the floating IP object, if one exists."""
        if self._floating_ip is None and self.resource_id is not None:
            self._floating_ip = self.client().floating_ips.get(
                self.resource_id)

        return self._floating_ip

    def handle_create(self):
        """Allocate a floating IP, optionally from a specific pool."""
        # Bug fix: bind `pool` before the try block so the exception
        # handler below can never hit a NameError when referencing it.
        pool = self.properties[self.POOL]
        try:
            floating_ip = self.client().floating_ips.create(pool=pool)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                if self.client_plugin().is_not_found(e):
                    if pool is None:
                        LOG.error(
                            _LE('Could not allocate floating IP. '
                                'Probably there is no default floating'
                                ' IP pool is configured.'))

        self.resource_id_set(floating_ip.id)
        self._floating_ip = floating_ip

    def _resolve_attribute(self, key):
        """Resolve the 'pool' or 'ip' attribute from the live object."""
        floating_ip = self._get_resource()
        # Bug fix: the local dict was previously named `attributes`,
        # shadowing the imported `attributes` module within this method.
        attr_values = {
            self.POOL_ATTR: getattr(floating_ip, self.POOL_ATTR, None),
            self.IP: floating_ip.ip
        }
        return six.text_type(attr_values[key])
Example #9
0
 class DummyResource(generic_rsrc.GenericResource):
     attributes_schema = {"Foo": attributes.Schema("A test attribute")}
Example #10
0
class AutoScalingResourceGroup(aws_asg.AutoScalingGroup):
    """An autoscaling group that can scale arbitrary resources.

    An autoscaling group allows the creation of a desired count of similar
    resources, which are defined with the resource property in HOT format.
    If there is a need to create many of the same resources (e.g. one
    hundred sets of Server, WaitCondition and WaitConditionHandle or even
    Neutron Nets), AutoScalingGroup is a convenient and easy way to do that.
    """

    PROPERTIES = (
        RESOURCE,
        MAX_SIZE,
        MIN_SIZE,
        COOLDOWN,
        DESIRED_CAPACITY,
        ROLLING_UPDATES,
    ) = (
        'resource',
        'max_size',
        'min_size',
        'cooldown',
        'desired_capacity',
        'rolling_updates',
    )

    # Keys of the ROLLING_UPDATES map property.
    _ROLLING_UPDATES_SCHEMA = (
        MIN_IN_SERVICE,
        MAX_BATCH_SIZE,
        PAUSE_TIME,
    ) = (
        'min_in_service',
        'max_batch_size',
        'pause_time',
    )

    ATTRIBUTES = (
        OUTPUTS,
        OUTPUTS_LIST,
        CURRENT_SIZE,
        REFS,
        REFS_MAP,
    ) = (
        'outputs',
        'outputs_list',
        'current_size',
        'refs',
        'refs_map',
    )

    properties_schema = {
        RESOURCE:
        properties.Schema(
            properties.Schema.MAP,
            _('Resource definition for the resources in the group, in HOT '
              'format. The value of this property is the definition of a '
              'resource just as if it had been declared in the template '
              'itself.'),
            required=True,
            update_allowed=True,
        ),
        MAX_SIZE:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of resources in the group.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.Range(min=0)],
        ),
        MIN_SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Minimum number of resources in the group.'),
                          required=True,
                          update_allowed=True,
                          constraints=[constraints.Range(min=0)]),
        COOLDOWN:
        properties.Schema(properties.Schema.INTEGER,
                          _('Cooldown period, in seconds.'),
                          update_allowed=True),
        DESIRED_CAPACITY:
        properties.Schema(properties.Schema.INTEGER,
                          _('Desired initial number of resources.'),
                          update_allowed=True),
        ROLLING_UPDATES:
        properties.Schema(
            properties.Schema.MAP,
            _('Policy for rolling updates for this scaling group.'),
            update_allowed=True,
            schema={
                MIN_IN_SERVICE:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('The minimum number of resources in service while '
                      'rolling updates are being executed.'),
                    constraints=[constraints.Range(min=0)],
                    default=0),
                MAX_BATCH_SIZE:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('The maximum number of resources to replace at once.'),
                    constraints=[constraints.Range(min=1)],
                    default=1),
                PAUSE_TIME:
                properties.Schema(
                    properties.Schema.NUMBER,
                    _('The number of seconds to wait between batches of '
                      'updates.'),
                    constraints=[constraints.Range(min=0)],
                    default=0),
            },
            # A default policy has all fields with their own default values.
            default={
                MIN_IN_SERVICE: 0,
                MAX_BATCH_SIZE: 1,
                PAUSE_TIME: 0,
            },
        ),
    }

    attributes_schema = {
        OUTPUTS:
        attributes.Schema(
            _("A map of resource names to the specified attribute of each "
              "individual resource that is part of the AutoScalingGroup. "
              "This map specifies output parameters that are available "
              "once the AutoScalingGroup has been instantiated."),
            support_status=support.SupportStatus(version='2014.2'),
            type=attributes.Schema.MAP),
        OUTPUTS_LIST:
        attributes.Schema(
            _("A list of the specified attribute of each individual resource "
              "that is part of the AutoScalingGroup. This list of attributes "
              "is available as an output once the AutoScalingGroup has been "
              "instantiated."),
            support_status=support.SupportStatus(version='2014.2'),
            type=attributes.Schema.LIST),
        CURRENT_SIZE:
        attributes.Schema(
            _("The current size of AutoscalingResourceGroup."),
            support_status=support.SupportStatus(version='2015.1'),
            type=attributes.Schema.INTEGER),
        REFS:
        attributes.Schema(
            _("A list of resource IDs for the resources in the group."),
            type=attributes.Schema.LIST,
            support_status=support.SupportStatus(version='7.0.0'),
        ),
        REFS_MAP:
        attributes.Schema(
            _("A map of resource names to IDs for the resources in "
              "the group."),
            type=attributes.Schema.MAP,
            support_status=support.SupportStatus(version='7.0.0'),
        ),
    }
    # Rolling-update settings come from the ROLLING_UPDATES property
    # rather than an update_policy section, so the inherited schema is
    # overridden with an empty one.
    update_policy_schema = {}

    def _get_resource_definition(self):
        """Build a ResourceDefinition for one member from RESOURCE (HOT)."""
        resource_def = self.properties[self.RESOURCE]
        defn_data = dict(HOTInterpreter()._rsrc_defn_args(
            None, 'member', resource_def))
        return rsrc_defn.ResourceDefinition(None, **defn_data)

    def _try_rolling_update(self, prop_diff):
        """Replace members in batches when the member definition changed."""
        if self.RESOURCE in prop_diff:
            policy = self.properties[self.ROLLING_UPDATES]
            self._replace(policy[self.MIN_IN_SERVICE],
                          policy[self.MAX_BATCH_SIZE], policy[self.PAUSE_TIME])

    def _create_template(self,
                         num_instances,
                         num_replace=0,
                         template_version=('heat_template_version',
                                           '2015-04-30')):
        """Create a template in the HOT format for the nested stack."""
        return super(AutoScalingResourceGroup,
                     self)._create_template(num_instances,
                                            num_replace,
                                            template_version=template_version)

    def get_attribute(self, key, *path):
        """Resolve group attributes, including per-member lookups.

        Raises InvalidTemplateAttribute for unknown keys.
        """
        if key == self.CURRENT_SIZE:
            return grouputils.get_size(self)
        if key == self.REFS:
            refs = grouputils.get_member_refids(self)
            return refs
        if key == self.REFS_MAP:
            members = grouputils.get_members(self)
            refs_map = {m.name: m.resource_id for m in members}
            return refs_map
        # OUTPUTS/OUTPUTS_LIST need an attribute path into the members.
        if path:
            members = grouputils.get_members(self)
            attrs = ((rsrc.name, rsrc.FnGetAtt(*path)) for rsrc in members)
            if key == self.OUTPUTS:
                return dict(attrs)
            if key == self.OUTPUTS_LIST:
                return [value for name, value in attrs]

        # "resource.<name>[.<attr>...]" addresses a single member.
        if key.startswith("resource."):
            return grouputils.get_nested_attrs(self, key, True, *path)

        raise exception.InvalidTemplateAttribute(resource=self.name, key=key)
Example #11
0
class Subnet(resource.Resource):
    """AWS::EC2::Subnet-style resource implemented on top of Neutron.

    Creates a Neutron subnet inside the network identified by VpcId and,
    when the VPC has a router, attaches the subnet to it.
    """

    PROPERTIES = (
        AVAILABILITY_ZONE,
        CIDR_BLOCK,
        VPC_ID,
        TAGS,
    ) = (
        'AvailabilityZone',
        'CidrBlock',
        'VpcId',
        'Tags',
    )

    _TAG_KEYS = (
        TAG_KEY,
        TAG_VALUE,
    ) = (
        'Key',
        'Value',
    )

    # Reuses the AVAILABILITY_ZONE property key as the attribute name.
    ATTRIBUTES = (AVAILABILITY_ZONE, )

    properties_schema = {
        AVAILABILITY_ZONE:
        properties.Schema(
            properties.Schema.STRING,
            _('Availability zone in which you want the subnet.')),
        CIDR_BLOCK:
        properties.Schema(properties.Schema.STRING,
                          _('CIDR block to apply to subnet.'),
                          required=True),
        VPC_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('Ref structure that contains the ID of the VPC on which you '
              'want to create the subnet.'),
            required=True),
        TAGS:
        properties.Schema(properties.Schema.LIST,
                          schema=properties.Schema(
                              properties.Schema.MAP,
                              _('List of tags to attach to this resource.'),
                              schema={
                                  TAG_KEY:
                                  properties.Schema(properties.Schema.STRING,
                                                    required=True),
                                  TAG_VALUE:
                                  properties.Schema(properties.Schema.STRING,
                                                    required=True),
                              },
                              implemented=False,
                          )),
    }

    attributes_schema = {
        AVAILABILITY_ZONE:
        attributes.Schema(_('Availability Zone of the subnet.'),
                          type=attributes.Schema.STRING),
    }

    default_client_name = 'neutron'

    def handle_create(self):
        """Create the subnet and attach it to the VPC's router, if any."""
        # TODO(sbaker) Verify that this CidrBlock is within the vpc CidrBlock
        network_id = self.properties.get(self.VPC_ID)

        props = {
            'network_id': network_id,
            'cidr': self.properties.get(self.CIDR_BLOCK),
            'name': self.physical_resource_name(),
            'ip_version': 4
        }
        subnet = self.client().create_subnet({'subnet': props})['subnet']
        self.resource_id_set(subnet['id'])

        router = vpc.VPC.router_for_vpc(self.client(), network_id)
        if router:
            self.client().add_interface_router(router['id'],
                                               {'subnet_id': subnet['id']})

    def handle_delete(self):
        """Detach the subnet from the router (if any) and delete it.

        Not-found errors are ignored in both steps so an already-gone
        router or subnet does not fail the stack delete.
        """
        if self.resource_id is None:
            return

        network_id = self.properties.get(self.VPC_ID)
        subnet_id = self.resource_id

        with self.client_plugin().ignore_not_found:
            router = vpc.VPC.router_for_vpc(self.client(), network_id)
            if router:
                self.client().remove_interface_router(router['id'],
                                                      {'subnet_id': subnet_id})

        with self.client_plugin().ignore_not_found:
            self.client().delete_subnet(subnet_id)

    def _resolve_attribute(self, name):
        """Resolve the AvailabilityZone attribute from the property."""
        if name == self.AVAILABILITY_ZONE:
            return self.properties.get(self.AVAILABILITY_ZONE)
Example #12
0
class FirewallRule(neutron.NeutronResource):
    """A resource for the FirewallRule resource in Neutron FWaaS.

    FirewallRule represents a collection of attributes like ports,
    ip addresses etc. which define match criteria and action (allow, or deny)
    that needs to be taken on the matched data traffic.
    """

    required_service_extension = 'fwaas'

    entity = 'firewall_rule'

    PROPERTIES = (
        NAME,
        DESCRIPTION,
        SHARED,
        PROTOCOL,
        IP_VERSION,
        SOURCE_IP_ADDRESS,
        DESTINATION_IP_ADDRESS,
        SOURCE_PORT,
        DESTINATION_PORT,
        ACTION,
        ENABLED,
    ) = (
        'name',
        'description',
        'shared',
        'protocol',
        'ip_version',
        'source_ip_address',
        'destination_ip_address',
        'source_port',
        'destination_port',
        'action',
        'enabled',
    )

    ATTRIBUTES = (
        NAME_ATTR,
        DESCRIPTION_ATTR,
        FIREWALL_POLICY_ID,
        SHARED_ATTR,
        PROTOCOL_ATTR,
        IP_VERSION_ATTR,
        SOURCE_IP_ADDRESS_ATTR,
        DESTINATION_IP_ADDRESS_ATTR,
        SOURCE_PORT_ATTR,
        DESTINATION_PORT_ATTR,
        ACTION_ATTR,
        ENABLED_ATTR,
        POSITION,
        TENANT_ID,
    ) = (
        'name',
        'description',
        'firewall_policy_id',
        'shared',
        'protocol',
        'ip_version',
        'source_ip_address',
        'destination_ip_address',
        'source_port',
        'destination_port',
        'action',
        'enabled',
        'position',
        'tenant_id',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name for the firewall rule.'),
                          update_allowed=True),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description for the firewall rule.'),
                          update_allowed=True),
        SHARED:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether this rule should be shared across all tenants.'),
            default=False,
            update_allowed=True),
        PROTOCOL:
        properties.Schema(
            properties.Schema.STRING,
            _('Protocol for the firewall rule.'),
            constraints=[
                constraints.AllowedValues(['tcp', 'udp', 'icmp', 'any']),
            ],
            default='any',
            update_allowed=True,
        ),
        IP_VERSION:
        properties.Schema(properties.Schema.STRING,
                          _('Internet protocol version.'),
                          default='4',
                          constraints=[
                              constraints.AllowedValues(['4', '6']),
                          ],
                          update_allowed=True),
        SOURCE_IP_ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('Source IP address or CIDR.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('net_cidr')]),
        DESTINATION_IP_ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('Destination IP address or CIDR.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('net_cidr')]),
        SOURCE_PORT:
        properties.Schema(properties.Schema.STRING,
                          _('Source port number or a range.'),
                          update_allowed=True),
        DESTINATION_PORT:
        properties.Schema(properties.Schema.STRING,
                          _('Destination port number or a range.'),
                          update_allowed=True),
        ACTION:
        properties.Schema(
            properties.Schema.STRING,
            _('Action to be performed on the traffic matching the rule.'),
            default='deny',
            constraints=[
                constraints.AllowedValues(['allow', 'deny']),
            ],
            update_allowed=True),
        ENABLED:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('Whether this rule should be enabled.'),
                          default=True,
                          update_allowed=True),
    }

    attributes_schema = {
        NAME_ATTR:
        attributes.Schema(_('Name for the firewall rule.'),
                          type=attributes.Schema.STRING),
        DESCRIPTION_ATTR:
        attributes.Schema(_('Description of the firewall rule.'),
                          type=attributes.Schema.STRING),
        FIREWALL_POLICY_ID:
        attributes.Schema(_(
            'Unique identifier of the firewall policy to which this '
            'firewall rule belongs.'),
                          type=attributes.Schema.STRING),
        SHARED_ATTR:
        attributes.Schema(_('Shared status of this firewall rule.'),
                          type=attributes.Schema.STRING),
        PROTOCOL_ATTR:
        attributes.Schema(_('Protocol value for this firewall rule.'),
                          type=attributes.Schema.STRING),
        IP_VERSION_ATTR:
        attributes.Schema(_('Ip_version for this firewall rule.'),
                          type=attributes.Schema.STRING),
        SOURCE_IP_ADDRESS_ATTR:
        attributes.Schema(_('Source ip_address for this firewall rule.'),
                          type=attributes.Schema.STRING),
        DESTINATION_IP_ADDRESS_ATTR:
        attributes.Schema(_('Destination ip_address for this firewall rule.'),
                          type=attributes.Schema.STRING),
        SOURCE_PORT_ATTR:
        attributes.Schema(_('Source port range for this firewall rule.'),
                          type=attributes.Schema.STRING),
        DESTINATION_PORT_ATTR:
        attributes.Schema(_('Destination port range for this firewall rule.'),
                          type=attributes.Schema.STRING),
        ACTION_ATTR:
        attributes.Schema(_('Allow or deny action for this firewall rule.'),
                          type=attributes.Schema.STRING),
        ENABLED_ATTR:
        attributes.Schema(
            _('Indicates whether this firewall rule is enabled or not.'),
            type=attributes.Schema.STRING),
        POSITION:
        attributes.Schema(
            _('Position of the rule within the firewall policy.'),
            type=attributes.Schema.STRING),
        TENANT_ID:
        attributes.Schema(_('Id of the tenant owning the firewall.'),
                          type=attributes.Schema.STRING),
    }

    def handle_create(self):
        """Create the firewall rule in Neutron.

        The template-level protocol value 'any' is translated to None,
        which is how Neutron represents "all protocols".
        """
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        if props.get(self.PROTOCOL) == 'any':
            props[self.PROTOCOL] = None
        firewall_rule = self.client().create_firewall_rule(
            {'firewall_rule': props})['firewall_rule']
        self.resource_id_set(firewall_rule['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed properties to Neutron.

        Applies the same 'any' -> None protocol translation as create.
        """
        if prop_diff:
            if prop_diff.get(self.PROTOCOL) == 'any':
                prop_diff[self.PROTOCOL] = None
            self.client().update_firewall_rule(self.resource_id,
                                               {'firewall_rule': prop_diff})

    def handle_delete(self):
        """Delete the rule; an already-gone rule is not an error.

        Returns True when the delete call succeeded; falls through
        (returning None) when a not-found error was swallowed.
        NOTE(review): here ignore_not_found is called with the exception,
        unlike its context-manager use elsewhere in this file — confirm
        the client plugin supports both forms.
        """
        try:
            self.client().delete_firewall_rule(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
Example #13
0
class Firewall(neutron.NeutronResource):
    """A resource for the Firewall resource in Neutron FWaaS.

    Resource for using the Neutron firewall implementation. Firewall is a
    network security system that monitors and controls the incoming and
    outgoing network traffic based on predetermined security rules.
    """

    required_service_extension = 'fwaas'

    entity = 'firewall'

    PROPERTIES = (
        NAME,
        DESCRIPTION,
        ADMIN_STATE_UP,
        FIREWALL_POLICY_ID,
        VALUE_SPECS,
        SHARED,
    ) = (
        'name',
        'description',
        'admin_state_up',
        'firewall_policy_id',
        'value_specs',
        'shared',
    )

    ATTRIBUTES = (
        NAME_ATTR,
        DESCRIPTION_ATTR,
        ADMIN_STATE_UP_ATTR,
        FIREWALL_POLICY_ID_ATTR,
        SHARED_ATTR,
        STATUS,
        TENANT_ID,
    ) = (
        'name',
        'description',
        'admin_state_up',
        'firewall_policy_id',
        'shared',
        'status',
        'tenant_id',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name for the firewall.'),
                          update_allowed=True),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description for the firewall.'),
                          update_allowed=True),
        ADMIN_STATE_UP:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Administrative state of the firewall. If false (down), '
              'firewall does not forward packets and will drop all '
              'traffic to/from VMs behind the firewall.'),
            default=True,
            update_allowed=True),
        FIREWALL_POLICY_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID of the firewall policy that this firewall is '
              'associated with.'),
            required=True,
            update_allowed=True),
        VALUE_SPECS:
        properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the request. Parameters '
              'are often specific to installed hardware or extensions.'),
            support_status=support.SupportStatus(version='5.0.0'),
            default={},
            update_allowed=True),
        SHARED:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether this firewall should be shared across all tenants. '
              'NOTE: The default policy setting in Neutron restricts usage '
              'of this property to administrative users only.'),
            update_allowed=True,
            support_status=support.SupportStatus(
                status=support.UNSUPPORTED,
                message=_('There is no such option during 5.0.0, so need to '
                          'make this property unsupported while it not used.'),
                version='6.0.0',
                previous_status=support.SupportStatus(version='2015.1'))),
    }

    attributes_schema = {
        NAME_ATTR:
        attributes.Schema(_('Name for the firewall.'),
                          type=attributes.Schema.STRING),
        DESCRIPTION_ATTR:
        attributes.Schema(_('Description of the firewall.'),
                          type=attributes.Schema.STRING),
        ADMIN_STATE_UP_ATTR:
        attributes.Schema(_('The administrative state of the firewall.'),
                          type=attributes.Schema.STRING),
        FIREWALL_POLICY_ID_ATTR:
        attributes.Schema(_(
            'Unique identifier of the firewall policy used to create '
            'the firewall.'),
                          type=attributes.Schema.STRING),
        SHARED_ATTR:
        attributes.Schema(
            _('Shared status of this firewall.'),
            support_status=support.SupportStatus(
                status=support.UNSUPPORTED,
                message=_('There is no such option during 5.0.0, so need to '
                          'make this attribute unsupported, otherwise error '
                          'will raised.'),
                version='6.0.0'),
            type=attributes.Schema.STRING),
        STATUS:
        attributes.Schema(_('The status of the firewall.'),
                          type=attributes.Schema.STRING),
        TENANT_ID:
        attributes.Schema(_('Id of the tenant owning the firewall.'),
                          type=attributes.Schema.STRING),
    }

    def check_create_complete(self, data):
        """Check whether the firewall has finished creating.

        :returns: False while Neutron still reports PENDING_CREATE, True
            once the firewall is ACTIVE or INACTIVE.
        :raises exception.ResourceInError: if the firewall is in ERROR.
        :raises exception.ResourceUnknownStatus: on any other status.
        """
        # Named 'attrs' so the module-level 'attributes' import is not
        # shadowed by this local variable.
        attrs = self._show_resource()
        status = attrs['status']
        if status == 'PENDING_CREATE':
            return False
        elif status in ('ACTIVE', 'INACTIVE'):
            return True
        elif status == 'ERROR':
            raise exception.ResourceInError(
                resource_status=status, status_reason=_('Error in Firewall'))
        else:
            raise exception.ResourceUnknownStatus(
                resource_status=status, result=_('Firewall creation failed'))

    def handle_create(self):
        """Create the firewall in Neutron and record its id."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        firewall = self.client().create_firewall({'firewall':
                                                  props})['firewall']
        self.resource_id_set(firewall['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push any changed properties to the Neutron firewall."""
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_firewall(self.resource_id,
                                          {'firewall': prop_diff})

    def handle_delete(self):
        """Delete the firewall; a missing firewall counts as success."""
        try:
            self.client().delete_firewall(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def _resolve_attribute(self, name):
        """Resolve attributes, short-circuiting the unsupported one."""
        if name == self.SHARED_ATTR:
            return ('This attribute is currently unsupported in neutron '
                    'firewall resource.')
        return super(Firewall, self)._resolve_attribute(name)

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Build live-state properties, dropping the unsupported key."""
        result = super(Firewall,
                       self).parse_live_resource_data(resource_properties,
                                                      resource_data)
        # SHARED is flagged UNSUPPORTED in properties_schema, so it must
        # not be reported back as observed live state.
        result.pop(self.SHARED, None)
        return result
# Example #14
class FirewallPolicy(neutron.NeutronResource):
    """A resource for the FirewallPolicy resource in Neutron FWaaS.

    A firewall policy is an ordered collection of firewall rules; it can
    optionally be shared across tenants and flagged as audited.
    """

    required_service_extension = 'fwaas'

    entity = 'firewall_policy'

    PROPERTIES = (
        NAME, DESCRIPTION, SHARED, AUDITED, FIREWALL_RULES,
    ) = (
        'name', 'description', 'shared', 'audited', 'firewall_rules',
    )

    ATTRIBUTES = (
        NAME_ATTR, DESCRIPTION_ATTR, FIREWALL_RULES_ATTR, SHARED_ATTR,
        AUDITED_ATTR, TENANT_ID,
    ) = (
        'name', 'description', 'firewall_rules', 'shared',
        'audited', 'tenant_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the firewall policy.'),
            update_allowed=True),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the firewall policy.'),
            update_allowed=True),
        SHARED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether this policy should be shared across all tenants.'),
            default=False,
            update_allowed=True),
        AUDITED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether this policy should be audited. When set to True, '
              'each time the firewall policy or the associated firewall '
              'rules are changed, this attribute will be set to False and '
              'will have to be explicitly set to True through an update '
              'operation.'),
            default=False,
            update_allowed=True),
        FIREWALL_RULES: properties.Schema(
            properties.Schema.LIST,
            _('An ordered list of firewall rules to apply to the firewall. '
              '(Prior to version 14.0.0 this was a required property).'),
            update_allowed=True),
    }

    attributes_schema = {
        NAME_ATTR: attributes.Schema(
            _('Name for the firewall policy.'),
            type=attributes.Schema.STRING),
        DESCRIPTION_ATTR: attributes.Schema(
            _('Description of the firewall policy.'),
            type=attributes.Schema.STRING),
        FIREWALL_RULES_ATTR: attributes.Schema(
            _('List of firewall rules in this firewall policy.'),
            type=attributes.Schema.LIST),
        SHARED_ATTR: attributes.Schema(
            _('Shared status of this firewall policy.'),
            type=attributes.Schema.STRING),
        AUDITED_ATTR: attributes.Schema(
            _('Audit status of this firewall policy.'),
            type=attributes.Schema.STRING),
        TENANT_ID: attributes.Schema(
            _('Id of the tenant owning the firewall policy.'),
            type=attributes.Schema.STRING),
    }

    def handle_create(self):
        """Create the firewall policy in Neutron and record its id."""
        policy_props = self.prepare_properties(
            self.properties, self.physical_resource_name())
        policy = self.client().create_firewall_policy(
            {'firewall_policy': policy_props})['firewall_policy']
        self.resource_id_set(policy['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply changed properties to the existing firewall policy."""
        if not prop_diff:
            return
        self.client().update_firewall_policy(
            self.resource_id, {'firewall_policy': prop_diff})

    def handle_delete(self):
        """Delete the policy; an already-deleted policy counts as done."""
        try:
            self.client().delete_firewall_policy(self.resource_id)
            return True
        except Exception as ex:
            # Re-raises anything other than a not-found error.
            self.client_plugin().ignore_not_found(ex)
# Example #15
class RemoteStack(resource.Resource):
    """A Resource representing a stack.

    Stack can be created using specified context (e.g. in another region).
    """
    default_client_name = 'heat'

    PROPERTIES = (
        CONTEXT, TEMPLATE, TIMEOUT, PARAMETERS,
    ) = (
        'context', 'template', 'timeout', 'parameters',
    )

    ATTRIBUTES = (
        NAME_ATTR, OUTPUTS,
    ) = (
        'stack_name', 'outputs',
    )

    # NOTE: the trailing commas are required. Without them the
    # parenthesized expressions are plain strings, so _CONTEXT_KEYS would
    # silently be the string 'region_name' instead of a one-element tuple.
    _CONTEXT_KEYS = (
        REGION_NAME,
    ) = (
        'region_name',
    )

    properties_schema = {
        CONTEXT: properties.Schema(
            properties.Schema.MAP,
            _('Context for this stack.'),
            schema={
                REGION_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Region name in which this stack will be created.'),
                    required=True,
                )
            }
        ),
        TEMPLATE: properties.Schema(
            properties.Schema.STRING,
            _('Template that specifies the stack to be created as '
              'a resource.'),
            required=True,
            update_allowed=True
        ),
        TIMEOUT: properties.Schema(
            properties.Schema.INTEGER,
            _('Number of minutes to wait for this stack creation.'),
            update_allowed=True
        ),
        PARAMETERS: properties.Schema(
            properties.Schema.MAP,
            _('Set of parameters passed to this stack.'),
            default={},
            update_allowed=True
        ),
    }

    attributes_schema = {
        NAME_ATTR: attributes.Schema(
            _('Name of the stack.'),
            type=attributes.Schema.STRING
        ),
        OUTPUTS: attributes.Schema(
            _('A dict of key-value pairs output from the stack.'),
            type=attributes.Schema.MAP
        ),
    }

    def __init__(self, name, definition, stack):
        super(RemoteStack, self).__init__(name, definition, stack)
        # Region name and request context are derived lazily in _context().
        self._region_name = None
        self._local_context = None

    def _context(self):
        """Return (and cache) a RequestContext for the target region.

        The region comes from the CONTEXT property when given, otherwise
        from the current request context.
        """
        if self._local_context:
            return self._local_context

        ctx_props = self.properties.get(self.CONTEXT)
        if ctx_props:
            self._region_name = ctx_props[self.REGION_NAME]
        else:
            self._region_name = self.context.region_name

        # Build RequestContext from existing one
        dict_ctxt = self.context.to_dict()
        dict_ctxt.update({'region_name': self._region_name})
        self._local_context = context.RequestContext.from_dict(dict_ctxt)
        return self._local_context

    def heat(self):
        # A convenience method overriding Resource.heat()
        return self._context().clients.client(self.default_client_name)

    def client_plugin(self):
        # A convenience method overriding Resource.client_plugin()
        return self._context().clients.client_plugin(self.default_client_name)

    def validate(self):
        """Validate both connectivity to the remote Heat and the template.

        :raises exception.StackValidationFailed: if the remote endpoint is
            unreachable or rejects the nested template.
        """
        super(RemoteStack, self).validate()

        try:
            self.heat()
        except Exception as ex:
            exc_info = dict(region=self._region_name, exc=six.text_type(ex))
            msg = _('Cannot establish connection to Heat endpoint at region '
                    '"%(region)s" due to "%(exc)s"') % exc_info
            raise exception.StackValidationFailed(message=msg)

        try:
            params = self.properties[self.PARAMETERS]
            env = environment.get_child_environment(self.stack.env, params)
            tmpl = template_format.parse(self.properties[self.TEMPLATE])
            args = {
                'template': tmpl,
                'files': self.stack.t.files,
                'environment': env.user_env_as_dict(),
            }
            self.heat().stacks.validate(**args)
        except Exception as ex:
            exc_info = dict(region=self._region_name, exc=six.text_type(ex))
            LOG.error(_LE('exception: %s'), type(ex))
            msg = _('Failed validating stack template using Heat endpoint at '
                    'region "%(region)s" due to "%(exc)s"') % exc_info
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the remote stack and record its id."""
        params = self.properties[self.PARAMETERS]
        env = environment.get_child_environment(self.stack.env, params)
        tmpl = template_format.parse(self.properties[self.TEMPLATE])
        args = {
            'stack_name': self.physical_resource_name_or_FnGetRefId(),
            'template': tmpl,
            'timeout_mins': self.properties[self.TIMEOUT],
            'disable_rollback': True,
            'parameters': params,
            'files': self.stack.t.files,
            'environment': env.user_env_as_dict(),
        }
        remote_stack_id = self.heat().stacks.create(**args)['stack']['id']
        self.resource_id_set(remote_stack_id)

    def handle_delete(self):
        """Delete the remote stack; a missing stack is ignored."""
        if self.resource_id is not None:
            with self.client_plugin().ignore_not_found:
                self.heat().stacks.delete(stack_id=self.resource_id)

    def handle_resume(self):
        """Resume the remote stack."""
        if self.resource_id is None:
            raise exception.Error(_('Cannot resume %s, resource not found')
                                  % self.name)
        self.heat().actions.resume(stack_id=self.resource_id)

    def handle_suspend(self):
        """Suspend the remote stack."""
        if self.resource_id is None:
            raise exception.Error(_('Cannot suspend %s, resource not found')
                                  % self.name)
        self.heat().actions.suspend(stack_id=self.resource_id)

    def handle_snapshot(self):
        """Snapshot the remote stack and remember the snapshot id."""
        snapshot = self.heat().stacks.snapshot(stack_id=self.resource_id)
        self.data_set('snapshot_id', snapshot['id'])

    def handle_restore(self, defn, restore_data):
        """Rebuild this resource's definition from a stored snapshot."""
        snapshot_id = restore_data['resource_data']['snapshot_id']
        snapshot = self.heat().stacks.snapshot_show(self.resource_id,
                                                    snapshot_id)
        s_data = snapshot['snapshot']['data']
        env = environment.Environment(s_data['environment'])
        files = s_data['files']
        tmpl = template.Template(s_data['template'], env=env, files=files)
        # Re-point TEMPLATE/PARAMETERS at the snapshot contents.
        props = function.resolve(self.properties.data)
        props[self.TEMPLATE] = jsonutils.dumps(tmpl.t)
        props[self.PARAMETERS] = env.params

        return defn.freeze(properties=props)

    def handle_check(self):
        """Trigger a check action on the remote stack."""
        self.heat().actions.check(stack_id=self.resource_id)

    def _needs_update(self, after, before, after_props, before_props,
                      prev_resource, check_init_complete=True):
        # Always issue an update to the remote stack and let the individual
        # resources in it decide if they need updating.
        return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        # Always issue an update to the remote stack and let the individual
        # resources in it decide if they need updating.
        if self.resource_id:
            snippet = json_snippet.get('Properties', {})
            self.properties = properties.Properties(self.properties_schema,
                                                    snippet,
                                                    function.resolve,
                                                    self.name)

            params = self.properties[self.PARAMETERS]
            env = environment.get_child_environment(self.stack.env, params)
            tmpl = template_format.parse(self.properties[self.TEMPLATE])
            fields = {
                'stack_id': self.resource_id,
                'parameters': params,
                'template': tmpl,
                'timeout_mins': self.properties[self.TIMEOUT],
                'disable_rollback': self.stack.disable_rollback,
                'files': self.stack.t.files,
                'environment': env.user_env_as_dict(),
            }
            self.heat().stacks.update(**fields)

    def _check_action_complete(self, action):
        """Poll the remote stack for completion of the given action.

        :returns: False while the action is in progress, True on success.
        :raises exception.ResourceInError: if the remote stack failed.
        :raises exception.ResourceUnknownStatus: on an unexpected status.
        """
        stack = self.heat().stacks.get(stack_id=self.resource_id)
        if stack.action != action:
            return False

        if stack.status == self.IN_PROGRESS:
            return False
        elif stack.status == self.COMPLETE:
            return True
        elif stack.status == self.FAILED:
            raise exception.ResourceInError(
                resource_status=stack.stack_status,
                status_reason=stack.stack_status_reason)
        else:
            # Note: this should never happen, so it really means that
            # the resource/engine is in serious problem if it happens.
            raise exception.ResourceUnknownStatus(
                resource_status=stack.stack_status,
                status_reason=stack.stack_status_reason)

    def check_create_complete(self, *args):
        return self._check_action_complete(action=self.CREATE)

    def check_delete_complete(self, *args):
        if self.resource_id is None:
            return True

        try:
            return self._check_action_complete(action=self.DELETE)
        except Exception as ex:
            # A not-found stack means the delete has completed.
            self.client_plugin().ignore_not_found(ex)
            return True

    def check_resume_complete(self, *args):
        return self._check_action_complete(action=self.RESUME)

    def check_suspend_complete(self, *args):
        return self._check_action_complete(action=self.SUSPEND)

    def check_update_complete(self, *args):
        return self._check_action_complete(action=self.UPDATE)

    def check_snapshot_complete(self, *args):
        return self._check_action_complete(action=self.SNAPSHOT)

    def check_check_complete(self, *args):
        return self._check_action_complete(action=self.CHECK)

    def _resolve_attribute(self, name):
        """Resolve stack_name and outputs from the remote stack."""
        stack = self.heat().stacks.get(stack_id=self.resource_id)
        if name == self.NAME_ATTR:
            value = getattr(stack, name, None)
            return value or self.physical_resource_name_or_FnGetRefId()

        if name == self.OUTPUTS:
            outputs = stack.outputs
            return dict((output['output_key'], output['output_value'])
                        for output in outputs)

    def get_reference_id(self):
        return self.resource_id
# Example #16
 class DummyResource(generic_rsrc.GenericResource):
     support_status = support.SupportStatus()
     properties_schema = {}
     attributes_schema = {"Foo": attributes.Schema("A test attribute")}
# Example #17
class CloudNetwork(resource.Resource):
    """A resource for creating Rackspace Cloud Networks.

    See http://www.rackspace.com/cloud/networks/ for service
    documentation.
    """

    support_status = support.SupportStatus(
        status=support.DEPRECATED,
        message=_('Use OS::Neutron::Net instead.'),
        version='2015.1')

    PROPERTIES = (LABEL, CIDR) = ("label", "cidr")

    ATTRIBUTES = (
        CIDR_ATTR,
        LABEL_ATTR,
    ) = (
        'cidr',
        'label',
    )

    properties_schema = {
        LABEL:
        properties.Schema(properties.Schema.STRING,
                          _("The name of the network."),
                          required=True,
                          constraints=[constraints.Length(min=3, max=64)]),
        CIDR:
        properties.Schema(
            properties.Schema.STRING,
            _("The IP block from which to allocate the network. For example, "
              "172.16.0.0/24 or 2001:DB8::/64."),
            required=True)
    }

    attributes_schema = {
        CIDR_ATTR:
        attributes.Schema(_("The CIDR for an isolated private network.")),
        LABEL_ATTR:
        attributes.Schema(_("The name of the network.")),
    }

    def __init__(self, name, json_snippet, stack):
        resource.Resource.__init__(self, name, json_snippet, stack)
        # Cached backing network object; fetched lazily by network().
        self._network = None

    def network(self):
        """Return the backing network object, fetching and caching it."""
        if self.resource_id and not self._network:
            try:
                self._network = self.cloud_networks().get(self.resource_id)
            except NotFound:
                # LOG.warn is a deprecated alias of LOG.warning.
                LOG.warning(
                    _LW("Could not find network %s but resource id is"
                        " set."), self.resource_id)
        return self._network

    def cloud_networks(self):
        """Return the cloud networks service client."""
        return self.client('cloud_networks')

    def handle_create(self):
        """Create the network and record its id."""
        cnw = self.cloud_networks().create(label=self.properties[self.LABEL],
                                           cidr=self.properties[self.CIDR])
        self.resource_id_set(cnw.id)

    def handle_check(self):
        """Verify the network still exists."""
        self.cloud_networks().get(self.resource_id)

    def handle_delete(self):
        """Delete cloud network.

        Cloud Network doesn't have a status attribute, and there is a non-zero
        window between the deletion of a server and the acknowledgement from
        the cloud network that it's no longer in use, so it needs some way to
        keep track of when the delete call was successfully issued.
        """
        network_info = {
            'delete_issued': False,
            'network': self.network(),
        }
        return network_info

    def check_delete_complete(self, network_info):
        """Issue the delete on first poll, then wait until it is gone."""
        network = network_info['network']

        if not network:
            return True

        if not network_info['delete_issued']:
            try:
                network.delete()
            except NetworkInUse:
                # Pass lazy %-args to the logger instead of pre-formatting.
                LOG.warning("Network '%s' still in use.", network.id)
            else:
                network_info['delete_issued'] = True
            return False

        try:
            network.get()
        except NotFound:
            return True

        return False

    def validate(self):
        """Reject templates whose CIDR is not a valid IP network."""
        super(CloudNetwork, self).validate()
        try:
            netaddr.IPNetwork(self.properties[self.CIDR])
        except netaddr.core.AddrFormatError:
            raise exception.StackValidationFailed(message=_("Invalid cidr"))

    def _resolve_attribute(self, name):
        """Read 'cidr'/'label' straight off the backing network object."""
        net = self.network()
        if net:
            return six.text_type(getattr(net, name))
        return ""
# Example #18
class Cluster(res_base.BaseSenlinResource):
    """A resource that creates a Senlin Cluster.

    Cluster resource in senlin can create and manage objects of
    the same nature, e.g. Nova servers, Heat stacks, Cinder volumes, etc.
    The collection of these objects is referred to as a cluster.
    """

    entity = 'cluster'

    # Property keys accepted in the template's properties section.
    PROPERTIES = (
        NAME,
        PROFILE,
        DESIRED_CAPACITY,
        MIN_SIZE,
        MAX_SIZE,
        METADATA,
        TIMEOUT,
        POLICIES,
    ) = (
        'name',
        'profile',
        'desired_capacity',
        'min_size',
        'max_size',
        'metadata',
        'timeout',
        'policies',
    )

    # Attribute names resolvable through get_attr on this resource.
    ATTRIBUTES = (
        ATTR_NAME,
        ATTR_METADATA,
        ATTR_NODES,
        ATTR_DESIRED_CAPACITY,
        ATTR_MIN_SIZE,
        ATTR_MAX_SIZE,
        ATTR_POLICIES,
        ATTR_COLLECT,
    ) = (
        "name",
        'metadata',
        'nodes',
        'desired_capacity',
        'min_size',
        'max_size',
        'policies',
        'collect',
    )

    # Keys of each mapping inside the POLICIES list property.
    _POLICIES = (
        P_POLICY,
        P_ENABLED,
    ) = (
        "policy",
        "enabled",
    )

    # Cluster lifecycle statuses as reported by the Senlin API.
    _CLUSTER_STATUS = (CLUSTER_INIT, CLUSTER_ACTIVE, CLUSTER_ERROR,
                       CLUSTER_WARNING, CLUSTER_CREATING, CLUSTER_DELETING,
                       CLUSTER_UPDATING) = ('INIT', 'ACTIVE', 'ERROR',
                                            'WARNING', 'CREATING', 'DELETING',
                                            'UPDATING')

    properties_schema = {
        PROFILE:
        properties.Schema(
            properties.Schema.STRING,
            _('The name or id of the Senlin profile.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.CustomConstraint('senlin.profile')]),
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Name of the cluster. By default, physical resource name '
              'is used.'),
            update_allowed=True,
        ),
        DESIRED_CAPACITY:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Desired initial number of resources in cluster.'),
            default=0,
            update_allowed=True,
        ),
        MIN_SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Minimum number of resources in the cluster.'),
                          default=0,
                          update_allowed=True,
                          constraints=[constraints.Range(min=0)]),
        MAX_SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Maximum number of resources in the cluster. '
                            '-1 means unlimited.'),
                          default=-1,
                          update_allowed=True,
                          constraints=[constraints.Range(min=-1)]),
        METADATA:
        properties.Schema(
            properties.Schema.MAP,
            _('Metadata key-values defined for cluster.'),
            update_allowed=True,
            default={},
        ),
        TIMEOUT:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The number of seconds to wait for the cluster actions.'),
            update_allowed=True,
            constraints=[constraints.Range(min=0)]),
        POLICIES:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of policies to attach to this cluster.'),
            update_allowed=True,
            default=[],
            support_status=support.SupportStatus(version='8.0.0'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    P_POLICY:
                    properties.Schema(
                        properties.Schema.STRING,
                        _("The name or ID of the policy."),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('senlin.policy')
                        ]),
                    P_ENABLED:
                    properties.Schema(
                        properties.Schema.BOOLEAN,
                        _("Whether enable this policy on this cluster."),
                        default=True,
                    ),
                })),
    }

    attributes_schema = {
        ATTR_NAME:
        attributes.Schema(_("Cluster name."), type=attributes.Schema.STRING),
        ATTR_METADATA:
        attributes.Schema(_("Cluster metadata."), type=attributes.Schema.MAP),
        ATTR_DESIRED_CAPACITY:
        attributes.Schema(_("Desired capacity of the cluster."),
                          type=attributes.Schema.INTEGER),
        ATTR_NODES:
        attributes.Schema(_("Nodes list in the cluster."),
                          type=attributes.Schema.LIST,
                          cache_mode=attributes.Schema.CACHE_NONE),
        ATTR_MIN_SIZE:
        attributes.Schema(_("Min size of the cluster."),
                          type=attributes.Schema.INTEGER),
        ATTR_MAX_SIZE:
        attributes.Schema(_("Max size of the cluster."),
                          type=attributes.Schema.INTEGER),
        ATTR_POLICIES:
        attributes.Schema(
            _("Policies attached to the cluster."),
            type=attributes.Schema.LIST,
            support_status=support.SupportStatus(version='8.0.0'),
        ),
        ATTR_COLLECT:
        attributes.Schema(
            _("Attributes collected from cluster. According to the jsonpath "
              "following this attribute, it will return a list of attributes "
              "collected from the nodes of this cluster."),
            type=attributes.Schema.LIST,
            support_status=support.SupportStatus(version='8.0.0'),
            cache_mode=attributes.Schema.CACHE_NONE)
    }

    def translation_rules(self, props):
        """Resolve profile and policy name references into their IDs."""
        resolve = translation.TranslationRule.RESOLVE
        return [
            translation.TranslationRule(
                props,
                resolve,
                translation_path=[self.PROFILE],
                client_plugin=self.client_plugin(),
                finder='get_profile_id'),
            translation.TranslationRule(
                props,
                resolve,
                translation_path=[self.POLICIES, self.P_POLICY],
                client_plugin=self.client_plugin(),
                finder='get_policy_id'),
        ]

    def handle_create(self):
        """Create the cluster and queue any policy-attach actions."""
        create_params = {
            'name': (self.properties[self.NAME]
                     or self.physical_resource_name()),
            'profile_id': self.properties[self.PROFILE],
            'desired_capacity': self.properties[self.DESIRED_CAPACITY],
            'min_size': self.properties[self.MIN_SIZE],
            'max_size': self.properties[self.MAX_SIZE],
            'metadata': self.properties[self.METADATA],
            'timeout': self.properties[self.TIMEOUT]
        }

        cluster = self.client().create_cluster(**create_params)
        self.resource_id_set(cluster.id)
        # Creation is asynchronous; the action id embedded in the resource
        # location is polled later by check_create_complete.
        actions = [{
            'action_id': cluster.location.split('/')[-1],
            'done': False,
        }]
        for policy in self.properties[self.POLICIES] or []:
            actions.append({
                'func': 'cluster_attach_policy',
                'params': {
                    'cluster': cluster.id,
                    'policy': policy[self.P_POLICY],
                    'enabled': policy[self.P_ENABLED],
                },
                'action_id': None,
                'done': False,
            })
        return actions

    def check_create_complete(self, actions):
        """Poll the queued create actions; True once all have finished."""
        plugin = self.client_plugin()
        return plugin.execute_actions(actions)

    def handle_delete(self):
        """Request cluster deletion; an already-gone cluster is ignored."""
        if self.resource_id is None:
            return None
        with self.client_plugin().ignore_not_found:
            self.client().delete_cluster(self.resource_id)
        return self.resource_id

    def check_delete_complete(self, resource_id):
        """Return True when the cluster is gone (or was never created)."""
        if not resource_id:
            return True

        try:
            # Raises once the cluster no longer exists; ignore_not_found
            # re-raises anything that is not a 404.
            self.client().get_cluster(self.resource_id)
            return False
        except Exception as exc:
            self.client_plugin().ignore_not_found(exc)
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Build the list of Senlin actions needed to apply prop_diff.

        Three kinds of change are handled, in order: policy
        attach/detach/update, in-place cluster property updates, and
        cluster resize.  Returns a list of action dicts that
        check_update_complete() polls to completion.
        """
        UPDATE_PROPS = (self.NAME, self.METADATA, self.TIMEOUT, self.PROFILE)
        RESIZE_PROPS = (self.MIN_SIZE, self.MAX_SIZE, self.DESIRED_CAPACITY)
        actions = []
        if not prop_diff:
            return actions
        cluster_obj = self.client().get_cluster(self.resource_id)
        # Update Policies
        if self.POLICIES in prop_diff:
            old_policies = self.properties[self.POLICIES]
            new_policies = prop_diff[self.POLICIES]
            old_policy_ids = [p[self.P_POLICY] for p in old_policies]
            # Policies present both before and after are updated in place;
            # new-only policies are attached, old-only ones are detached.
            update_policies = [
                p for p in new_policies if p[self.P_POLICY] in old_policy_ids
            ]
            update_policy_ids = [p[self.P_POLICY] for p in update_policies]
            add_policies = [
                p for p in new_policies
                if p[self.P_POLICY] not in old_policy_ids
            ]
            remove_policies = [
                p for p in old_policies
                if p[self.P_POLICY] not in update_policy_ids
            ]
            for p in update_policies:
                params = {
                    'policy': p[self.P_POLICY],
                    'cluster': self.resource_id,
                    'enabled': p[self.P_ENABLED]
                }
                action = {
                    'func': 'cluster_update_policy',
                    'params': params,
                    'action_id': None,
                    'done': False,
                }
                actions.append(action)
            # Detach actions are queued before attach actions.
            for p in remove_policies:
                params = {
                    'policy': p[self.P_POLICY],
                    'cluster': self.resource_id,
                    'enabled': p[self.P_ENABLED]
                }
                action = {
                    'func': 'cluster_detach_policy',
                    'params': params,
                    'action_id': None,
                    'done': False,
                }
                actions.append(action)
            for p in add_policies:
                params = {
                    'policy': p[self.P_POLICY],
                    'cluster': self.resource_id,
                    'enabled': p[self.P_ENABLED]
                }
                action = {
                    'func': 'cluster_attach_policy',
                    'params': params,
                    'action_id': None,
                    'done': False,
                }
                actions.append(action)
        # Update cluster
        if any(p in prop_diff for p in UPDATE_PROPS):
            params = dict((k, v) for k, v in six.iteritems(prop_diff)
                          if k in UPDATE_PROPS)
            params['cluster'] = cluster_obj
            # The client call expects 'profile_id' rather than the
            # property name.
            if self.PROFILE in params:
                params['profile_id'] = params.pop(self.PROFILE)
            action = {
                'func': 'update_cluster',
                'params': params,
                'action_id': None,
                'done': False,
            }
            actions.append(action)
        # Resize Cluster
        if any(p in prop_diff for p in RESIZE_PROPS):
            params = dict((k, v) for k, v in six.iteritems(prop_diff)
                          if k in RESIZE_PROPS)
            # A changed desired_capacity maps to an EXACT_CAPACITY resize.
            if self.DESIRED_CAPACITY in params:
                params['adjustment_type'] = 'EXACT_CAPACITY'
                params['number'] = params.pop(self.DESIRED_CAPACITY)
            params['cluster'] = self.resource_id
            action = {
                'func': 'cluster_resize',
                'params': params,
                'action_id': None,
                'done': False,
            }
            actions.append(action)
        return actions

    def check_update_complete(self, actions):
        """Poll the outstanding update actions; True when all are done."""
        plugin = self.client_plugin()
        return plugin.execute_actions(actions)

    def validate(self):
        """Check min/max/desired capacity consistency (max may be -1)."""
        min_size = self.properties[self.MIN_SIZE]
        max_size = self.properties[self.MAX_SIZE]
        desired = self.properties[self.DESIRED_CAPACITY]
        # A max_size of -1 means "unbounded" and is exempt from checks.
        bounded = max_size != -1

        if bounded and max_size < min_size:
            raise exception.StackValidationFailed(
                message=_("%(min_size)s can not be greater than "
                          "%(max_size)s") % {
                    'min_size': self.MIN_SIZE,
                    'max_size': self.MAX_SIZE,
                })

        if desired < min_size or (bounded and desired > max_size):
            raise exception.StackValidationFailed(
                message=_("%(desired_capacity)s must be between "
                          "%(min_size)s and %(max_size)s") % {
                    'desired_capacity': self.DESIRED_CAPACITY,
                    'min_size': self.MIN_SIZE,
                    'max_size': self.MAX_SIZE,
                })

    def get_attribute(self, key, *path):
        """Resolve an attribute; ATTR_COLLECT expects a jsonpath in path[0]."""
        if self.resource_id is None:
            return None

        # Everything except ATTR_COLLECT is handled by the base class.
        if key != self.ATTR_COLLECT:
            return super(Cluster, self).get_attribute(key, *path)

        if not path:
            raise exception.InvalidTemplateAttribute(resource=self.name,
                                                     key=key)
        collected = self.client().collect_cluster_attrs(self.resource_id,
                                                        path[0])
        values = [item.attr_value for item in collected]
        return attributes.select_from_attribute(values, path[1:])

    def _show_resource(self):
        """Augment the base resource view with the attached policies."""
        details = super(Cluster, self)._show_resource()
        details[self.ATTR_POLICIES] = self.client().cluster_policies(
            self.resource_id)
        return details

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Map live cluster data back onto the updatable properties.

        :param resource_properties: current resource properties (unused,
            kept for the standard resource interface).
        :param resource_data: dict of live data fetched for this cluster.
        :returns: dict keyed by updatable property names.
        """
        reality = {}

        for key in self._update_allowed_properties:
            if key == self.PROFILE:
                # Live data exposes the profile under 'profile_id'.
                value = resource_data.get('profile_id')
            elif key == self.POLICIES:
                # Normalize each policy binding; guard against the key
                # being absent or None in the live data.
                value = [
                    {'policy': p.get('policy_id'), 'enabled': p.get('enabled')}
                    for p in (resource_data.get(self.POLICIES) or [])
                ]
            else:
                value = resource_data.get(key)
            reality[key] = value

        return reality
# Example #19
# 0
class Trunk(neutron.NeutronResource):
    """A resource for managing Neutron trunks.

    Requires Neutron Trunk Extension to be enabled::

      $ openstack extension show trunk

    The network trunk service allows multiple networks to be connected to
    an instance using a single virtual NIC (vNIC). Multiple networks can
    be presented to an instance by connecting the instance to a single port.

    Users can create a port, associate it with a trunk (as the trunk's parent)
    and launch an instance on that port. Users can dynamically attach and
    detach additional networks without disrupting operation of the instance.

    Every trunk has a parent port and can have any number (0, 1, ...) of
    subports. The parent port is the port that the instance is directly
    associated with and its traffic is always untagged inside the instance.
    Users must specify the parent port of the trunk when launching an
    instance attached to a trunk.

    A network presented by a subport is the network of the associated port.
    When creating a subport, a ``segmentation_type`` and ``segmentation_id``
    may be required by the driver so the user can distinguish the networks
    inside the instance. As of release Pike only ``segmentation_type``
    ``vlan`` is supported. ``segmentation_id`` defines the segmentation ID
    on which the subport network is presented to the instance.

    Note that some Neutron backends (primarily Open vSwitch) only allow
    trunk creation before an instance is booted on the parent port. To avoid
    a possible race condition when booting an instance with a trunk it is
    strongly recommended to refer to the trunk's parent port indirectly in
    the template via ``get_attr``. For example::

      trunk:
        type: OS::Neutron::Trunk
        properties:
          port: ...
      instance:
        type: OS::Nova::Server
        properties:
          networks:
            - { port: { get_attr: [trunk, port_id] } }

    Though other Neutron backends may tolerate the direct port reference
    (and the possible reverse ordering of API requests implied) it's a good
    idea to avoid writing Neutron backend specific templates.
    """

    entity = 'trunk'

    required_service_extension = 'trunk'

    support_status = support.SupportStatus(
        status=support.SUPPORTED,
        version='9.0.0',
    )

    PROPERTIES = (
        NAME, PARENT_PORT, SUB_PORTS, DESCRIPTION, ADMIN_STATE_UP,
    ) = (
        'name', 'port', 'sub_ports', 'description', 'admin_state_up',
    )

    _SUBPORT_KEYS = (
        PORT, SEGMENTATION_TYPE, SEGMENTATION_ID,
    ) = (
        'port', 'segmentation_type', 'segmentation_id',
    )

    _subport_schema = {
        PORT: properties.Schema(
            properties.Schema.STRING,
            _('ID or name of a port to be used as a subport.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('neutron.port'),
            ],
        ),
        SEGMENTATION_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Segmentation type to be used on the subport.'),
            required=True,
            # TODO(nilles): custom constraint 'neutron.trunk_segmentation_type'
            constraints=[
                constraints.AllowedValues(['vlan']),
            ],
        ),
        SEGMENTATION_ID: properties.Schema(
            properties.Schema.INTEGER,
            _('The segmentation ID on which the subport network is presented '
              'to the instance.'),
            required=True,
            # TODO(nilles): custom constraint 'neutron.trunk_segmentation_id'
            constraints=[
                constraints.Range(1, 4094),
            ],
        ),
    }

    ATTRIBUTES = (
        PORT_ATTR,
    ) = (
        'port_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('A string specifying a symbolic name for the trunk, which is '
              'not required to be unique.'),
            update_allowed=True,
        ),
        PARENT_PORT: properties.Schema(
            properties.Schema.STRING,
            _('ID or name of a port to be used as a parent port.'),
            required=True,
            immutable=True,
            constraints=[
                constraints.CustomConstraint('neutron.port'),
            ],
        ),
        SUB_PORTS: properties.Schema(
            properties.Schema.LIST,
            _('List with 0 or more map elements containing subport details.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema=_subport_schema,
            ),
            update_allowed=True,
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the trunk.'),
            update_allowed=True,
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Enable/disable subport addition, removal and trunk delete.'),
            update_allowed=True,
        ),
    }

    attributes_schema = {
        PORT_ATTR: attributes.Schema(
            _('ID or name of a port used as a parent port.'),
            type=attributes.Schema.STRING,
        ),
    }

    def translation_rules(self, props):
        """Resolve parent-port and subport names/IDs to port IDs."""
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.PARENT_PORT],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='port',
            ),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                translation_path=[self.SUB_PORTS, self.PORT],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='port',
            ),
        ]

    def handle_create(self):
        """Create the trunk from the resource properties."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        # The REST API expects 'port_id' keys where the resource exposes
        # the friendlier 'port' property names.
        props['port_id'] = props.pop(self.PARENT_PORT)

        if self.SUB_PORTS in props and props[self.SUB_PORTS]:
            for sub_port in props[self.SUB_PORTS]:
                sub_port['port_id'] = sub_port.pop(self.PORT)

        LOG.debug('attempt to create trunk: %s', props)
        trunk = self.client().create_trunk({'trunk': props})['trunk']
        self.resource_id_set(trunk['id'])

    def check_create_complete(self, *args):
        """Return True once the trunk reports a built status."""
        attributes = self._show_resource()
        return self.is_built(attributes)

    def handle_delete(self):
        """Delete the trunk; an already-deleted trunk is ignored."""
        if self.resource_id is not None:
            with self.client_plugin().ignore_not_found:
                LOG.debug('attempt to delete trunk: %s', self.resource_id)
                self.client().delete_trunk(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Handle update to a trunk in (at most) three neutron calls.

        Call #1) Update all changed properties but 'sub_ports'.
            PUT /v2.0/trunks/TRUNK_ID
            openstack network trunk set

        Call #2) Delete subports not needed anymore.
            PUT /v2.0/trunks/TRUNK_ID/remove_subports
            openstack network trunk unset --subport

        Call #3) Create new subports.
            PUT /v2.0/trunks/TRUNK_ID/add_subports
            openstack network trunk set --subport

        A single neutron port cannot be two subports at the same time (ie.
        have two segmentation (type, ID)s on the same trunk or to belong to
        two trunks). Therefore we have to delete old subports before creating
        new ones to avoid conflicts.
        """

        LOG.debug('attempt to update trunk %s', self.resource_id)

        # NOTE(bence romsics): We want to do set operations on the subports,
        # however we receive subports represented as dicts. In Python
        # mutable objects like dicts are not hashable so they cannot be
        # inserted into sets. So we convert subport dicts to (immutable)
        # frozensets in order to do the set operations.
        def dict2frozenset(d):
            """Convert a dict to a frozenset.

            Create an immutable equivalent of a dict, so it's hashable
            therefore can be used as an element of a set or a key of another
            dictionary.
            """
            return frozenset(d.items())

        # NOTE(bence romsics): prop_diff contains a shallow diff of the
        # properties, so if we had used that to update subports we would
        # re-create all subports even if just a single subport changed. So we
        # need to forget about prop_diff['sub_ports'] and diff out the real
        # subport changes from self.properties and json_snippet.
        if 'sub_ports' in prop_diff:
            del prop_diff['sub_ports']

        sub_ports_prop_old = self.properties[self.SUB_PORTS] or []
        sub_ports_prop_new = json_snippet.properties(
            self.properties_schema)[self.SUB_PORTS] or []

        subports_old = {dict2frozenset(d): d for d in sub_ports_prop_old}
        subports_new = {dict2frozenset(d): d for d in sub_ports_prop_new}

        old_set = set(subports_old.keys())
        new_set = set(subports_new.keys())

        delete = old_set - new_set
        create = new_set - old_set

        dicts_delete = [subports_old[fs] for fs in delete]
        dicts_create = [subports_new[fs] for fs in create]

        LOG.debug('attempt to delete subports of trunk %s: %s',
                  self.resource_id, dicts_delete)
        LOG.debug('attempt to create subports of trunk %s: %s',
                  self.resource_id, dicts_create)

        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_trunk(self.resource_id, {'trunk': prop_diff})

        if dicts_delete:
            delete_body = self.prepare_trunk_remove_subports_body(dicts_delete)
            self.client().trunk_remove_subports(self.resource_id, delete_body)

        if dicts_create:
            create_body = self.prepare_trunk_add_subports_body(dicts_create)
            self.client().trunk_add_subports(self.resource_id, create_body)

    def check_update_complete(self, *args):
        """Return True once the updated trunk reports a built status."""
        attributes = self._show_resource()
        return self.is_built(attributes)

    @staticmethod
    def prepare_trunk_remove_subports_body(subports):
        """Prepares body for PUT /v2.0/trunks/TRUNK_ID/remove_subports."""

        return {
            'sub_ports': [
                {'port_id': sp['port']} for sp in subports
            ]
        }

    @staticmethod
    def prepare_trunk_add_subports_body(subports):
        """Prepares body for PUT /v2.0/trunks/TRUNK_ID/add_subports."""

        return {
            'sub_ports': [
                {'port_id': sp['port'],
                 'segmentation_type': sp['segmentation_type'],
                 'segmentation_id': sp['segmentation_id']}
                for sp in subports
            ]
        }
# Example #20
# 0
class CloudLoadBalancer(resource.Resource):
    """Represents a Rackspace Cloud Loadbalancer."""

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    PROPERTIES = (
        NAME,
        NODES,
        PROTOCOL,
        ACCESS_LIST,
        HALF_CLOSED,
        ALGORITHM,
        CONNECTION_LOGGING,
        METADATA,
        PORT,
        TIMEOUT,
        CONNECTION_THROTTLE,
        SESSION_PERSISTENCE,
        VIRTUAL_IPS,
        CONTENT_CACHING,
        HEALTH_MONITOR,
        SSL_TERMINATION,
        ERROR_PAGE,
        HTTPS_REDIRECT,
    ) = (
        'name',
        'nodes',
        'protocol',
        'accessList',
        'halfClosed',
        'algorithm',
        'connectionLogging',
        'metadata',
        'port',
        'timeout',
        'connectionThrottle',
        'sessionPersistence',
        'virtualIps',
        'contentCaching',
        'healthMonitor',
        'sslTermination',
        'errorPage',
        'httpsRedirect',
    )

    LB_UPDATE_PROPS = (NAME, ALGORITHM, PROTOCOL, HALF_CLOSED, PORT, TIMEOUT,
                       HTTPS_REDIRECT)

    _NODE_KEYS = (
        NODE_ADDRESSES,
        NODE_PORT,
        NODE_CONDITION,
        NODE_TYPE,
        NODE_WEIGHT,
    ) = (
        'addresses',
        'port',
        'condition',
        'type',
        'weight',
    )

    _ACCESS_LIST_KEYS = (
        ACCESS_LIST_ADDRESS,
        ACCESS_LIST_TYPE,
    ) = (
        'address',
        'type',
    )

    _CONNECTION_THROTTLE_KEYS = (
        CONNECTION_THROTTLE_MAX_CONNECTION_RATE,
        CONNECTION_THROTTLE_MIN_CONNECTIONS,
        CONNECTION_THROTTLE_MAX_CONNECTIONS,
        CONNECTION_THROTTLE_RATE_INTERVAL,
    ) = (
        'maxConnectionRate',
        'minConnections',
        'maxConnections',
        'rateInterval',
    )

    _VIRTUAL_IP_KEYS = (VIRTUAL_IP_TYPE, VIRTUAL_IP_IP_VERSION,
                        VIRTUAL_IP_ID) = ('type', 'ipVersion', 'id')

    _HEALTH_MONITOR_KEYS = (
        HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION,
        HEALTH_MONITOR_DELAY,
        HEALTH_MONITOR_TIMEOUT,
        HEALTH_MONITOR_TYPE,
        HEALTH_MONITOR_BODY_REGEX,
        HEALTH_MONITOR_HOST_HEADER,
        HEALTH_MONITOR_PATH,
        HEALTH_MONITOR_STATUS_REGEX,
    ) = (
        'attemptsBeforeDeactivation',
        'delay',
        'timeout',
        'type',
        'bodyRegex',
        'hostHeader',
        'path',
        'statusRegex',
    )
    _HEALTH_MONITOR_CONNECT_KEYS = (
        HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION,
        HEALTH_MONITOR_DELAY,
        HEALTH_MONITOR_TIMEOUT,
        HEALTH_MONITOR_TYPE,
    )

    _SSL_TERMINATION_KEYS = (
        SSL_TERMINATION_SECURE_PORT,
        SSL_TERMINATION_PRIVATEKEY,
        SSL_TERMINATION_CERTIFICATE,
        SSL_TERMINATION_INTERMEDIATE_CERTIFICATE,
        SSL_TERMINATION_SECURE_TRAFFIC_ONLY,
    ) = (
        'securePort',
        'privatekey',
        'certificate',
        'intermediateCertificate',
        'secureTrafficOnly',
    )

    ATTRIBUTES = (PUBLIC_IP, VIPS) = ('PublicIp', 'virtualIps')

    ALGORITHMS = [
        "LEAST_CONNECTIONS", "RANDOM", "ROUND_ROBIN",
        "WEIGHTED_LEAST_CONNECTIONS", "WEIGHTED_ROUND_ROBIN"
    ]

    _health_monitor_schema = {
        HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION:
        properties.Schema(properties.Schema.NUMBER,
                          required=True,
                          constraints=[
                              constraints.Range(1, 10),
                          ]),
        HEALTH_MONITOR_DELAY:
        properties.Schema(properties.Schema.NUMBER,
                          required=True,
                          constraints=[
                              constraints.Range(1, 3600),
                          ]),
        HEALTH_MONITOR_TIMEOUT:
        properties.Schema(properties.Schema.NUMBER,
                          required=True,
                          constraints=[
                              constraints.Range(1, 300),
                          ]),
        HEALTH_MONITOR_TYPE:
        properties.Schema(properties.Schema.STRING,
                          required=True,
                          constraints=[
                              constraints.AllowedValues(
                                  ['CONNECT', 'HTTP', 'HTTPS']),
                          ]),
        HEALTH_MONITOR_BODY_REGEX:
        properties.Schema(properties.Schema.STRING),
        HEALTH_MONITOR_HOST_HEADER:
        properties.Schema(properties.Schema.STRING),
        HEALTH_MONITOR_PATH:
        properties.Schema(properties.Schema.STRING),
        HEALTH_MONITOR_STATUS_REGEX:
        properties.Schema(properties.Schema.STRING),
    }

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING, update_allowed=True),
        NODES:
        properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NODE_ADDRESSES:
                    properties.Schema(
                        properties.Schema.LIST,
                        required=True,
                        description=(_("IP addresses for the load balancer "
                                       "node. Must have at least one "
                                       "address.")),
                        schema=properties.Schema(properties.Schema.STRING)),
                    NODE_PORT:
                    properties.Schema(properties.Schema.INTEGER,
                                      required=True),
                    NODE_CONDITION:
                    properties.Schema(properties.Schema.STRING,
                                      default='ENABLED',
                                      constraints=[
                                          constraints.AllowedValues([
                                              'ENABLED', 'DISABLED', 'DRAINING'
                                          ]),
                                      ]),
                    NODE_TYPE:
                    properties.Schema(properties.Schema.STRING,
                                      constraints=[
                                          constraints.AllowedValues(
                                              ['PRIMARY', 'SECONDARY']),
                                      ]),
                    NODE_WEIGHT:
                    properties.Schema(properties.Schema.NUMBER,
                                      constraints=[
                                          constraints.Range(1, 100),
                                      ]),
                },
            ),
            required=True,
            update_allowed=True),
        PROTOCOL:
        properties.Schema(properties.Schema.STRING,
                          required=True,
                          constraints=[
                              constraints.AllowedValues([
                                  'DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS',
                                  'IMAPS', 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL',
                                  'POP3', 'POP3S', 'SMTP', 'TCP',
                                  'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM',
                                  'SFTP'
                              ]),
                          ],
                          update_allowed=True),
        ACCESS_LIST:
        properties.Schema(properties.Schema.LIST,
                          schema=properties.Schema(
                              properties.Schema.MAP,
                              schema={
                                  ACCESS_LIST_ADDRESS:
                                  properties.Schema(properties.Schema.STRING,
                                                    required=True),
                                  ACCESS_LIST_TYPE:
                                  properties.Schema(
                                      properties.Schema.STRING,
                                      required=True,
                                      constraints=[
                                          constraints.AllowedValues(
                                              ['ALLOW', 'DENY']),
                                      ]),
                              },
                          )),
        HALF_CLOSED:
        properties.Schema(properties.Schema.BOOLEAN, update_allowed=True),
        ALGORITHM:
        properties.Schema(properties.Schema.STRING,
                          constraints=[constraints.AllowedValues(ALGORITHMS)],
                          update_allowed=True),
        CONNECTION_LOGGING:
        properties.Schema(properties.Schema.BOOLEAN, update_allowed=True),
        METADATA:
        properties.Schema(properties.Schema.MAP, update_allowed=True),
        PORT:
        properties.Schema(properties.Schema.INTEGER,
                          required=True,
                          update_allowed=True),
        TIMEOUT:
        properties.Schema(properties.Schema.NUMBER,
                          constraints=[
                              constraints.Range(1, 120),
                          ],
                          update_allowed=True),
        CONNECTION_THROTTLE:
        properties.Schema(properties.Schema.MAP,
                          schema={
                              CONNECTION_THROTTLE_MAX_CONNECTION_RATE:
                              properties.Schema(properties.Schema.NUMBER,
                                                constraints=[
                                                    constraints.Range(
                                                        0, 100000),
                                                ]),
                              CONNECTION_THROTTLE_MIN_CONNECTIONS:
                              properties.Schema(properties.Schema.INTEGER,
                                                constraints=[
                                                    constraints.Range(1, 1000),
                                                ]),
                              CONNECTION_THROTTLE_MAX_CONNECTIONS:
                              properties.Schema(properties.Schema.INTEGER,
                                                constraints=[
                                                    constraints.Range(
                                                        1, 100000),
                                                ]),
                              CONNECTION_THROTTLE_RATE_INTERVAL:
                              properties.Schema(properties.Schema.NUMBER,
                                                constraints=[
                                                    constraints.Range(1, 3600),
                                                ]),
                          },
                          update_allowed=True),
        SESSION_PERSISTENCE:
        properties.Schema(properties.Schema.STRING,
                          constraints=[
                              constraints.AllowedValues(
                                  ['HTTP_COOKIE', 'SOURCE_IP']),
                          ],
                          update_allowed=True),
        VIRTUAL_IPS:
        properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    VIRTUAL_IP_TYPE:
                    properties.Schema(
                        properties.Schema.STRING,
                        "The type of VIP (public or internal). This property"
                        " cannot be specified if 'id' is specified. This "
                        "property must be specified if id is not specified.",
                        constraints=[
                            constraints.AllowedValues(['SERVICENET',
                                                       'PUBLIC']),
                        ]),
                    VIRTUAL_IP_IP_VERSION:
                    properties.Schema(
                        properties.Schema.STRING,
                        "IP version of the VIP. This property cannot be "
                        "specified if 'id' is specified. This property must "
                        "be specified if id is not specified.",
                        constraints=[
                            constraints.AllowedValues(['IPV6', 'IPV4']),
                        ]),
                    VIRTUAL_IP_ID:
                    properties.Schema(
                        properties.Schema.NUMBER,
                        "ID of a shared VIP to use instead of creating a "
                        "new one. This property cannot be specified if type"
                        " or version is specified.")
                },
            ),
            required=True,
            constraints=[constraints.Length(min=1)]),
        CONTENT_CACHING:
        properties.Schema(properties.Schema.STRING,
                          constraints=[
                              constraints.AllowedValues(
                                  ['ENABLED', 'DISABLED']),
                          ],
                          update_allowed=True),
        HEALTH_MONITOR:
        properties.Schema(properties.Schema.MAP,
                          schema=_health_monitor_schema,
                          update_allowed=True),
        SSL_TERMINATION:
        properties.Schema(
            properties.Schema.MAP,
            schema={
                SSL_TERMINATION_SECURE_PORT:
                properties.Schema(properties.Schema.INTEGER, default=443),
                SSL_TERMINATION_PRIVATEKEY:
                properties.Schema(properties.Schema.STRING, required=True),
                SSL_TERMINATION_CERTIFICATE:
                properties.Schema(properties.Schema.STRING, required=True),
                # only required if configuring intermediate ssl termination
                # add to custom validation
                SSL_TERMINATION_INTERMEDIATE_CERTIFICATE:
                properties.Schema(properties.Schema.STRING),
                # pyrax will default to false
                SSL_TERMINATION_SECURE_TRAFFIC_ONLY:
                properties.Schema(properties.Schema.BOOLEAN, default=False),
            },
            update_allowed=True),
        ERROR_PAGE:
        properties.Schema(properties.Schema.STRING, update_allowed=True),
        HTTPS_REDIRECT:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _("Enables or disables HTTP to HTTPS redirection for the load "
              "balancer. When enabled, any HTTP request returns status code "
              "301 (Moved Permanently), and the requester is redirected to "
              "the requested URL via the HTTPS protocol on port 443. Only "
              "available for HTTPS protocol (port=443), or HTTP protocol with "
              "a properly configured SSL termination (secureTrafficOnly=true, "
              "securePort=443)."),
            update_allowed=True,
            default=False,
            support_status=support.SupportStatus(version="2015.1"))
    }

    attributes_schema = {
        PUBLIC_IP:
        attributes.Schema(_('Public IP address of the specified instance.')),
        VIPS:
        attributes.Schema(_("A list of assigned virtual ip addresses"))
    }

    # Remote load balancer states returned by the cloud LB API; consulted by
    # _check_active() and check_delete_complete().
    ACTIVE_STATUS = 'ACTIVE'
    DELETED_STATUS = 'DELETED'
    PENDING_DELETE_STATUS = 'PENDING_DELETE'

    def __init__(self, name, json_snippet, stack):
        """Initialize the resource and cache the cloud LB client handle."""
        super(CloudLoadBalancer, self).__init__(name, json_snippet, stack)
        self.clb = self.cloud_lb()

    def cloud_lb(self):
        """Return the 'cloud_lb' service client."""
        return self.client('cloud_lb')

    def _setup_properties(self, properties, function):
        """Use defined schema properties as kwargs for loadbalancer objects."""
        if properties and function:
            return [
                function(**self._remove_none(item_dict))
                for item_dict in properties
            ]
        elif function:
            return [function()]

    def _alter_properties_for_api(self):
        """Set up required, but useless, key/value pairs.

        The following properties have useless key/value pairs which must
        be passed into the api. Set them up to make template definition easier.
        """
        session_persistence = None
        if self.SESSION_PERSISTENCE in self.properties.data:
            session_persistence = {
                'persistenceType': self.properties[self.SESSION_PERSISTENCE]
            }
        connection_logging = None
        if self.CONNECTION_LOGGING in self.properties.data:
            connection_logging = {
                "enabled": self.properties[self.CONNECTION_LOGGING]
            }
        metadata = None
        if self.METADATA in self.properties.data:
            metadata = [{
                'key': k,
                'value': v
            } for k, v in six.iteritems(self.properties[self.METADATA])]

        return (session_persistence, connection_logging, metadata)

    def _check_active(self):
        """Update the loadbalancer state, check the status."""
        loadbalancer = self.clb.get(self.resource_id)
        if loadbalancer.status == self.ACTIVE_STATUS:
            return True
        else:
            return False

    def _valid_HTTPS_redirect_with_HTTP_prot(self):
        """Determine if HTTPS redirect is valid when protocol is HTTP"""
        proto = self.properties[self.PROTOCOL]
        redir = self.properties[self.HTTPS_REDIRECT]
        termcfg = self.properties.get(self.SSL_TERMINATION) or {}
        seconly = termcfg.get(self.SSL_TERMINATION_SECURE_TRAFFIC_ONLY, False)
        secport = termcfg.get(self.SSL_TERMINATION_SECURE_PORT, 0)
        if (redir and (proto == "HTTP") and seconly and (secport == 443)):
            return True
        return False

    def _configure_post_creation(self, loadbalancer):
        """Configure all load balancer properties post creation.

        These properties can only be set after the load balancer is created.
        """
        # Each step below cooperatively waits (via yield) until the load
        # balancer returns to ACTIVE, since each configuration call puts it
        # back into a transitional state.
        if self.properties[self.ACCESS_LIST]:
            while not self._check_active():
                yield
            loadbalancer.add_access_list(self.properties[self.ACCESS_LIST])

        if self.properties[self.ERROR_PAGE]:
            while not self._check_active():
                yield
            loadbalancer.set_error_page(self.properties[self.ERROR_PAGE])

        if self.properties[self.SSL_TERMINATION]:
            while not self._check_active():
                yield
            ssl_term = self.properties[self.SSL_TERMINATION]
            loadbalancer.add_ssl_termination(
                ssl_term[self.SSL_TERMINATION_SECURE_PORT],
                ssl_term[self.SSL_TERMINATION_PRIVATEKEY],
                ssl_term[self.SSL_TERMINATION_CERTIFICATE],
                intermediateCertificate=ssl_term[
                    self.SSL_TERMINATION_INTERMEDIATE_CERTIFICATE],
                enabled=True,
                secureTrafficOnly=ssl_term[
                    self.SSL_TERMINATION_SECURE_TRAFFIC_ONLY])

        # HTTPS redirect on an HTTP listener is only enabled here, after
        # SSL termination has been configured above.
        if self._valid_HTTPS_redirect_with_HTTP_prot():
            while not self._check_active():
                yield
            loadbalancer.update(httpsRedirect=True)

        # Membership check, not truthiness: 'DISABLED' must still be applied.
        if self.CONTENT_CACHING in self.properties:
            enabled = self.properties[self.CONTENT_CACHING] == 'ENABLED'
            while not self._check_active():
                yield
            loadbalancer.content_caching = enabled

    def _process_node(self, node):
        if not node.get(self.NODE_ADDRESSES):
            yield node
        else:
            for addr in node.get(self.NODE_ADDRESSES):
                norm_node = copy.deepcopy(node)
                norm_node['address'] = addr
                del norm_node[self.NODE_ADDRESSES]
                yield norm_node

    def _process_nodes(self, node_list):
        """Lazily flatten per-node expansions into one iterator of nodes."""
        expansions = (self._process_node(node) for node in node_list)
        return itertools.chain.from_iterable(expansions)

    def _validate_https_redirect(self):
        redir = self.properties[self.HTTPS_REDIRECT]
        proto = self.properties[self.PROTOCOL]

        if (redir and (proto != "HTTPS")
                and not self._valid_HTTPS_redirect_with_HTTP_prot()):
            message = _("HTTPS redirect is only available for the HTTPS "
                        "protocol (port=443), or the HTTP protocol with "
                        "a properly configured SSL termination "
                        "(secureTrafficOnly=true, securePort=443).")
            raise exception.StackValidationFailed(message=message)

    def handle_create(self):
        """Create the load balancer and schedule post-creation configuration.

        Returns the created loadbalancer object so check_create_complete
        can poll its status.
        """
        node_list = self._process_nodes(self.properties.get(self.NODES))
        nodes = [self.clb.Node(**node) for node in node_list]
        vips = self.properties.get(self.VIRTUAL_IPS)

        virtual_ips = self._setup_properties(vips, self.clb.VirtualIP)

        (session_persistence, connection_logging,
         metadata) = self._alter_properties_for_api()

        lb_body = {
            'port': self.properties[self.PORT],
            'protocol': self.properties[self.PROTOCOL],
            'nodes': nodes,
            'virtual_ips': virtual_ips,
            'algorithm': self.properties.get(self.ALGORITHM),
            'halfClosed': self.properties.get(self.HALF_CLOSED),
            'connectionThrottle':
            self.properties.get(self.CONNECTION_THROTTLE),
            'metadata': metadata,
            'healthMonitor': self.properties.get(self.HEALTH_MONITOR),
            'sessionPersistence': session_persistence,
            'timeout': self.properties.get(self.TIMEOUT),
            'connectionLogging': connection_logging,
            self.HTTPS_REDIRECT: self.properties[self.HTTPS_REDIRECT]
        }
        # An HTTP listener cannot be created with the redirect enabled; it
        # is switched back on in _configure_post_creation() after SSL
        # termination has been configured.
        if self._valid_HTTPS_redirect_with_HTTP_prot():
            lb_body[self.HTTPS_REDIRECT] = False
        self._validate_https_redirect()

        lb_name = (self.properties.get(self.NAME)
                   or self.physical_resource_name())
        # NOTE(review): logs a single-entry dict keyed by the LB name.
        LOG.debug("Creating loadbalancer: %s" % {lb_name: lb_body})
        loadbalancer = self.clb.create(lb_name, **lb_body)
        self.resource_id_set(str(loadbalancer.id))

        # Access list, error page, SSL termination etc. can only be applied
        # once the LB exists; run those steps to completion (600s cap).
        post_create = scheduler.TaskRunner(self._configure_post_creation,
                                           loadbalancer)
        post_create(timeout=600)
        return loadbalancer

    def check_create_complete(self, loadbalancer):
        """Return True once the load balancer reports ACTIVE."""
        return self._check_active()

    def handle_check(self):
        """Verify the load balancer is ACTIVE.

        Raises:
            exception.Error: if the remote status is anything but ACTIVE.
        """
        loadbalancer = self.clb.get(self.resource_id)
        # Check the freshly fetched status directly rather than calling
        # _check_active(), which would issue a second, redundant API GET.
        if loadbalancer.status != self.ACTIVE_STATUS:
            raise exception.Error(
                _("Cloud LoadBalancer is not ACTIVE "
                  "(was: %s)") % loadbalancer.status)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Add and remove nodes specified in the prop_diff.

        Returns a list of task runners, one per changed feature, which
        check_update_complete steps to completion in order.
        """
        lb = self.clb.get(self.resource_id)
        checkers = []

        if self.NODES in prop_diff:
            checkers.extend(self._update_nodes(lb, prop_diff[self.NODES]))

        # Simple top-level properties are pushed in one update call.
        updated_props = dict((prop, prop_diff[prop])
                             for prop in six.iterkeys(prop_diff)
                             if prop in self.LB_UPDATE_PROPS)
        if updated_props:
            checkers.append(self._update_lb_properties(lb, updated_props))

        # Every other feature has a dedicated updater; dispatch them from a
        # table instead of repeating the same if-stanza eight times.
        updaters = (
            (self.HEALTH_MONITOR, self._update_health_monitor),
            (self.SESSION_PERSISTENCE, self._update_session_persistence),
            (self.SSL_TERMINATION, self._update_ssl_termination),
            (self.METADATA, self._update_metadata),
            (self.ERROR_PAGE, self._update_errorpage),
            (self.CONNECTION_LOGGING, self._update_connection_logging),
            (self.CONNECTION_THROTTLE, self._update_connection_throttle),
            (self.CONTENT_CACHING, self._update_content_caching),
        )
        for prop, updater in updaters:
            if prop in prop_diff:
                checkers.append(updater(lb, prop_diff[prop]))

        return checkers

    def _update_nodes(self, lb, updated_nodes):
        """Reconcile the LB's node set with the updated NODES property.

        Returns a list of TaskRunners: additions first (so the LB never
        drops below one node), then deletions, then in-place updates.

        Raises:
            ValueError: if the update would leave the LB with no nodes.
        """
        @retry_if_immutable
        def add_nodes(lb, new_nodes):
            lb.add_nodes(new_nodes)

        @retry_if_immutable
        def remove_node(known, node):
            known[node].delete()

        @retry_if_immutable
        def update_node(known, node):
            known[node].update()

        checkers = []
        current_nodes = lb.nodes
        diff_nodes = self._process_nodes(updated_nodes)
        # Loadbalancers can be uniquely identified by address and
        # port.  Old is a dict of all nodes the loadbalancer
        # currently knows about.
        old = dict(("{0.address}{0.port}".format(node), node)
                   for node in current_nodes)
        # New is a dict of the nodes the loadbalancer will know
        # about after this update.
        new = dict(("%s%s" % (node["address"], node[self.NODE_PORT]), node)
                   for node in diff_nodes)

        old_set = set(six.iterkeys(old))
        new_set = set(six.iterkeys(new))

        deleted = old_set.difference(new_set)
        added = new_set.difference(old_set)
        updated = new_set.intersection(old_set)

        if len(current_nodes) + len(added) - len(deleted) < 1:
            raise ValueError(
                _("The loadbalancer:%s requires at least one "
                  "node.") % self.name)
        # Add nodes present in the new map that are not in the old map.
        # Add before delete to avoid deleting the last node and getting
        # into an invalid state.  (Was a stray no-op string literal; now a
        # real comment.)
        new_nodes = [self.clb.Node(**new[lb_node]) for lb_node in added]
        if new_nodes:
            checkers.append(scheduler.TaskRunner(add_nodes, lb, new_nodes))

        # Delete loadbalancers in the old dict that are not in the
        # new dict.
        for node in deleted:
            checkers.append(scheduler.TaskRunner(remove_node, old, node))

        # Update nodes that have been changed
        for node in updated:
            node_changed = False
            for attribute in six.iterkeys(new[node]):
                new_value = new[node][attribute]
                if new_value and new_value != getattr(old[node], attribute):
                    node_changed = True
                    setattr(old[node], attribute, new_value)
            if node_changed:
                checkers.append(scheduler.TaskRunner(update_node, old, node))

        return checkers

    def _update_lb_properties(self, lb, updated_props):
        """Return a task pushing the changed top-level LB properties."""
        @retry_if_immutable
        def push_updates():
            lb.update(**updated_props)

        return scheduler.TaskRunner(push_updates)

    def _update_health_monitor(self, lb, updated_hm):
        """Return a task that replaces or removes the health monitor."""
        @retry_if_immutable
        def set_monitor():
            # add_health_monitor overwrites any existing monitor, so no
            # separate delete step is needed first.
            lb.add_health_monitor(**updated_hm)

        @retry_if_immutable
        def remove_monitor():
            lb.delete_health_monitor()

        task = remove_monitor if updated_hm is None else set_monitor
        return scheduler.TaskRunner(task)

    def _update_session_persistence(self, lb, updated_sp):
        """Return a task that sets or clears session persistence."""
        @retry_if_immutable
        def set_persistence():
            # Assigning the new mode overwrites any existing setting.
            lb.session_persistence = updated_sp

        @retry_if_immutable
        def clear_persistence():
            # Clearing is done by assigning an empty string.
            lb.session_persistence = ''

        task = clear_persistence if updated_sp is None else set_persistence
        return scheduler.TaskRunner(task)

    def _update_ssl_termination(self, lb, updated_ssl_term):
        """Return a task that configures or removes SSL termination."""
        @retry_if_immutable
        def configure_ssl():
            # add_ssl_termination replaces any existing configuration, so
            # no prior delete is required.
            lb.add_ssl_termination(**updated_ssl_term)

        @retry_if_immutable
        def drop_ssl():
            lb.delete_ssl_termination()

        task = drop_ssl if updated_ssl_term is None else configure_ssl
        return scheduler.TaskRunner(task)

    def _update_metadata(self, lb, updated_metadata):
        """Return a task that sets or clears the LB metadata."""
        @retry_if_immutable
        def set_meta():
            lb.set_metadata(updated_metadata)

        @retry_if_immutable
        def clear_meta():
            lb.delete_metadata()

        task = clear_meta if updated_metadata is None else set_meta
        return scheduler.TaskRunner(task)

    def _update_errorpage(self, lb, updated_errorpage):
        """Return a task that sets or clears the custom error page."""
        @retry_if_immutable
        def set_page():
            lb.set_error_page(updated_errorpage)

        @retry_if_immutable
        def clear_page():
            lb.clear_error_page()

        task = clear_page if updated_errorpage is None else set_page
        return scheduler.TaskRunner(task)

    def _update_connection_logging(self, lb, updated_cl):
        """Return a task toggling connection logging on or off."""
        @retry_if_immutable
        def turn_on():
            lb.connection_logging = True

        @retry_if_immutable
        def turn_off():
            lb.connection_logging = False

        return scheduler.TaskRunner(turn_on if updated_cl else turn_off)

    def _update_connection_throttle(self, lb, updated_ct):
        """Return a task that applies or removes connection throttling."""
        @retry_if_immutable
        def apply_throttle():
            lb.add_connection_throttle(**updated_ct)

        @retry_if_immutable
        def remove_throttle():
            lb.delete_connection_throttle()

        task = remove_throttle if updated_ct is None else apply_throttle
        return scheduler.TaskRunner(task)

    def _update_content_caching(self, lb, updated_cc):
        """Return a task toggling content caching on or off."""
        @retry_if_immutable
        def turn_on():
            lb.content_caching = True

        @retry_if_immutable
        def turn_off():
            lb.content_caching = False

        return scheduler.TaskRunner(
            turn_on if updated_cc == 'ENABLED' else turn_off)

    def check_update_complete(self, checkers):
        """Step every pending task in order; True once all have finished."""
        for task in checkers:
            if not task.started():
                task.start()
            done = task.step()
            if not done:
                return False
        return True

    def check_delete_complete(self, *args):
        """Poll deletion progress; return True once the LB is gone.

        Returns True when the LB no longer exists (or was never created);
        otherwise triggers deletion if it has not started yet and returns
        False so the engine polls again.
        """
        if self.resource_id is None:
            return True

        try:
            loadbalancer = self.clb.get(self.resource_id)
        except NotFound:
            return True

        if loadbalancer.status == self.DELETED_STATUS:
            return True

        elif loadbalancer.status == self.PENDING_DELETE_STATUS:
            # Deletion already in progress; keep polling.
            return False

        else:
            try:
                loadbalancer.delete()
            except Exception as exc:
                # The API rejects changes while the LB is in a transitional
                # state; retry on the next poll instead of failing.
                if lb_immutable(exc):
                    return False
                raise

        return False

    def _remove_none(self, property_dict):
        """Remove None values that would cause schema validation problems.

        These are values that may be initialized to None.
        """
        return dict((key, value)
                    for (key, value) in six.iteritems(property_dict)
                    if value is not None)

    def validate(self):
        """Validate any of the provided params."""
        res = super(CloudLoadBalancer, self).validate()
        if res:
            return res

        # halfClosed is only meaningful for raw TCP protocols.
        if self.properties.get(self.HALF_CLOSED):
            if not (self.properties[self.PROTOCOL] == 'TCP'
                    or self.properties[self.PROTOCOL] == 'TCP_CLIENT_FIRST'):
                message = (_('The %s property is only available for the TCP '
                             'or TCP_CLIENT_FIRST protocols') %
                           self.HALF_CLOSED)
                raise exception.StackValidationFailed(message=message)

        # health_monitor connect and http types require completely different
        # schema
        if self.properties.get(self.HEALTH_MONITOR):
            prop_val = self.properties[self.HEALTH_MONITOR]
            health_monitor = self._remove_none(prop_val)

            schema = self._health_monitor_schema
            # CONNECT monitors only accept the subset of keys listed in
            # _HEALTH_MONITOR_CONNECT_KEYS; re-validate against that subset.
            if health_monitor[self.HEALTH_MONITOR_TYPE] == 'CONNECT':
                schema = dict((k, v) for k, v in schema.items()
                              if k in self._HEALTH_MONITOR_CONNECT_KEYS)
            properties.Properties(schema, health_monitor, function.resolve,
                                  self.name).validate()

        # validate if HTTPS_REDIRECT is true
        self._validate_https_redirect()
        # if a vip specifies an id, it can't specify version or type;
        # otherwise version and type are required
        for vip in self.properties.get(self.VIRTUAL_IPS, []):
            has_id = vip.get(self.VIRTUAL_IP_ID) is not None
            has_version = vip.get(self.VIRTUAL_IP_IP_VERSION) is not None
            has_type = vip.get(self.VIRTUAL_IP_TYPE) is not None
            if has_id:
                if (has_version or has_type):
                    message = _("Cannot specify type or version if VIP id is"
                                " specified.")
                    raise exception.StackValidationFailed(message=message)
            elif not (has_version and has_type):
                message = _("Must specify VIP type and version if no id "
                            "specified.")
                raise exception.StackValidationFailed(message=message)

    def _public_ip(self, lb):
        for ip in lb.virtual_ips:
            if ip.type == 'PUBLIC':
                return six.text_type(ip.address)

    def _resolve_attribute(self, key):
        """Resolve the PUBLIC_IP or VIPS attribute for this resource.

        Returns None when the resource has not been created yet.

        Raises:
            exception.InvalidTemplateAttribute: for any other key.
        """
        if not self.resource_id:
            return None
        lb = self.clb.get(self.resource_id)
        # Map each attribute to a thunk so only the requested value is
        # actually computed.
        attribute_function = {
            self.PUBLIC_IP: lambda: self._public_ip(lb),
            self.VIPS: lambda: [{"id": vip.id,
                                 "type": vip.type,
                                 "ip_version": vip.ip_version,
                                 "address": vip.address}
                                for vip in lb.virtual_ips],
        }
        if key not in attribute_function:
            raise exception.InvalidTemplateAttribute(resource=self.name,
                                                     key=key)
        function = attribute_function[key]()
        LOG.info(_LI('%(name)s.GetAtt(%(key)s) == %(function)s'), {
            'name': self.name,
            'key': key,
            'function': function
        })
        return function
Exemple #21
0
class ZaqarQueue(resource.Resource):
    """A resource for managing Zaqar queues.

    Queue is a logical entity that groups messages. Ideally a queue is created
    per work type. For example, if you want to compress files, you would create
    a queue dedicated for this job. Any application that reads from this queue
    would only compress files.
    """

    default_client_name = "zaqar"

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        NAME,
        METADATA,
    ) = (
        'name',
        'metadata',
    )

    ATTRIBUTES = (
        QUEUE_ID,
        HREF,
    ) = (
        'queue_id',
        'href',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _("Name of the queue instance to create."),
                          required=True),
        METADATA:
        properties.Schema(properties.Schema.MAP,
                          description=_(
                              "Arbitrary key/value metadata to store "
                              "contextual information about this queue."),
                          update_allowed=True)
    }

    attributes_schema = {
        QUEUE_ID:
        attributes.Schema(
            _("ID of the queue."),
            cache_mode=attributes.Schema.CACHE_NONE,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_("Use get_resource|Ref command instead. "
                              "For example: { get_resource : "
                              "<resource_name> }"),
                    version='2015.1',
                    previous_status=support.SupportStatus(version='2014.1')))),
        HREF:
        attributes.Schema(_("The resource href of the queue.")),
    }

    def physical_resource_name(self):
        """Queues are addressed by name, so the NAME property is used."""
        return self.properties[self.NAME]

    def handle_create(self):
        """Create a zaqar message queue."""
        queue_name = self.physical_resource_name()
        queue = self.client().queue(queue_name, auto_create=False)
        # Use the METADATA property constant instead of a hard-coded
        # 'metadata' string so the key stays in sync with PROPERTIES.
        metadata = self.properties.get(self.METADATA)
        if metadata:
            queue.metadata(new_meta=metadata)
        self.resource_id_set(queue_name)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update queue metadata."""
        if self.METADATA in prop_diff:
            queue = self.client().queue(self.resource_id, auto_create=False)
            queue.metadata(new_meta=prop_diff[self.METADATA])

    def handle_delete(self):
        """Delete a zaqar message queue."""
        if not self.resource_id:
            return
        with self.client_plugin().ignore_not_found:
            self.client().queue(self.resource_id, auto_create=False).delete()

    def href(self):
        """Return the API URL of this queue under the service endpoint."""
        api_endpoint = self.client().api_url
        queue_name = self.physical_resource_name()
        if api_endpoint.endswith('/'):
            return '%squeues/%s' % (api_endpoint, queue_name)
        else:
            return '%s/queues/%s' % (api_endpoint, queue_name)

    def _resolve_attribute(self, name):
        """QUEUE_ID is the resource id itself; HREF is computed on demand."""
        if name == self.QUEUE_ID:
            return self.resource_id
        elif name == self.HREF:
            return self.href()

    def _show_resource(self):
        """Fetch the queue's live metadata for resource inspection."""
        queue = self.client().queue(self.resource_id, auto_create=False)
        metadata = queue.metadata()
        return {self.METADATA: metadata}

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Map observed queue state back onto this resource's properties."""
        return {
            self.NAME: self.resource_id,
            self.METADATA: resource_data[self.METADATA]
        }
Exemple #22
0
class Server(stack_user.StackUser):

    # Template property names (Python constants on the left, the
    # template-facing keys on the right).
    PROPERTIES = (
        NAME, IMAGE, BLOCK_DEVICE_MAPPING, FLAVOR,
        FLAVOR_UPDATE_POLICY, IMAGE_UPDATE_POLICY, KEY_NAME,
        ADMIN_USER, AVAILABILITY_ZONE, SECURITY_GROUPS, NETWORKS,
        SCHEDULER_HINTS, METADATA, USER_DATA_FORMAT, USER_DATA,
        RESERVATION_ID, CONFIG_DRIVE, DISK_CONFIG, PERSONALITY,
        ADMIN_PASS, SOFTWARE_CONFIG_TRANSPORT
    ) = (
        'name', 'image', 'block_device_mapping', 'flavor',
        'flavor_update_policy', 'image_update_policy', 'key_name',
        'admin_user', 'availability_zone', 'security_groups', 'networks',
        'scheduler_hints', 'metadata', 'user_data_format', 'user_data',
        'reservation_id', 'config_drive', 'diskConfig', 'personality',
        'admin_pass', 'software_config_transport'
    )

    # Keys of each map inside the block_device_mapping list property.
    _BLOCK_DEVICE_MAPPING_KEYS = (
        BLOCK_DEVICE_MAPPING_DEVICE_NAME, BLOCK_DEVICE_MAPPING_VOLUME_ID,
        BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
        BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
        BLOCK_DEVICE_MAPPING_DELETE_ON_TERM,
    ) = (
        'device_name', 'volume_id',
        'snapshot_id',
        'volume_size',
        'delete_on_termination',
    )

    # Keys of each map inside the networks list property ('uuid' is the
    # deprecated spelling of 'network' — see the schema below).
    _NETWORK_KEYS = (
        NETWORK_UUID, NETWORK_ID, NETWORK_FIXED_IP, NETWORK_PORT,
    ) = (
        'uuid', 'network', 'fixed_ip', 'port',
    )

    # Allowed values of the user_data_format property.
    _SOFTWARE_CONFIG_FORMATS = (
        HEAT_CFNTOOLS, RAW, SOFTWARE_CONFIG
    ) = (
        'HEAT_CFNTOOLS', 'RAW', 'SOFTWARE_CONFIG'
    )

    # Allowed values of the software_config_transport property.
    _SOFTWARE_CONFIG_TRANSPORTS = (
        POLL_SERVER_CFN, POLL_SERVER_HEAT
    ) = (
        'POLL_SERVER_CFN', 'POLL_SERVER_HEAT'
    )

    # Attribute names exposed via get_attr.
    ATTRIBUTES = (
        SHOW, ADDRESSES, NETWORKS_ATTR, FIRST_ADDRESS, INSTANCE_NAME,
        ACCESSIPV4, ACCESSIPV6,
    ) = (
        'show', 'addresses', 'networks', 'first_address', 'instance_name',
        'accessIPv4', 'accessIPv6',
    )

    # Schema for every key listed in PROPERTIES above; properties marked
    # update_allowed=True are handled in-place by handle_update().
    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Server name.'),
            update_allowed=True
        ),
        IMAGE: properties.Schema(
            properties.Schema.STRING,
            _('The ID or name of the image to boot with.'),
            constraints=[
                constraints.CustomConstraint('glance.image')
            ],
            update_allowed=True
        ),
        BLOCK_DEVICE_MAPPING: properties.Schema(
            properties.Schema.LIST,
            _('Block device mappings for this server.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema(
                        properties.Schema.STRING,
                        _('A device name where the volume will be '
                          'attached in the system at /dev/device_name. '
                          'This value is typically vda.'),
                        required=True
                    ),
                    BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema(
                        properties.Schema.STRING,
                        _('The ID of the volume to boot from. Only one '
                          'of volume_id or snapshot_id should be '
                          'provided.')
                    ),
                    BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema(
                        properties.Schema.STRING,
                        _('The ID of the snapshot to create a volume '
                          'from.')
                    ),
                    BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema(
                        properties.Schema.INTEGER,
                        _('The size of the volume, in GB. It is safe to '
                          'leave this blank and have the Compute service '
                          'infer the size.')
                    ),
                    BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema(
                        properties.Schema.BOOLEAN,
                        _('Indicate whether the volume should be deleted '
                          'when the server is terminated.')
                    ),
                },
            )
        ),
        FLAVOR: properties.Schema(
            properties.Schema.STRING,
            _('The ID or name of the flavor to boot onto.'),
            required=True,
            update_allowed=True
        ),
        FLAVOR_UPDATE_POLICY: properties.Schema(
            properties.Schema.STRING,
            _('Policy on how to apply a flavor update; either by requesting '
              'a server resize or by replacing the entire server.'),
            default='RESIZE',
            constraints=[
                constraints.AllowedValues(['RESIZE', 'REPLACE']),
            ],
            update_allowed=True
        ),
        IMAGE_UPDATE_POLICY: properties.Schema(
            properties.Schema.STRING,
            _('Policy on how to apply an image-id update; either by '
              'requesting a server rebuild or by replacing the entire server'),
            default='REPLACE',
            constraints=[
                constraints.AllowedValues(['REBUILD', 'REPLACE',
                                           'REBUILD_PRESERVE_EPHEMERAL']),
            ],
            update_allowed=True
        ),
        KEY_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of keypair to inject into the server.'),
            constraints=[
                constraints.CustomConstraint('nova.keypair')
            ]
        ),
        ADMIN_USER: properties.Schema(
            properties.Schema.STRING,
            _('Name of the administrative user to use on the server. '
              'This property will be removed from Juno in favor of the '
              'default cloud-init user set up for each image (e.g. "ubuntu" '
              'for Ubuntu 12.04+, "fedora" for Fedora 19+ and "cloud-user" '
              'for CentOS/RHEL 6.5).'),
            support_status=support.SupportStatus(status=support.DEPRECATED)
        ),
        AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _('Name of the availability zone for server placement.')
        ),
        SECURITY_GROUPS: properties.Schema(
            properties.Schema.LIST,
            _('List of security group names or IDs. Cannot be used if '
              'neutron ports are associated with this server; assign '
              'security groups to the ports instead.'),
            default=[]
        ),
        NETWORKS: properties.Schema(
            properties.Schema.LIST,
            _('An ordered list of nics to be added to this server, with '
              'information about connected networks, fixed ips, port etc.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NETWORK_UUID: properties.Schema(
                        properties.Schema.STRING,
                        _('ID of network to create a port on.'),
                        support_status=support.SupportStatus(
                            support.DEPRECATED,
                            _('Use property %s.') % NETWORK_ID)
                    ),
                    NETWORK_ID: properties.Schema(
                        properties.Schema.STRING,
                        _('Name or ID of network to create a port on.')
                    ),
                    NETWORK_FIXED_IP: properties.Schema(
                        properties.Schema.STRING,
                        _('Fixed IP address to specify for the port '
                          'created on the requested network.')
                    ),
                    NETWORK_PORT: properties.Schema(
                        properties.Schema.STRING,
                        _('ID of an existing port to associate with this '
                          'server.')
                    ),
                },
            ),
            update_allowed=True
        ),
        SCHEDULER_HINTS: properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary key-value pairs specified by the client to help '
              'boot a server.')
        ),
        METADATA: properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary key/value metadata to store for this server. Both '
              'keys and values must be 255 characters or less.  Non-string '
              'values will be serialized to JSON (and the serialized '
              'string must be 255 characters or less).'),
            update_allowed=True
        ),
        USER_DATA_FORMAT: properties.Schema(
            properties.Schema.STRING,
            _('How the user_data should be formatted for the server. For '
              'HEAT_CFNTOOLS, the user_data is bundled as part of the '
              'heat-cfntools cloud-init boot configuration data. For RAW '
              'the user_data is passed to Nova unmodified. '
              'For SOFTWARE_CONFIG user_data is bundled as part of the '
              'software config data, and metadata is derived from any '
              'associated SoftwareDeployment resources.'),
            default=HEAT_CFNTOOLS,
            constraints=[
                constraints.AllowedValues(_SOFTWARE_CONFIG_FORMATS),
            ]
        ),
        SOFTWARE_CONFIG_TRANSPORT: properties.Schema(
            properties.Schema.STRING,
            _('How the server should receive the metadata required for '
              'software configuration. POLL_SERVER_CFN will allow calls to '
              'the cfn API action DescribeStackResource authenticated with '
              'the provided keypair. POLL_SERVER_HEAT will allow calls to '
              'the Heat API resource-show using the provided keystone '
              'credentials.'),
            default=POLL_SERVER_CFN,
            constraints=[
                constraints.AllowedValues(_SOFTWARE_CONFIG_TRANSPORTS),
            ]
        ),
        USER_DATA: properties.Schema(
            properties.Schema.STRING,
            _('User data script to be executed by cloud-init.'),
            default=''
        ),
        RESERVATION_ID: properties.Schema(
            properties.Schema.STRING,
            _('A UUID for the set of servers being requested.')
        ),
        CONFIG_DRIVE: properties.Schema(
            properties.Schema.BOOLEAN,
            _('If True, enable config drive on the server.')
        ),
        DISK_CONFIG: properties.Schema(
            properties.Schema.STRING,
            _('Control how the disk is partitioned when the server is '
              'created.'),
            constraints=[
                constraints.AllowedValues(['AUTO', 'MANUAL']),
            ]
        ),
        PERSONALITY: properties.Schema(
            properties.Schema.MAP,
            _('A map of files to create/overwrite on the server upon boot. '
              'Keys are file names and values are the file contents.'),
            default={}
        ),
        ADMIN_PASS: properties.Schema(
            properties.Schema.STRING,
            _('The administrator password for the server.'),
            required=False,
            update_allowed=True
        ),
    }

    # Schema for the ATTRIBUTES resolved by _resolve_attribute().
    attributes_schema = {
        SHOW: attributes.Schema(
            _('A dict of all server details as returned by the API.')
        ),
        ADDRESSES: attributes.Schema(
            _('A dict of all network addresses with corresponding port_id.')
        ),
        NETWORKS_ATTR: attributes.Schema(
            _('A dict of assigned network addresses of the form: '
              '{"public": [ip1, ip2...], "private": [ip3, ip4]}.')
        ),
        FIRST_ADDRESS: attributes.Schema(
            _('Convenience attribute to fetch the first assigned network '
              'address, or an empty string if nothing has been assigned at '
              'this time. Result may not be predictable if the server has '
              'addresses from more than one network.'),
            support_status=support.SupportStatus(
                status=support.DEPRECATED,
                message=_('Use the networks attribute instead of '
                          'first_address. For example: "{get_attr: '
                          '[<server name>, networks, <network name>, 0]}"')
            )
        ),
        INSTANCE_NAME: attributes.Schema(
            _('AWS compatible instance name.')
        ),
        ACCESSIPV4: attributes.Schema(
            _('The manually assigned alternative public IPv4 address '
              'of the server.')
        ),
        ACCESSIPV6: attributes.Schema(
            _('The manually assigned alternative public IPv6 address '
              'of the server.')
        ),
    }

    # Server host name limit to 53 characters by due to typical default
    # linux HOST_NAME_MAX of 64, minus the .novalocal appended to the name
    physical_resource_name_limit = 53

    def __init__(self, name, json_snippet, stack):
        # Re-register the access-allowed handler on every construction
        # (e.g. engine restart) for software-config servers, so the
        # in-instance agent can keep polling for deployment metadata.
        super(Server, self).__init__(name, json_snippet, stack)
        if self.user_data_software_config():
            self._register_access_key()

    def physical_resource_name(self):
        """Prefer the user-specified NAME property; fall back to the
        generated name from the base class."""
        configured_name = self.properties.get(self.NAME)
        return configured_name or super(Server,
                                        self).physical_resource_name()

    def _config_drive(self):
        """Return the config_drive flag to pass to nova.

        This method is overridden by the derived CloudServer resource.
        """
        return self.properties.get(self.CONFIG_DRIVE)

    def _populate_deployments_metadata(self):
        """Seed the resource metadata with an os-collect-config section.

        The section tells the in-instance collector how to poll for
        deployment data: via the Heat API (stack user id/password) or via
        the CFN API (ec2 access/secret key), depending on the configured
        software config transport.
        """
        meta = self.metadata_get(True) or {}
        # Ensure the deployments list exists even before any deployment.
        meta['deployments'] = meta.get('deployments', [])
        if self.transport_poll_server_heat():
            meta['os-collect-config'] = {'heat': {
                'user_id': self._get_user_id(),
                'password': self.password,
                'auth_url': self.context.auth_url,
                'project_id': self.stack.stack_user_project_id,
                'stack_id': self.stack.identifier().stack_path(),
                'resource_name': self.name}
            }
        elif self.transport_poll_server_cfn():
            meta['os-collect-config'] = {'cfn': {
                'metadata_url': '%s/v1/' % cfg.CONF.heat_metadata_server_url,
                'access_key_id': self.access_key,
                'secret_access_key': self.secret_key,
                'stack_name': self.stack.name,
                'path': '%s.Metadata' % self.name}
            }
        self.metadata_set(meta)

    def _register_access_key(self):
        '''
        Access is limited to this resource, which created the keypair
        '''
        def access_allowed(resource_name):
            return resource_name == self.name

        if self.transport_poll_server_cfn():
            self.stack.register_access_allowed_handler(
                self.access_key, access_allowed)
        elif self.transport_poll_server_heat():
            self.stack.register_access_allowed_handler(
                self._get_user_id(), access_allowed)

    def _create_transport_credentials(self):
        """Create credentials matching SOFTWARE_CONFIG_TRANSPORT.

        CFN polling needs a stack user plus an ec2 keypair; Heat API
        polling needs a stack user with a freshly generated password.
        Either way the access handler is (re)registered afterwards.
        """
        if self.transport_poll_server_cfn():
            self._create_user()
            self._create_keypair()

        elif self.transport_poll_server_heat():
            self.password = uuid.uuid4().hex
            self._create_user()

        self._register_access_key()

    @property
    def access_key(self):
        # ec2 access key id for the CFN transport; presumably stored by
        # the stack_user keypair machinery — confirm in _create_keypair().
        return self.data().get('access_key')

    @property
    def secret_key(self):
        # ec2 secret key for the CFN transport (see access_key above).
        return self.data().get('secret_key')

    @property
    def password(self):
        # Stack-user password for the Heat API transport.
        return self.data().get('password')

    @password.setter
    def password(self, password):
        # None clears the stored value; otherwise store it with the third
        # argument set True — NOTE(review): presumably marks it sensitive/
        # redacted, confirm against Resource.data_set().
        if password is None:
            self.data_delete('password')
        else:
            self.data_set('password', password, True)

    def user_data_raw(self):
        """True when user_data is to be passed to Nova unmodified."""
        fmt = self.properties.get(self.USER_DATA_FORMAT)
        return fmt == self.RAW

    def user_data_software_config(self):
        """True when user_data is handled as software config data."""
        fmt = self.properties.get(self.USER_DATA_FORMAT)
        return fmt == self.SOFTWARE_CONFIG

    def transport_poll_server_cfn(self):
        """True when software config is delivered via the CFN API."""
        transport = self.properties.get(self.SOFTWARE_CONFIG_TRANSPORT)
        return transport == self.POLL_SERVER_CFN

    def transport_poll_server_heat(self):
        """True when software config is delivered via the Heat API."""
        transport = self.properties.get(self.SOFTWARE_CONFIG_TRANSPORT)
        return transport == self.POLL_SERVER_HEAT

    def handle_create(self):
        """Boot the nova server.

        Resolves user_data (possibly from a stored SoftwareConfig),
        creates transport credentials for software-config servers,
        resolves image/flavor names to IDs and issues the create call.
        Returns the server object for check_create_complete().
        """
        security_groups = self.properties.get(self.SECURITY_GROUPS)

        user_data_format = self.properties.get(self.USER_DATA_FORMAT)
        ud_content = self.properties.get(self.USER_DATA)
        if self.user_data_software_config() or self.user_data_raw():
            if uuidutils.is_uuid_like(ud_content):
                # attempt to load the userdata from software config
                try:
                    ud_content = sc.SoftwareConfig.get_software_config(
                        self.heat(), ud_content)
                except exception.SoftwareConfigMissing:
                    # no config was found, so do not modify the user_data
                    pass

        if self.user_data_software_config():
            self._create_transport_credentials()
            self._populate_deployments_metadata()

        # Explicit ADMIN_USER wins, then the (deprecated) instance_user
        # config option; otherwise leave it to the image's default user.
        if self.properties[self.ADMIN_USER]:
            instance_user = self.properties[self.ADMIN_USER]
        elif cfg.CONF.instance_user:
            instance_user = cfg.CONF.instance_user
        else:
            instance_user = None

        userdata = nova_utils.build_userdata(
            self,
            ud_content,
            instance_user=instance_user,
            user_data_format=user_data_format)

        flavor = self.properties[self.FLAVOR]
        availability_zone = self.properties[self.AVAILABILITY_ZONE]

        image = self.properties.get(self.IMAGE)
        if image:
            image = glance_utils.get_image_id(self.glance(), image)

        flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)

        instance_meta = self.properties.get(self.METADATA)
        if instance_meta is not None:
            instance_meta = nova_utils.meta_serialize(instance_meta)

        scheduler_hints = self.properties.get(self.SCHEDULER_HINTS)
        nics = self._build_nics(self.properties.get(self.NETWORKS))
        block_device_mapping = self._build_block_device_mapping(
            self.properties.get(self.BLOCK_DEVICE_MAPPING))
        reservation_id = self.properties.get(self.RESERVATION_ID)
        disk_config = self.properties.get(self.DISK_CONFIG)
        admin_pass = self.properties.get(self.ADMIN_PASS) or None
        personality_files = self.properties.get(self.PERSONALITY)
        key_name = self.properties.get(self.KEY_NAME)

        server = None
        try:
            server = self.nova().servers.create(
                name=self.physical_resource_name(),
                image=image,
                flavor=flavor_id,
                key_name=key_name,
                security_groups=security_groups,
                userdata=userdata,
                meta=instance_meta,
                scheduler_hints=scheduler_hints,
                nics=nics,
                availability_zone=availability_zone,
                block_device_mapping=block_device_mapping,
                reservation_id=reservation_id,
                config_drive=self._config_drive(),
                disk_config=disk_config,
                files=personality_files,
                admin_pass=admin_pass)
        finally:
            # Avoid a race condition where the thread could be cancelled
            # before the ID is stored
            if server is not None:
                self.resource_id_set(server.id)

        return server

    def check_create_complete(self, server):
        """Poll the server from handle_create(); True once it is ACTIVE."""
        return self._check_active(server)

    def _check_active(self, server):
        """Return True once the server is ACTIVE, False while building.

        Raises Error when the server went to ERROR or any unexpected
        non-deferred state.
        """
        if server.status != 'ACTIVE':
            nova_utils.refresh_server(server)

        # Some clouds append extra (STATUS) strings to the status
        short_server_status = server.status.split('(')[0]
        if short_server_status in nova_utils.deferred_server_statuses:
            return False
        elif server.status == 'ACTIVE':
            return True
        elif server.status == 'ERROR':
            exc = exception.Error(_('Creation of server %s failed.') %
                                  server.name)
            raise exc
        else:
            exc = exception.Error(_('Creation of server %(server)s failed '
                                    'with unknown status: %(status)s') %
                                  dict(server=server.name,
                                       status=server.status))
            raise exc

    @classmethod
    def _build_block_device_mapping(cls, bdm):
        if not bdm:
            return None
        bdm_dict = {}
        for mapping in bdm:
            mapping_parts = []
            snapshot_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
            if snapshot_id:
                mapping_parts.append(snapshot_id)
                mapping_parts.append('snap')
            else:
                volume_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID)
                mapping_parts.append(volume_id)
                mapping_parts.append('')

            volume_size = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE)
            delete = mapping.get(cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM)
            if volume_size or delete:
                mapping_parts.append(str(volume_size or 0))
            if delete:
                mapping_parts.append(str(delete))

            device_name = mapping.get(cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME)
            bdm_dict[device_name] = ':'.join(mapping_parts)

        return bdm_dict

    def _build_nics(self, networks):
        """Translate the NETWORKS property into nova 'nics' boot arguments.

        Network names are resolved to IDs via nova; the deprecated 'uuid'
        key is honoured but overridden by 'network' when both are given.
        Returns None when no networks were requested.
        """
        if not networks:
            return None

        nics = []

        for net_data in networks:
            nic_info = {}
            if net_data.get(self.NETWORK_UUID):
                nic_info['net-id'] = net_data[self.NETWORK_UUID]
            label_or_uuid = net_data.get(self.NETWORK_ID)
            if label_or_uuid:
                if uuidutils.is_uuid_like(label_or_uuid):
                    nic_info['net-id'] = label_or_uuid
                else:
                    # Resolve a network label to its ID via nova.
                    network = self.nova().networks.find(label=label_or_uuid)
                    nic_info['net-id'] = network.id
            if net_data.get(self.NETWORK_FIXED_IP):
                nic_info['v4-fixed-ip'] = net_data[self.NETWORK_FIXED_IP]
            if net_data.get(self.NETWORK_PORT):
                nic_info['port-id'] = net_data[self.NETWORK_PORT]
            nics.append(nic_info)
        return nics

    def _add_port_for_address(self, server):
        nets = copy.deepcopy(server.addresses)
        ifaces = server.interface_list()
        ip_mac_mapping_on_port_id = dict(((iface.fixed_ips[0]['ip_address'],
                                           iface.mac_addr), iface.port_id)
                                         for iface in ifaces)
        for net_name in nets:
            for addr in nets[net_name]:
                addr['port'] = ip_mac_mapping_on_port_id.get(
                    (addr['addr'], addr['OS-EXT-IPS-MAC:mac_addr']))
        return nets

    def _resolve_attribute(self, name):
        """Resolve server attributes; returns '' when the server is gone."""
        if name == self.FIRST_ADDRESS:
            return nova_utils.server_to_ipaddress(
                self.nova(), self.resource_id) or ''
        # All remaining attributes need the live server record.
        try:
            server = self.nova().servers.get(self.resource_id)
        except clients.novaclient.exceptions.NotFound as ex:
            LOG.warn(_('Instance (%(server)s) not found: %(ex)s')
                     % {'server': self.resource_id, 'ex': ex})
            return ''
        if name == self.ADDRESSES:
            return self._add_port_for_address(server)
        if name == self.NETWORKS_ATTR:
            return server.networks
        if name == self.INSTANCE_NAME:
            return server._info.get('OS-EXT-SRV-ATTR:instance_name')
        if name == self.ACCESSIPV4:
            return server.accessIPv4
        if name == self.ACCESSIPV6:
            return server.accessIPv6
        if name == self.SHOW:
            return server._info

    def add_dependencies(self, deps):
        """Add implicit dependencies on subnets of attached networks."""
        super(Server, self).add_dependencies(deps)
        # Depend on any Subnet in this template with the same
        # network_id as the networks attached to this server.
        # It is not known which subnet a server might be assigned
        # to so all subnets in a network should be created before
        # the servers in that network.
        for res in self.stack.itervalues():
            if (res.has_interface('OS::Neutron::Subnet')):
                subnet_net = res.properties.get(subnet.Subnet.NETWORK_ID)
                for net in self.properties.get(self.NETWORKS):
                    # we do not need to worry about NETWORK_ID values which are
                    # names instead of UUIDs since these were not created
                    # by this stack
                    net_id = (net.get(self.NETWORK_ID) or
                              net.get(self.NETWORK_UUID))
                    if net_id and net_id == subnet_net:
                        deps += (self, res)
                        break

    def _get_network_matches(self, old_networks, new_networks):
        # make new_networks similar on old_networks
        for net in new_networks:
            for key in ('port', 'network', 'fixed_ip', 'uuid'):
                net.setdefault(key)
        # find matches and remove them from old and new networks
        not_updated_networks = []
        for net in old_networks:
            if net in new_networks:
                new_networks.remove(net)
                not_updated_networks.append(net)
        for net in not_updated_networks:
            old_networks.remove(net)
        return not_updated_networks

    def update_networks_matching_iface_port(self, nets, interfaces):
        """Fill in each net entry's 'port' from the server's interfaces.

        An interface is matched first by explicit port, or by network id
        plus fixed IP; failing that, by a "bare" entry that carries only a
        network id.  Entries are mutated in place.
        """
        def locate(nets, port, net_id, ip):
            # A bare entry has exactly these three keys and only the
            # network id set.
            bare = {'port': None, 'network': net_id, 'fixed_ip': None}
            fallback = None
            for net in nets:
                if (net.get('port') == port or
                        (net.get('fixed_ip') == ip and
                            net.get('network') == net_id)):
                    return net
                if fallback is None and net == bare:
                    fallback = net
            return fallback

        for iface in interfaces:
            target = locate(nets, iface.port_id, iface.net_id,
                            iface.fixed_ips[0]['ip_address'])
            if target is not None:
                target['port'] = iface.port_id

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply in-place updates for metadata, flavor, image, name and
        networks.

        Raises UpdateReplace when the configured update policy demands a
        replacement.  Returns a list of TaskRunners that
        check_update_complete() drives to completion.
        """
        if 'Metadata' in tmpl_diff:
            self.metadata_set(tmpl_diff['Metadata'])

        checkers = []
        server = None

        if self.METADATA in prop_diff:
            server = self.nova().servers.get(self.resource_id)
            nova_utils.meta_update(self.nova(),
                                   server,
                                   prop_diff[self.METADATA])

        if self.FLAVOR in prop_diff:

            flavor_update_policy = (
                prop_diff.get(self.FLAVOR_UPDATE_POLICY) or
                self.properties.get(self.FLAVOR_UPDATE_POLICY))

            if flavor_update_policy == 'REPLACE':
                raise resource.UpdateReplace(self.name)

            flavor = prop_diff[self.FLAVOR]
            flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)
            if not server:
                server = self.nova().servers.get(self.resource_id)
            checker = scheduler.TaskRunner(nova_utils.resize, server, flavor,
                                           flavor_id)
            checkers.append(checker)

        if self.IMAGE in prop_diff:
            image_update_policy = (
                prop_diff.get(self.IMAGE_UPDATE_POLICY) or
                self.properties.get(self.IMAGE_UPDATE_POLICY))
            if image_update_policy == 'REPLACE':
                raise resource.UpdateReplace(self.name)
            image = prop_diff[self.IMAGE]
            image_id = glance_utils.get_image_id(self.glance(), image)
            if not server:
                server = self.nova().servers.get(self.resource_id)
            preserve_ephemeral = (
                image_update_policy == 'REBUILD_PRESERVE_EPHEMERAL')
            checker = scheduler.TaskRunner(
                nova_utils.rebuild, server, image_id,
                preserve_ephemeral=preserve_ephemeral)
            checkers.append(checker)

        if self.NAME in prop_diff:
            if not server:
                server = self.nova().servers.get(self.resource_id)
            nova_utils.rename(server, prop_diff[self.NAME])

        if self.NETWORKS in prop_diff:
            new_networks = prop_diff.get(self.NETWORKS)
            attach_first_free_port = False
            if not new_networks:
                new_networks = []
                attach_first_free_port = True
            old_networks = self.properties.get(self.NETWORKS)

            if not server:
                server = self.nova().servers.get(self.resource_id)
            interfaces = server.interface_list()

            # if old networks is None, it means that the server got first
            # free port. so we should detach this interface.
            if old_networks is None:
                for iface in interfaces:
                    checker = scheduler.TaskRunner(server.interface_detach,
                                                   iface.port_id)
                    checkers.append(checker)
            # if we have any information in networks field, we should:
            # 1. find similar networks, if they exist
            # 2. remove these networks from new_networks and old_networks
            #    lists
            # 3. detach unmatched networks, which were present in old_networks
            # 4. attach unmatched networks, which were present in new_networks
            else:
                # remove not updated networks from old and new networks lists,
                # also get list these networks
                not_updated_networks = \
                    self._get_network_matches(old_networks, new_networks)

                self.update_networks_matching_iface_port(
                    old_networks + not_updated_networks, interfaces)

                # according to nova interface-detach command detached port
                # will be deleted
                for net in old_networks:
                    checker = scheduler.TaskRunner(server.interface_detach,
                                                   net.get('port'))
                    checkers.append(checker)

            # attach section similar for both variants that
            # were mentioned above

            for net in new_networks:
                if net.get('port'):
                    checker = scheduler.TaskRunner(server.interface_attach,
                                                   net['port'], None, None)
                    checkers.append(checker)
                elif net.get('network'):
                    checker = scheduler.TaskRunner(server.interface_attach,
                                                   None, net['network'],
                                                   net.get('fixed_ip'))
                    checkers.append(checker)

            # if new_networks is None, we should attach first free port,
            # according to similar behavior during instance creation
            if attach_first_free_port:
                checker = scheduler.TaskRunner(server.interface_attach,
                                               None, None, None)
                checkers.append(checker)

        # Optimization: make sure the first task is started before
        # check_update_complete.
        if checkers:
            checkers[0].start()

        return checkers

    def check_update_complete(self, checkers):
        """Push all checkers to completion in list order.

        Returns True only once every TaskRunner has finished; returning
        False yields control so the engine polls us again later.
        """
        def _advance(task):
            # Lazily start each runner, then drive it one step.
            if not task.started():
                task.start()
            return task.step()

        # all() short-circuits on the first unfinished task, exactly like
        # an explicit loop with an early "return False".
        return all(_advance(task) for task in checkers)

    def metadata_update(self, new_metadata=None):
        """Refresh the stored metadata when no new metadata is supplied."""
        if new_metadata is not None:
            # An explicit payload means there is nothing to refresh here.
            return
        self.metadata_set(self.parsed_template('Metadata'))

    @staticmethod
    def _check_maximum(count, maximum, msg):
        '''
        Check a count against a maximum, unless maximum is -1 which indicates
        that there is no limit
        '''
        if maximum != -1 and count > maximum:
            raise exception.StackValidationFailed(message=msg)

    def validate(self):
        """Validate the server's properties.

        Checks, in order: block device mappings, image/bootable-volume
        presence, network property conflicts, security-group/port
        conflicts, and metadata/personality sizes against the provider's
        absolute limits.
        """
        super(Server, self).validate()

        # either volume_id or snapshot_id needs to be specified, but not both
        # for block device mapping.
        bdm = self.properties.get(self.BLOCK_DEVICE_MAPPING) or []
        bootable_vol = False
        for mapping in bdm:
            device_name = mapping[self.BLOCK_DEVICE_MAPPING_DEVICE_NAME]
            if device_name == 'vda':
                # 'vda' is treated as the boot device, so a mapping for it
                # satisfies the image-or-bootable-volume requirement below.
                bootable_vol = True

            volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
            snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
            if volume_id and snapshot_id:
                raise exception.ResourcePropertyConflict(
                    self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
                    self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
            if not volume_id and not snapshot_id:
                msg = _('Either volume_id or snapshot_id must be specified for'
                        ' device mapping %s') % device_name
                raise exception.StackValidationFailed(message=msg)

        # make sure the image exists if specified.
        image = self.properties.get(self.IMAGE)
        if not image and not bootable_vol:
            msg = _('Neither image nor bootable volume is specified for'
                    ' instance %s') % self.name
            raise exception.StackValidationFailed(message=msg)

        # network properties 'uuid' and 'network' shouldn't be used
        # both at once for all networks
        networks = self.properties.get(self.NETWORKS) or []
        # record if any networks include explicit ports
        networks_with_port = False
        for network in networks:
            # NOTE: the accumulator may end up holding the port value
            # itself rather than a bool; it is only used for truthiness.
            networks_with_port = networks_with_port or \
                network.get(self.NETWORK_PORT)
            if network.get(self.NETWORK_UUID) and network.get(self.NETWORK_ID):
                msg = _('Properties "%(uuid)s" and "%(id)s" are both set '
                        'to the network "%(network)s" for the server '
                        '"%(server)s". The "%(uuid)s" property is deprecated. '
                        'Use only "%(id)s" property.'
                        '') % dict(uuid=self.NETWORK_UUID,
                                   id=self.NETWORK_ID,
                                   network=network[self.NETWORK_ID],
                                   server=self.name)
                raise exception.StackValidationFailed(message=msg)
            elif network.get(self.NETWORK_UUID):
                # Deprecated-but-valid usage: warn rather than fail.
                LOG.info(_('For the server "%(server)s" the "%(uuid)s" '
                           'property is set to network "%(network)s". '
                           '"%(uuid)s" property is deprecated. Use '
                           '"%(id)s"  property instead.')
                         % dict(uuid=self.NETWORK_UUID,
                                id=self.NETWORK_ID,
                                network=network[self.NETWORK_ID],
                                server=self.name))

        # retrieve provider's absolute limits if it will be needed
        # (limits is only referenced below under the same conditions,
        # so it is never used unbound).
        metadata = self.properties.get(self.METADATA)
        personality = self.properties.get(self.PERSONALITY)
        if metadata is not None or personality is not None:
            limits = nova_utils.absolute_limits(self.nova())

        # if 'security_groups' present for the server and explicit 'port'
        # in one or more entries in 'networks', raise validation error
        if networks_with_port and self.properties.get(self.SECURITY_GROUPS):
            raise exception.ResourcePropertyConflict(
                self.SECURITY_GROUPS,
                "/".join([self.NETWORKS, self.NETWORK_PORT]))

        # verify that the number of metadata entries is not greater
        # than the maximum number allowed in the provider's absolute
        # limits
        if metadata is not None:
            msg = _('Instance metadata must not contain greater than %s '
                    'entries.  This is the maximum number allowed by your '
                    'service provider') % limits['maxServerMeta']
            self._check_maximum(len(metadata),
                                limits['maxServerMeta'], msg)

        # verify the number of personality files and the size of each
        # personality file against the provider's absolute limits
        if personality is not None:
            msg = _("The personality property may not contain "
                    "greater than %s entries.") % limits['maxPersonality']
            self._check_maximum(len(personality),
                                limits['maxPersonality'], msg)

            for path, contents in personality.items():
                msg = (_("The contents of personality file \"%(path)s\" "
                         "is larger than the maximum allowed personality "
                         "file size (%(max_size)s bytes).") %
                       {'path': path,
                        'max_size': limits['maxPersonalitySize']})
                # NOTE(review): bytes(contents) assumes str contents on
                # Python 2; on Python 3 this would need an encoding — confirm
                # the target interpreter.
                self._check_maximum(len(bytes(contents)),
                                    limits['maxPersonalitySize'], msg)

    def handle_delete(self):
        """Delete the server, blocking until OpenStack has disposed of it."""
        if self.resource_id is None:
            return

        if self.user_data_software_config():
            self._delete_user()

        try:
            server = self.nova().servers.get(self.resource_id)
        except clients.novaclient.exceptions.NotFound:
            # Already gone on the nova side; just clear our record below.
            server = None
        if server is not None:
            deleter = scheduler.TaskRunner(nova_utils.delete_server, server)
            deleter(wait_time=0.2)

        self.resource_id_set(None)

    def handle_suspend(self):
        """Start suspending the server without waiting for SUSPENDED.

        check_suspend_complete polls for the final state, mirroring the
        create logic so we can take advantage of coroutines.
        """
        if self.resource_id is None:
            raise exception.Error(_('Cannot suspend %s, resource_id not set') %
                                  self.name)

        try:
            server = self.nova().servers.get(self.resource_id)
        except clients.novaclient.exceptions.NotFound:
            raise exception.NotFound(_('Failed to find server %s') %
                                     self.resource_id)

        LOG.debug('suspending server %s' % self.resource_id)
        # We want the server.suspend to happen after the volume
        # detachment has finished, so hand back both the server and the
        # not-yet-started runner for check_suspend_complete to drive.
        suspend_runner = scheduler.TaskRunner(server.suspend)
        return server, suspend_runner

    def check_suspend_complete(self, cookie):
        """Poll until the server reaches the SUSPENDED state.

        cookie is the (server, suspend_runner) pair returned by
        handle_suspend. Returns True when suspended; any other path
        returns a falsy value (False or implicit None) so polling
        continues, or raises on an unexpected status.
        """
        server, suspend_runner = cookie

        if not suspend_runner.started():
            suspend_runner.start()

        if suspend_runner.done():
            if server.status == 'SUSPENDED':
                return True

            # Status is stale until refreshed from nova.
            nova_utils.refresh_server(server)
            LOG.debug('%(name)s check_suspend_complete status = %(status)s'
                      % {'name': self.name, 'status': server.status})
            if server.status in list(nova_utils.deferred_server_statuses +
                                     ['ACTIVE']):
                # Still transitioning (or still ACTIVE): keep polling —
                # this returns False here since status != 'SUSPENDED'.
                return server.status == 'SUSPENDED'
            else:
                exc = exception.Error(_('Suspend of server %(server)s failed '
                                        'with unknown status: %(status)s') %
                                      dict(server=server.name,
                                           status=server.status))
                raise exc
        # Runner not done yet: fall through, returning None (falsy).

    def handle_resume(self):
        """Start resuming the server without waiting for ACTIVE.

        check_resume_complete polls for the final state, mirroring the
        create logic so we can take advantage of coroutines.
        """
        if self.resource_id is None:
            raise exception.Error(_('Cannot resume %s, resource_id not set') %
                                  self.name)

        try:
            server = self.nova().servers.get(self.resource_id)
        except clients.novaclient.exceptions.NotFound:
            raise exception.NotFound(_('Failed to find server %s') %
                                     self.resource_id)

        LOG.debug('resuming server %s' % self.resource_id)
        server.resume()
        return server

    def check_resume_complete(self, server):
        # Reuse the create-path polling helper to wait for the server to
        # come back up after a resume.
        return self._check_active(server)
Exemple #23
0
class BGPVPN(neutron.NeutronResource):
    """A resource for BGPVPN service in neutron.

    Manages a BGP VPN (L3VPN or EVPN) via the neutron bgpvpn API
    extension. Updates are not implemented; changes require replacement.
    """

    # Property keys map one-to-one onto the neutron bgpvpn API fields.
    PROPERTIES = (NAME, TYPE, DESCRIPTION, ROUTE_DISTINGUISHERS,
                  IMPORT_TARGETS, EXPORT_TARGETS, ROUTE_TARGETS,
                  TENANT_ID) = ('name', 'type', 'description',
                                'route_distinguishers', 'import_targets',
                                'export_targets', 'route_targets', 'tenant_id')

    ATTRIBUTES = (SHOW, STATUS) = ('show', 'status')

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Name for the bgpvpn.'),
        ),
        TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('BGP VPN type selection between L3VPN (l3) and '
              'EVPN (l2), default:l3'),
            required=False,
            default='l3',
            constraints=[constraints.AllowedValues(['l2', 'l3'])]),
        DESCRIPTION:
        properties.Schema(
            properties.Schema.STRING,
            _('Description for the bgpvpn.'),
            required=False,
        ),
        TENANT_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('Tenant this bgpvpn belongs to (name or id).'),
            required=False,
            constraints=[constraints.CustomConstraint('keystone.project')]),
        ROUTE_DISTINGUISHERS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of RDs that will be used to advertize BGPVPN routes.'),
            required=False,
            # TODO(tmorin): add a pattern constraint
        ),
        IMPORT_TARGETS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of additional Route Targets to import from.'),
            required=False,
            # TODO(tmorin): add a pattern constraint
        ),
        EXPORT_TARGETS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of additional Route Targets to export to.'),
            required=False,
            # TODO(tmorin): add a pattern constraint
        ),
        ROUTE_TARGETS:
        properties.Schema(
            properties.Schema.LIST,
            _('Route Targets list to import/export for this BGPVPN.'),
            required=False,
            # TODO(tmorin): add a pattern constraint
        ),
    }

    attributes_schema = {
        STATUS: attributes.Schema(_('Status of bgpvpn.'), ),
        SHOW: attributes.Schema(_('All attributes.')),
    }

    def validate(self):
        """Validate properties; no extra checks beyond the base class."""
        super(BGPVPN, self).validate()

    def handle_create(self):
        """Create the bgpvpn and record its id as the resource id."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())

        # The API wants a project UUID; resolve a possible project name.
        if 'tenant_id' in props:
            tenant_id = self.client_plugin('keystone').get_project_id(
                props['tenant_id'])
            props['tenant_id'] = tenant_id

        bgpvpn = self.neutron().create_bgpvpn({'bgpvpn': props})
        self.resource_id_set(bgpvpn['bgpvpn']['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        # In-place update is not supported for this resource.
        raise NotImplementedError()

    def handle_delete(self):
        """Delete the bgpvpn, ignoring not-found errors."""
        try:
            self.neutron().delete_bgpvpn(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def _confirm_delete(self):
        # Generator: yields the show result until neutron reports NotFound,
        # at which point deletion is confirmed and we stop.
        while True:
            try:
                yield self._show_resource()
            except exception.NotFound:
                return

    def _show_resource(self):
        """Fetch the current bgpvpn representation from neutron."""
        return self.neutron().show_bgpvpn(self.resource_id)
Exemple #24
0
class NovaFlavor(resource.Resource):
    """A resource for creating OpenStack virtual hardware templates.

    Due to default nova security policy usage of this resource is limited to
    being used by administrators only. The rights may also be delegated to
    other users by redefining the access controls on the nova-api server.

    Note that the current implementation of the Nova Flavor resource does not
    allow specifying the name and flavorid properties for the resource.
    This is done to avoid potential naming collision upon flavor creation as
    all flavor have a global scope.
    """

    support_status = support.SupportStatus(version='2014.2')

    default_client_name = 'nova'

    required_service_extension = 'os-flavor-manage'

    entity = 'flavors'

    PROPERTIES = (ID, NAME, RAM, VCPUS, DISK, SWAP, EPHEMERAL, RXTX_FACTOR,
                  EXTRA_SPECS, IS_PUBLIC) = (
                      'flavorid',
                      'name',
                      'ram',
                      'vcpus',
                      'disk',
                      'swap',
                      'ephemeral',
                      'rxtx_factor',
                      'extra_specs',
                      'is_public',
                  )

    ATTRIBUTES = (IS_PUBLIC_ATTR, EXTRA_SPECS_ATTR) = ('is_public',
                                                       'extra_specs')

    properties_schema = {
        ID:
        properties.Schema(
            properties.Schema.STRING,
            _('Unique ID of the flavor. If not specified, '
              'an UUID will be auto generated and used.'),
            support_status=support.SupportStatus(version='7.0.0')),
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Name of the flavor.'),
            support_status=support.SupportStatus(version='7.0.0'),
        ),
        RAM:
        properties.Schema(properties.Schema.INTEGER,
                          _('Memory in MB for the flavor.'),
                          required=True),
        VCPUS:
        properties.Schema(properties.Schema.INTEGER,
                          _('Number of VCPUs for the flavor.'),
                          required=True),
        DISK:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Size of local disk in GB. The "0" size is a special case that '
              'uses the native base image size as the size of the ephemeral '
              'root volume.'),
            default=0),
        SWAP:
        properties.Schema(properties.Schema.INTEGER,
                          _('Swap space in MB.'),
                          default=0),
        EPHEMERAL:
        properties.Schema(properties.Schema.INTEGER,
                          _('Size of a secondary ephemeral data disk in GB.'),
                          default=0),
        RXTX_FACTOR:
        properties.Schema(properties.Schema.NUMBER,
                          _('RX/TX factor.'),
                          default=1.0),
        EXTRA_SPECS:
        properties.Schema(
            properties.Schema.MAP,
            _('Key/Value pairs to extend the capabilities of the flavor.'),
            update_allowed=True,
        ),
        IS_PUBLIC:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Scope of flavor accessibility. Public or private. '
              'Default value is True, means public, shared '
              'across all projects.'),
            default=True,
            support_status=support.SupportStatus(version='6.0.0'),
        ),
    }

    attributes_schema = {
        IS_PUBLIC_ATTR:
        attributes.Schema(
            _('Whether the flavor is shared across all projects.'),
            support_status=support.SupportStatus(version='6.0.0'),
            type=attributes.Schema.BOOLEAN),
        EXTRA_SPECS_ATTR:
        attributes.Schema(
            _('Extra specs of the flavor in key-value pairs.'),
            support_status=support.SupportStatus(version='7.0.0'),
            type=attributes.Schema.MAP)
    }

    def handle_create(self):
        """Create the flavor, apply extra specs and tenant access."""
        args = dict(self.properties)
        # Use the property-name constants rather than repeating the raw
        # strings, for consistency with the EXTRA_SPECS usage below.
        if not args[self.ID]:
            # 'auto' tells nova to generate a UUID flavorid.
            args[self.ID] = 'auto'
        if not args[self.NAME]:
            args[self.NAME] = self.physical_resource_name()
        # extra_specs is not a flavors.create() argument; it is applied
        # in a separate call once the flavor exists.
        flavor_keys = args.pop(self.EXTRA_SPECS)
        flavor = self.client().flavors.create(**args)
        self.resource_id_set(flavor.id)
        if flavor_keys:
            flavor.set_keys(flavor_keys)

        tenant = self.stack.context.tenant_id
        if not args[self.IS_PUBLIC]:
            # grant access only to the active project (private flavor)
            self.client().flavor_access.add_tenant_access(flavor, tenant)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update nova flavor.

        Only extra_specs may change; replace the old key set wholesale
        with the new one (an empty/None new value just clears them).
        """
        if self.EXTRA_SPECS in prop_diff:
            flavor = self.client().flavors.get(self.resource_id)
            old_keys = flavor.get_keys()
            flavor.unset_keys(old_keys)
            new_keys = prop_diff.get(self.EXTRA_SPECS)
            if new_keys is not None:
                flavor.set_keys(new_keys)

    def _resolve_attribute(self, name):
        """Resolve is_public/extra_specs from the live flavor."""
        # Guard against being asked for attributes before the flavor
        # exists (or after it was deleted); return None instead of
        # calling nova with a None id.
        if self.resource_id is None:
            return
        flavor = self.client().flavors.get(self.resource_id)
        if name == self.IS_PUBLIC_ATTR:
            return getattr(flavor, name)
        if name == self.EXTRA_SPECS_ATTR:
            return flavor.get_keys()
Exemple #25
0
class Subnet(neutron.NeutronResource):
    """A resource for managing Neutron subnets.

    A subnet represents an IP address block that can be used for assigning IP
    addresses to virtual instances. Each subnet must have a CIDR and must be
    associated with a network. IPs can be either selected from the whole subnet
    CIDR, or from "allocation pools" that can be specified by the user.
    """

    entity = 'subnet'

    PROPERTIES = (
        NETWORK_ID,
        NETWORK,
        SUBNETPOOL,
        PREFIXLEN,
        CIDR,
        VALUE_SPECS,
        NAME,
        IP_VERSION,
        DNS_NAMESERVERS,
        GATEWAY_IP,
        ENABLE_DHCP,
        ALLOCATION_POOLS,
        TENANT_ID,
        HOST_ROUTES,
        IPV6_RA_MODE,
        IPV6_ADDRESS_MODE,
    ) = (
        'network_id',
        'network',
        'subnetpool',
        'prefixlen',
        'cidr',
        'value_specs',
        'name',
        'ip_version',
        'dns_nameservers',
        'gateway_ip',
        'enable_dhcp',
        'allocation_pools',
        'tenant_id',
        'host_routes',
        'ipv6_ra_mode',
        'ipv6_address_mode',
    )

    _ALLOCATION_POOL_KEYS = (
        ALLOCATION_POOL_START,
        ALLOCATION_POOL_END,
    ) = (
        'start',
        'end',
    )

    _HOST_ROUTES_KEYS = (
        ROUTE_DESTINATION,
        ROUTE_NEXTHOP,
    ) = (
        'destination',
        'nexthop',
    )

    _IPV6_DHCP_MODES = (
        DHCPV6_STATEFUL,
        DHCPV6_STATELESS,
        SLAAC,
    ) = (
        'dhcpv6-stateful',
        'dhcpv6-stateless',
        'slaac',
    )

    ATTRIBUTES = (
        NAME_ATTR,
        NETWORK_ID_ATTR,
        TENANT_ID_ATTR,
        ALLOCATION_POOLS_ATTR,
        GATEWAY_IP_ATTR,
        HOST_ROUTES_ATTR,
        IP_VERSION_ATTR,
        CIDR_ATTR,
        DNS_NAMESERVERS_ATTR,
        ENABLE_DHCP_ATTR,
    ) = (
        'name',
        'network_id',
        'tenant_id',
        'allocation_pools',
        'gateway_ip',
        'host_routes',
        'ip_version',
        'cidr',
        'dns_nameservers',
        'enable_dhcp',
    )

    properties_schema = {
        NETWORK_ID:
        properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                message=_('Use property %s.') % NETWORK,
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED, version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        NETWORK:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID of the attached network.'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.network')],
            support_status=support.SupportStatus(version='2014.2')),
        SUBNETPOOL:
        properties.Schema(
            properties.Schema.STRING,
            _('The name or ID of the subnet pool.'),
            constraints=[constraints.CustomConstraint('neutron.subnetpool')],
            support_status=support.SupportStatus(version='6.0.0'),
        ),
        PREFIXLEN:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Prefix length for subnet allocation from subnet pool.'),
            constraints=[constraints.Range(min=0)],
            support_status=support.SupportStatus(version='6.0.0'),
        ),
        CIDR:
        properties.Schema(
            properties.Schema.STRING,
            _('The CIDR.'),
            constraints=[constraints.CustomConstraint('net_cidr')]),
        VALUE_SPECS:
        properties.Schema(properties.Schema.MAP,
                          _('Extra parameters to include in the request.'),
                          default={},
                          update_allowed=True),
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('The name of the subnet.'),
                          update_allowed=True),
        IP_VERSION:
        properties.Schema(properties.Schema.INTEGER,
                          _('The IP version, which is 4 or 6.'),
                          default=4,
                          constraints=[
                              constraints.AllowedValues([4, 6]),
                          ]),
        DNS_NAMESERVERS:
        properties.Schema(properties.Schema.LIST,
                          _('A specified set of DNS name servers to be used.'),
                          default=[],
                          update_allowed=True),
        GATEWAY_IP:
        properties.Schema(
            properties.Schema.STRING,
            _('The gateway IP address. Set to any of [ null | ~ | "" ] '
              'to create/update a subnet without a gateway. '
              'If omitted when creation, neutron will assign the first '
              'free IP address within the subnet to the gateway '
              'automatically. If remove this from template when update, '
              'the old gateway IP address will be detached.'),
            update_allowed=True),
        ENABLE_DHCP:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Set to true if DHCP is enabled and false if DHCP is disabled.'),
            default=True,
            update_allowed=True),
        ALLOCATION_POOLS:
        properties.Schema(
            properties.Schema.LIST,
            _('The start and end addresses for the allocation pools.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ALLOCATION_POOL_START:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Start address for the allocation pool.'),
                        required=True,
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                    ALLOCATION_POOL_END:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('End address for the allocation pool.'),
                        required=True,
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                },
            ),
            update_allowed=True),
        TENANT_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID of the tenant who owns the network. Only administrative '
              'users can specify a tenant ID other than their own.')),
        HOST_ROUTES:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of host route dictionaries for the subnet.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ROUTE_DESTINATION:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The destination for static route.'),
                        required=True,
                        constraints=[constraints.CustomConstraint('net_cidr')
                                     ]),
                    ROUTE_NEXTHOP:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The next hop for the destination.'),
                        required=True,
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                },
            ),
            update_allowed=True),
        IPV6_RA_MODE:
        properties.Schema(
            properties.Schema.STRING,
            _('IPv6 RA (Router Advertisement) mode.'),
            constraints=[
                constraints.AllowedValues(
                    [DHCPV6_STATEFUL, DHCPV6_STATELESS, SLAAC]),
            ],
            support_status=support.SupportStatus(version='2015.1')),
        IPV6_ADDRESS_MODE:
        properties.Schema(
            properties.Schema.STRING,
            _('IPv6 address mode.'),
            constraints=[
                constraints.AllowedValues(
                    [DHCPV6_STATEFUL, DHCPV6_STATELESS, SLAAC]),
            ],
            support_status=support.SupportStatus(version='2015.1')),
    }

    attributes_schema = {
        NAME_ATTR:
        attributes.Schema(_("Friendly name of the subnet."),
                          type=attributes.Schema.STRING),
        NETWORK_ID_ATTR:
        attributes.Schema(_("Parent network of the subnet."),
                          type=attributes.Schema.STRING),
        TENANT_ID_ATTR:
        attributes.Schema(_("Tenant owning the subnet."),
                          type=attributes.Schema.STRING),
        ALLOCATION_POOLS_ATTR:
        attributes.Schema(_("Ip allocation pools and their ranges."),
                          type=attributes.Schema.LIST),
        GATEWAY_IP_ATTR:
        attributes.Schema(_("Ip of the subnet's gateway."),
                          type=attributes.Schema.STRING),
        HOST_ROUTES_ATTR:
        attributes.Schema(_("Additional routes for this subnet."),
                          type=attributes.Schema.LIST),
        IP_VERSION_ATTR:
        attributes.Schema(_("Ip version for the subnet."),
                          type=attributes.Schema.STRING),
        CIDR_ATTR:
        attributes.Schema(_("CIDR block notation for this subnet."),
                          type=attributes.Schema.STRING),
        DNS_NAMESERVERS_ATTR:
        attributes.Schema(_("List of dns nameservers."),
                          type=attributes.Schema.LIST),
        ENABLE_DHCP_ATTR:
        attributes.Schema(
            _("'true' if DHCP is enabled for this subnet; 'false' otherwise."),
            type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        """Map the deprecated network_id onto network and resolve names."""
        return [
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.NETWORK],
                                        value_path=[self.NETWORK_ID]),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.NETWORK],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='network'),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.SUBNETPOOL],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='subnetpool')
        ]

    @classmethod
    def _null_gateway_ip(cls, props):
        if cls.GATEWAY_IP not in props:
            return
        # Specifying null in the gateway_ip will result in
        # a property containing an empty string.
        # A null gateway_ip has special meaning in the API
        # so this needs to be set back to None.
        # See bug https://bugs.launchpad.net/heat/+bug/1226666
        if props.get(cls.GATEWAY_IP) == '':
            props[cls.GATEWAY_IP] = None

    def validate(self):
        """Check subnetpool/cidr/prefixlen conflicts and IPv6 mode rules."""
        super(Subnet, self).validate()
        subnetpool = self.properties[self.SUBNETPOOL]
        prefixlen = self.properties[self.PREFIXLEN]
        cidr = self.properties[self.CIDR]
        if subnetpool and cidr:
            raise exception.ResourcePropertyConflict(self.SUBNETPOOL,
                                                     self.CIDR)
        if not subnetpool and not cidr:
            raise exception.PropertyUnspecifiedError(self.SUBNETPOOL,
                                                     self.CIDR)
        if prefixlen and cidr:
            raise exception.ResourcePropertyConflict(self.PREFIXLEN, self.CIDR)
        ra_mode = self.properties[self.IPV6_RA_MODE]
        address_mode = self.properties[self.IPV6_ADDRESS_MODE]

        if (self.properties[self.IP_VERSION] == 4) and (ra_mode
                                                        or address_mode):
            msg = _('ipv6_ra_mode and ipv6_address_mode are not supported '
                    'for ipv4.')
            raise exception.StackValidationFailed(message=msg)
        if ra_mode and address_mode and (ra_mode != address_mode):
            msg = _('When both ipv6_ra_mode and ipv6_address_mode are set, '
                    'they must be equal.')
            raise exception.StackValidationFailed(message=msg)

        gateway_ip = self.properties.get(self.GATEWAY_IP)
        if (gateway_ip and gateway_ip not in ['~', '']
                and not netutils.is_valid_ip(gateway_ip)):
            # BUGFIX: interpolate the address into the message. The
            # previous code built a (format-string, value) tuple, so the
            # '%(gateway)s' placeholder was never substituted and a tuple
            # was passed as the failure message.
            msg = (_('Gateway IP address "%(gateway)s" is in '
                     'invalid format.') % {'gateway': gateway_ip})
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the subnet and record its id as the resource id."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        # The neutron API expects network_id/subnetpool_id keys.
        props['network_id'] = props.pop(self.NETWORK)
        if self.SUBNETPOOL in props and props[self.SUBNETPOOL]:
            props['subnetpool_id'] = props.pop('subnetpool')
        self._null_gateway_ip(props)
        subnet = self.client().create_subnet({'subnet': props})['subnet']
        self.resource_id_set(subnet['id'])

    def handle_delete(self):
        """Delete the subnet, ignoring not-found errors."""
        try:
            self.client().delete_subnet(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed properties to neutron."""
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            if (self.ALLOCATION_POOLS in prop_diff
                    and prop_diff[self.ALLOCATION_POOLS] is None):
                # The API rejects null; an empty list clears the pools.
                prop_diff[self.ALLOCATION_POOLS] = []

            # If the new value is '', set to None
            self._null_gateway_ip(prop_diff)

            self.client().update_subnet(self.resource_id,
                                        {'subnet': prop_diff})
Exemple #26
0
class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
    """A resource that implements Cinder volumes.

    Cinder volume is a storage in the form of block devices. It can be used,
    for example, for providing storage to instance. Volume supports creation
    from snapshot, backup or image. Also volume can be created only by size.
    """

    # Template-facing property keys mapped to the Cinder API field names.
    PROPERTIES = (
        AVAILABILITY_ZONE,
        SIZE,
        SNAPSHOT_ID,
        BACKUP_ID,
        NAME,
        DESCRIPTION,
        VOLUME_TYPE,
        METADATA,
        IMAGE_REF,
        IMAGE,
        SOURCE_VOLID,
        CINDER_SCHEDULER_HINTS,
        READ_ONLY,
        MULTI_ATTACH,
    ) = (
        'availability_zone',
        'size',
        'snapshot_id',
        'backup_id',
        'name',
        'description',
        'volume_type',
        'metadata',
        'imageRef',
        'image',
        'source_volid',
        'scheduler_hints',
        'read_only',
        'multiattach',
    )

    # Attribute keys resolvable via get_attr in templates.
    ATTRIBUTES = (
        AVAILABILITY_ZONE_ATTR,
        SIZE_ATTR,
        SNAPSHOT_ID_ATTR,
        DISPLAY_NAME_ATTR,
        DISPLAY_DESCRIPTION_ATTR,
        VOLUME_TYPE_ATTR,
        METADATA_ATTR,
        SOURCE_VOLID_ATTR,
        STATUS,
        CREATED_AT,
        BOOTABLE,
        METADATA_VALUES_ATTR,
        ENCRYPTED_ATTR,
        ATTACHMENTS,
        ATTACHMENTS_LIST,
        MULTI_ATTACH_ATTR,
    ) = (
        'availability_zone',
        'size',
        'snapshot_id',
        'display_name',
        'display_description',
        'volume_type',
        'metadata',
        'source_volid',
        'status',
        'created_at',
        'bootable',
        'metadata_values',
        'encrypted',
        'attachments',
        'attachments_list',
        'multiattach',
    )

    properties_schema = {
        AVAILABILITY_ZONE:
        properties.Schema(
            properties.Schema.STRING,
            _('The availability zone in which the volume will be created.')),
        SIZE:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The size of the volume in GB. '
              'On update only increase in size is supported. This property '
              'is required unless property %(backup)s or %(vol)s or '
              '%(snapshot)s is specified.') %
            dict(backup=BACKUP_ID, vol=SOURCE_VOLID, snapshot=SNAPSHOT_ID),
            update_allowed=True,
            constraints=[
                constraints.Range(min=1),
            ]),
        SNAPSHOT_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('If specified, the snapshot to create the volume from.'),
            constraints=[constraints.CustomConstraint('cinder.snapshot')]),
        BACKUP_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('If specified, the backup to create the volume from.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('cinder.backup')]),
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('A name used to distinguish the volume.'),
            update_allowed=True,
        ),
        DESCRIPTION:
        properties.Schema(
            properties.Schema.STRING,
            _('A description of the volume.'),
            update_allowed=True,
        ),
        VOLUME_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('If specified, the type of volume to use, mapping to a '
              'specific backend.'),
            constraints=[constraints.CustomConstraint('cinder.vtype')],
            update_allowed=True),
        METADATA:
        properties.Schema(properties.Schema.MAP,
                          _('Key/value pairs to associate with the volume.'),
                          update_allowed=True,
                          default={}),
        # Hidden since 5.0.0 in favour of IMAGE; kept for old templates and
        # translated to IMAGE by translation_rules() below.
        IMAGE_REF:
        properties.Schema(properties.Schema.STRING,
                          _('The ID of the image to create the volume from.'),
                          support_status=support.SupportStatus(
                              status=support.HIDDEN,
                              message=_('Use property %s.') % IMAGE,
                              version='5.0.0',
                              previous_status=support.SupportStatus(
                                  status=support.DEPRECATED,
                                  version='2014.1'))),
        IMAGE:
        properties.Schema(
            properties.Schema.STRING,
            _('If specified, the name or ID of the image to create the '
              'volume from.'),
            constraints=[constraints.CustomConstraint('glance.image')]),
        SOURCE_VOLID:
        properties.Schema(
            properties.Schema.STRING,
            _('If specified, the volume to use as source.'),
            constraints=[constraints.CustomConstraint('cinder.volume')]),
        CINDER_SCHEDULER_HINTS:
        properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary key-value pairs specified by the client to help '
              'the Cinder scheduler creating a volume.'),
            support_status=support.SupportStatus(version='2015.1')),
        READ_ONLY:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Enables or disables read-only access mode of volume.'),
            support_status=support.SupportStatus(version='5.0.0'),
            update_allowed=True,
        ),
        MULTI_ATTACH:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether allow the volume to be attached more than once.'),
            support_status=support.SupportStatus(version='6.0.0'),
            default=False),
    }

    attributes_schema = {
        AVAILABILITY_ZONE_ATTR:
        attributes.Schema(
            _('The availability zone in which the volume is located.'),
            type=attributes.Schema.STRING),
        SIZE_ATTR:
        attributes.Schema(_('The size of the volume in GB.'),
                          type=attributes.Schema.STRING),
        SNAPSHOT_ID_ATTR:
        attributes.Schema(
            _('The snapshot the volume was created from, if any.'),
            type=attributes.Schema.STRING),
        DISPLAY_NAME_ATTR:
        attributes.Schema(_('Name of the volume.'),
                          type=attributes.Schema.STRING),
        DISPLAY_DESCRIPTION_ATTR:
        attributes.Schema(_('Description of the volume.'),
                          type=attributes.Schema.STRING),
        VOLUME_TYPE_ATTR:
        attributes.Schema(
            _('The type of the volume mapping to a backend, if any.'),
            type=attributes.Schema.STRING),
        METADATA_ATTR:
        attributes.Schema(_('Key/value pairs associated with the volume.'),
                          type=attributes.Schema.STRING),
        SOURCE_VOLID_ATTR:
        attributes.Schema(_('The volume used as source, if any.'),
                          type=attributes.Schema.STRING),
        STATUS:
        attributes.Schema(_('The current status of the volume.'),
                          type=attributes.Schema.STRING),
        CREATED_AT:
        attributes.Schema(_('The timestamp indicating volume creation.'),
                          type=attributes.Schema.STRING),
        BOOTABLE:
        attributes.Schema(
            _('Boolean indicating if the volume can be booted or not.'),
            type=attributes.Schema.STRING),
        METADATA_VALUES_ATTR:
        attributes.Schema(
            _('Key/value pairs associated with the volume in raw dict form.'),
            type=attributes.Schema.MAP),
        ENCRYPTED_ATTR:
        attributes.Schema(
            _('Boolean indicating if the volume is encrypted or not.'),
            type=attributes.Schema.STRING),
        ATTACHMENTS:
        attributes.Schema(
            _('A string representation of the list of attachments of the '
              'volume.'),
            type=attributes.Schema.STRING,
            cache_mode=attributes.Schema.CACHE_NONE,
            support_status=support.SupportStatus(
                status=support.DEPRECATED,
                message=_('Use property %s.') % ATTACHMENTS_LIST,
                version='9.0.0',
                previous_status=support.SupportStatus(status=support.SUPPORTED,
                                                      version='2015.1'))),
        ATTACHMENTS_LIST:
        attributes.Schema(
            _('The list of attachments of the volume.'),
            type=attributes.Schema.LIST,
            cache_mode=attributes.Schema.CACHE_NONE,
            support_status=support.SupportStatus(version='9.0.0'),
        ),
        MULTI_ATTACH_ATTR:
        attributes.Schema(
            _('Boolean indicating whether allow the volume to be attached '
              'more than once.'),
            type=attributes.Schema.BOOLEAN,
            support_status=support.SupportStatus(version='6.0.0'),
        ),
    }

    # Transient Cinder statuses treated as "still creating" by the base class.
    _volume_creating_status = ['creating', 'restoring-backup', 'downloading']

    entity = 'volumes'

    def translation_rules(self, props):
        """Map the hidden imageRef property onto the current image property."""
        return [
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.IMAGE],
                                        value_path=[self.IMAGE_REF])
        ]

    def _name(self):
        """Return the user-supplied name, falling back to the base default."""
        name = self.properties[self.NAME]
        if name:
            return name
        return super(CinderVolume, self)._name()

    def _description(self):
        """Return the description property (may be None)."""
        return self.properties[self.DESCRIPTION]

    def _create_arguments(self):
        """Build the keyword arguments for the Cinder volume-create call."""
        arguments = {
            'size': self.properties[self.SIZE],
            'availability_zone': self.properties[self.AVAILABILITY_ZONE],
        }

        scheduler_hints = self._scheduler_hints(
            self.properties[self.CINDER_SCHEDULER_HINTS])
        if scheduler_hints:
            arguments[self.CINDER_SCHEDULER_HINTS] = scheduler_hints

        # 'image' (name or ID) takes precedence over the legacy 'imageRef';
        # names are resolved to an ID through the Glance client plugin.
        if self.properties[self.IMAGE]:
            arguments['imageRef'] = self.client_plugin(
                'glance').find_image_by_name_or_id(self.properties[self.IMAGE])
        elif self.properties[self.IMAGE_REF]:
            arguments['imageRef'] = self.properties[self.IMAGE_REF]

        # Only pass optional properties the user actually set.
        optionals = (self.SNAPSHOT_ID, self.VOLUME_TYPE, self.SOURCE_VOLID,
                     self.METADATA, self.MULTI_ATTACH)

        arguments.update((prop, self.properties[prop]) for prop in optionals
                         if self.properties[prop] is not None)

        return arguments

    def _resolve_attribute(self, name):
        """Fetch the live volume and resolve the requested attribute from it.

        Metadata is serialized to a JSON string for METADATA_ATTR and
        returned raw for METADATA_VALUES_ATTR; everything else falls through
        to the volume object's own attribute of the same name, stringified.
        """
        if self.resource_id is None:
            return
        cinder = self.client()
        vol = cinder.volumes.get(self.resource_id)
        if name == self.METADATA_ATTR:
            return six.text_type(jsonutils.dumps(vol.metadata))
        elif name == self.METADATA_VALUES_ATTR:
            return vol.metadata
        if name == self.DISPLAY_NAME_ATTR:
            return vol.name
        elif name == self.DISPLAY_DESCRIPTION_ATTR:
            return vol.description
        elif name == self.ATTACHMENTS_LIST:
            return vol.attachments
        return six.text_type(getattr(vol, name))

    def check_create_complete(self, vol_id):
        """Finish creation, then apply deferred post-create settings."""
        complete = super(CinderVolume, self).check_create_complete(vol_id)
        # Cinder just supports update read only for volume in available,
        # if we update in handle_create(), maybe the volume still in
        # creating, then cinder will raise an exception
        if complete:
            self._store_config_default_properties()
            self._update_read_only(self.properties[self.READ_ONLY])

        return complete

    def _store_config_default_properties(self, attributes=None):
        """Method for storing default values of properties in resource data.

        Some properties have default values, specified in project configuration
        file, so cannot be hardcoded into properties_schema, but should be
        stored for further using. So need to get created resource and take
        required property's value.
        """
        if attributes is None:
            attributes = self._show_resource()

        if attributes.get('volume_type') is not None:
            self.data_set(self.VOLUME_TYPE, attributes['volume_type'])
        else:
            self.data_delete(self.VOLUME_TYPE)

    def _extend_volume(self, new_size):
        """Ask Cinder to extend the volume; wrap client errors in heat Error.

        Returns True so callers can use the value as a 'called' flag.
        """
        try:
            self.client().volumes.extend(self.resource_id, new_size)
        except Exception as ex:
            if self.client_plugin().is_client_exception(ex):
                raise exception.Error(
                    _("Failed to extend volume %(vol)s - %(err)s") % {
                        'vol': self.resource_id,
                        'err': six.text_type(ex)
                    })
            else:
                raise
        return True

    def _update_read_only(self, read_only_flag):
        """Set the volume's read-only flag when one was requested."""
        if read_only_flag is not None:
            self.client().volumes.update_readonly_flag(self.resource_id,
                                                       read_only_flag)

        return True

    def _check_extend_volume_complete(self):
        """Poll the resize: False while 'extending', True on 'available'.

        Raises ResourceUnknownStatus for any other terminal status.
        """
        vol = self.client().volumes.get(self.resource_id)
        if vol.status == 'extending':
            LOG.debug("Volume %s is being extended", vol.id)
            return False

        if vol.status != 'available':
            LOG.info(
                "Resize failed: Volume %(vol)s "
                "is in %(status)s state.", {
                    'vol': vol.id,
                    'status': vol.status
                })
            raise exception.ResourceUnknownStatus(
                resource_status=vol.status, result=_('Volume resize failed'))

        LOG.info('Volume %(id)s resize complete', {'id': vol.id})
        return True

    def _backup_restore(self, vol_id, backup_id):
        """Start restoring a backup into the volume; True on request success."""
        try:
            self.client().restores.restore(backup_id, vol_id)
        except Exception as ex:
            if self.client_plugin().is_client_exception(ex):
                raise exception.Error(
                    _("Failed to restore volume %(vol)s from backup %(backup)s "
                      "- %(err)s") % {
                          'vol': vol_id,
                          'backup': backup_id,
                          'err': ex
                      })
            else:
                raise
        return True

    def _check_backup_restore_complete(self):
        """Poll the restore: False while in progress, True on 'available'.

        Raises ResourceUnknownStatus for any other terminal status.
        """
        vol = self.client().volumes.get(self.resource_id)
        if vol.status == 'restoring-backup':
            LOG.debug("Volume %s is being restoring from backup", vol.id)
            return False

        if vol.status != 'available':
            LOG.info(
                "Restore failed: Volume %(vol)s is in %(status)s "
                "state.", {
                    'vol': vol.id,
                    'status': vol.status
                })
            raise exception.ResourceUnknownStatus(
                resource_status=vol.status,
                result=_('Volume backup restore failed'))

        LOG.info('Volume %s backup restore complete', vol.id)
        return True

    def needs_replace_failed(self):
        """Decide whether a FAILED volume must be replaced rather than kept.

        A volume with no ID, a vanished volume (NotFound is swallowed by the
        context manager, falling through to the final return), or one in
        'error'/'deleting' state needs replacement.
        """
        if not self.resource_id:
            return True

        # ignore_not_found is used here as a context manager that suppresses
        # NotFound raised inside the block.
        with self.client_plugin().ignore_not_found:
            vol = self.client().volumes.get(self.resource_id)
            return vol.status in ('error', 'deleting')

        return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Start all applicable update operations for the changed properties.

        Returns a 5-tuple of progress objects (restore, detach, resize,
        access-mode, attach) that check_update_complete() drives to
        completion; entries are None for operations not needed.
        """
        vol = None
        cinder = self.client()
        prg_resize = None
        prg_attach = None
        prg_detach = None
        prg_restore = None
        prg_access = None

        # update the name and description for cinder volume
        if self.NAME in prop_diff or self.DESCRIPTION in prop_diff:
            vol = cinder.volumes.get(self.resource_id)
            update_name = (prop_diff.get(self.NAME)
                           or self.properties[self.NAME])
            update_description = (prop_diff.get(self.DESCRIPTION)
                                  or self.properties[self.DESCRIPTION])
            kwargs = self._fetch_name_and_description(update_name,
                                                      update_description)
            cinder.volumes.update(vol, **kwargs)
        # update the metadata for cinder volume
        if self.METADATA in prop_diff:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)
            metadata = prop_diff.get(self.METADATA)
            cinder.volumes.update_all_metadata(vol, metadata)
        # retype
        if self.VOLUME_TYPE in prop_diff:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)
            new_vol_type = prop_diff.get(self.VOLUME_TYPE)
            cinder.volumes.retype(vol, new_vol_type, 'never')
        # update read_only access mode
        if self.READ_ONLY in prop_diff:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)
            flag = prop_diff.get(self.READ_ONLY)
            prg_access = progress.VolumeUpdateAccessModeProgress(
                read_only=flag)
            prg_detach, prg_attach = self._detach_attach_progress(vol)
        # restore the volume from backup
        if self.BACKUP_ID in prop_diff:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)
            prg_restore = progress.VolumeBackupRestoreProgress(
                vol_id=self.resource_id,
                backup_id=prop_diff.get(self.BACKUP_ID))
            prg_detach, prg_attach = self._detach_attach_progress(vol)
        # extend volume size
        if self.SIZE in prop_diff:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)

            new_size = prop_diff[self.SIZE]
            if new_size < vol.size:
                raise exception.NotSupported(feature=_("Shrinking volume"))

            elif new_size > vol.size:
                prg_resize = progress.VolumeResizeProgress(size=new_size)
                prg_detach, prg_attach = self._detach_attach_progress(vol)

        return prg_restore, prg_detach, prg_resize, prg_access, prg_attach

    def _detach_attach_progress(self, vol):
        """Build detach/attach progress objects if the volume is attached.

        Returns (prg_detach, prg_attach), both None for an unattached volume.
        """
        prg_attach = None
        prg_detach = None
        if vol.attachments:
            # NOTE(pshchelo):
            # this relies on current behavior of cinder attachments,
            # i.e. volume attachments is a list with len<=1,
            # so the volume can be attached only to single instance,
            # and id of attachment is the same as id of the volume
            # it describes, so detach/attach the same volume
            # will not change volume attachment id.
            server_id = vol.attachments[0]['server_id']
            device = vol.attachments[0]['device']
            attachment_id = vol.attachments[0]['id']
            prg_detach = progress.VolumeDetachProgress(server_id, vol.id,
                                                       attachment_id)
            prg_attach = progress.VolumeAttachProgress(server_id, vol.id,
                                                       device)

        return prg_detach, prg_attach

    def _detach_volume_to_complete(self, prg_detach):
        """Advance the detach state machine one step per call.

        Order: issue the Nova detach, wait for Cinder to see the volume
        free, then wait for Nova to drop the attachment. Returns False
        while any step is still pending.
        """
        if not prg_detach.called:
            self.client_plugin('nova').detach_volume(prg_detach.srv_id,
                                                     prg_detach.attach_id)
            prg_detach.called = True
            return False
        if not prg_detach.cinder_complete:
            cinder_complete_res = self.client_plugin(
            ).check_detach_volume_complete(prg_detach.vol_id)
            prg_detach.cinder_complete = cinder_complete_res
            return False
        if not prg_detach.nova_complete:
            prg_detach.nova_complete = self.client_plugin(
                'nova').check_detach_volume_complete(prg_detach.srv_id,
                                                     prg_detach.attach_id)
            return False

    def _attach_volume_to_complete(self, prg_attach):
        """Advance the attach state machine; True once Cinder shows in-use."""
        if not prg_attach.called:
            prg_attach.called = self.client_plugin('nova').attach_volume(
                prg_attach.srv_id, prg_attach.vol_id, prg_attach.device)
            return False
        if not prg_attach.complete:
            prg_attach.complete = self.client_plugin(
            ).check_attach_volume_complete(prg_attach.vol_id)
            return prg_attach.complete

    def check_update_complete(self, checkers):
        """Drive the progress objects from handle_update() to completion.

        Sequencing: detach first, then restore, then resize, then access
        mode, then re-attach. Returns True only when every requested
        operation has finished.
        """
        prg_restore, prg_detach, prg_resize, prg_access, prg_attach = checkers
        # detach volume
        if prg_detach:
            if not prg_detach.nova_complete:
                self._detach_volume_to_complete(prg_detach)
                return False
        if prg_restore:
            if not prg_restore.called:
                prg_restore.called = self._backup_restore(
                    prg_restore.vol_id, prg_restore.backup_id)
                return False
            if not prg_restore.complete:
                prg_restore.complete = self._check_backup_restore_complete()
                return prg_restore.complete and not prg_resize
        # resize volume
        if prg_resize:
            if not prg_resize.called:
                prg_resize.called = self._extend_volume(prg_resize.size)
                return False
            if not prg_resize.complete:
                prg_resize.complete = self._check_extend_volume_complete()
                return prg_resize.complete and not prg_attach
        # update read_only access mode
        if prg_access:
            if not prg_access.called:
                prg_access.called = self._update_read_only(
                    prg_access.read_only)
                return False
        # reattach volume back
        if prg_attach:
            return self._attach_volume_to_complete(prg_attach)
        return True

    def handle_snapshot(self):
        """Snapshot the volume by creating a (forced) Cinder backup."""
        backup = self.client().backups.create(self.resource_id, force=True)
        self.data_set('backup_id', backup.id)
        return backup.id

    def check_snapshot_complete(self, backup_id):
        """Poll the backup until it leaves 'creating'; error on failure."""
        backup = self.client().backups.get(backup_id)
        if backup.status == 'creating':
            return False
        if backup.status == 'available':
            return True
        raise exception.Error(backup.fail_reason)

    def handle_delete_snapshot(self, snapshot):
        """Delete the backup recorded for this snapshot, ignoring NotFound."""
        backup_id = snapshot['resource_data'].get('backup_id')
        if not backup_id:
            return
        try:
            self.client().backups.delete(backup_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return
        else:
            return backup_id

    def check_delete_snapshot_complete(self, backup_id):
        """True once the backup can no longer be fetched from Cinder."""
        if not backup_id:
            return True
        try:
            self.client().backups.get(backup_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return True
        else:
            return False

    def _build_exclusive_options(self):
        """Collect which mutually-exclusive creation sources were set.

        Returns (exclusive_options, allow_no_size_options); the latter are
        the sources that also make the size property optional.
        """
        exclusive_options = []
        allow_no_size_options = []
        if self.properties.get(self.SNAPSHOT_ID):
            exclusive_options.append(self.SNAPSHOT_ID)
            allow_no_size_options.append(self.SNAPSHOT_ID)
        if self.properties.get(self.SOURCE_VOLID):
            exclusive_options.append(self.SOURCE_VOLID)
            allow_no_size_options.append(self.SOURCE_VOLID)
        if self.properties.get(self.IMAGE):
            exclusive_options.append(self.IMAGE)
        if self.properties.get(self.IMAGE_REF):
            exclusive_options.append(self.IMAGE_REF)
        return exclusive_options, allow_no_size_options

    def _validate_create_sources(self):
        """Validate the combination of size and creation-source properties."""
        exclusive_options, allow_no_size_ops = self._build_exclusive_options()
        size = self.properties.get(self.SIZE)
        if (size is None and
            (len(allow_no_size_ops) != 1 or len(exclusive_options) != 1)):
            msg = (_('If neither "%(backup_id)s" nor "%(size)s" is '
                     'provided, one and only one of "%(source_vol)s", '
                     '"%(snapshot_id)s" must be specified, but currently '
                     'specified options: %(exclusive_options)s.') % {
                         'backup_id': self.BACKUP_ID,
                         'size': self.SIZE,
                         'source_vol': self.SOURCE_VOLID,
                         'snapshot_id': self.SNAPSHOT_ID,
                         'exclusive_options': exclusive_options
                     })
            raise exception.StackValidationFailed(message=msg)
        elif size and len(exclusive_options) > 1:
            msg = (_('If "%(size)s" is provided, only one of '
                     '"%(image)s", "%(image_ref)s", "%(source_vol)s", '
                     '"%(snapshot_id)s" can be specified, but currently '
                     'specified options: %(exclusive_options)s.') % {
                         'size': self.SIZE,
                         'image': self.IMAGE,
                         'image_ref': self.IMAGE_REF,
                         'source_vol': self.SOURCE_VOLID,
                         'snapshot_id': self.SNAPSHOT_ID,
                         'exclusive_options': exclusive_options
                     })
            raise exception.StackValidationFailed(message=msg)

    def validate(self):
        """Validate provided params."""
        res = super(CinderVolume, self).validate()
        if res is not None:
            return res

        # can not specify both image and imageRef
        image = self.properties.get(self.IMAGE)
        imageRef = self.properties.get(self.IMAGE_REF)
        if image and imageRef:
            raise exception.ResourcePropertyConflict(self.IMAGE,
                                                     self.IMAGE_REF)
        # if not create from backup, need to check other create sources
        if not self.properties.get(self.BACKUP_ID):
            self._validate_create_sources()

    def handle_restore(self, defn, restore_data):
        """Return a frozen definition that recreates the volume from backup."""
        backup_id = restore_data['resource_data']['backup_id']
        # we can't ignore 'size' property: if user update the size
        # of volume after snapshot, we need to change to old size
        # when restore the volume.
        ignore_props = (self.IMAGE_REF, self.IMAGE, self.SOURCE_VOLID)
        props = dict((key, value)
                     for (key, value) in self.properties.data.items()
                     if key not in ignore_props and value is not None)
        props[self.BACKUP_ID] = backup_id
        return defn.freeze(properties=props)

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Map live Cinder volume data back onto template property names."""
        volume_reality = {}

        # Cinder exposes read-only state via a 'readonly' metadata key; pop
        # it out so it maps to the READ_ONLY property instead of METADATA.
        if (resource_data.get(self.METADATA) and resource_data.get(
                self.METADATA).get('readonly') is not None):
            read_only = resource_data.get(self.METADATA).pop('readonly')
            volume_reality.update({self.READ_ONLY: read_only})

        old_vt = self.data().get(self.VOLUME_TYPE)
        new_vt = resource_data.get(self.VOLUME_TYPE)
        if old_vt != new_vt:
            volume_reality.update({self.VOLUME_TYPE: new_vt})
            self._store_config_default_properties(dict(volume_type=new_vt))

        props_keys = [
            self.SIZE, self.NAME, self.DESCRIPTION, self.METADATA,
            self.BACKUP_ID
        ]
        for key in props_keys:
            volume_reality.update({key: resource_data.get(key)})

        return volume_reality
# Exemple #27
# 0
class HeatWaitCondition(resource.Resource):
    """Resource that pauses a stack operation until signals arrive.

    The wait condition polls the handle resource referenced by the
    ``handle`` property until ``count`` success signals have been
    received, a failure signal arrives, or ``timeout`` seconds elapse.
    """

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        HANDLE,
        TIMEOUT,
        COUNT,
    ) = (
        'handle',
        'timeout',
        'count',
    )

    ATTRIBUTES = (DATA, ) = ('data', )

    properties_schema = {
        HANDLE:
        properties.Schema(
            properties.Schema.STRING,
            _('A reference to the wait condition handle used to signal this '
              'wait condition.'),
            required=True),
        TIMEOUT:
        properties.Schema(
            properties.Schema.NUMBER,
            _('The number of seconds to wait for the correct number of '
              'signals to arrive.'),
            required=True,
            constraints=[
                constraints.Range(1, 43200),
            ]),
        COUNT:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The number of success signals that must be received before '
              'the stack creation process continues.'),
            constraints=[
                constraints.Range(min=1),
            ],
            default=1,
            update_allowed=True),
    }

    attributes_schema = {
        DATA:
        attributes.Schema(_('JSON string containing data associated with wait '
                            'condition signals sent to the handle.'),
                          cache_mode=attributes.Schema.CACHE_NONE,
                          type=attributes.Schema.STRING),
    }

    # NOTE: the redundant __init__ that only delegated to super() was
    # removed; resource.Resource.__init__ is inherited unchanged.

    def _get_handle_resource(self):
        """Return the stack resource referenced by the handle property."""
        return self.stack.resource_by_refid(self.properties[self.HANDLE])

    def _wait(self, handle, started_at, timeout_in):
        """Check signal progress against the handle.

        :param handle: the wait condition handle resource.
        :param started_at: when the wait began (for timeout accounting).
        :param timeout_in: allowed wait duration in seconds.
        :returns: True once enough success signals arrived, False otherwise.
        :raises WaitConditionTimeout: if timeout_in seconds have elapsed.
        :raises WaitConditionFailure: if any non-success signal was received.
        """
        if timeutils.is_older_than(started_at, timeout_in):
            exc = wc_base.WaitConditionTimeout(self, handle)
            LOG.info(_LI('%(name)s Timed out (%(timeout)s)'), {
                'name': str(self),
                'timeout': str(exc)
            })
            raise exc

        handle_status = handle.get_status()

        # A single failure signal fails the whole wait condition.
        if any(s != handle.STATUS_SUCCESS for s in handle_status):
            failure = wc_base.WaitConditionFailure(self, handle)
            LOG.info(_LI('%(name)s Failed (%(failure)s)'), {
                'name': str(self),
                'failure': str(failure)
            })
            raise failure

        if len(handle_status) >= self.properties[self.COUNT]:
            LOG.info(_LI("%s Succeeded"), str(self))
            return True
        return False

    def handle_create(self):
        """Begin waiting; returns the state checked by check_create_complete."""
        handle = self._get_handle_resource()
        started_at = timeutils.utcnow()
        return handle, started_at, float(self.properties[self.TIMEOUT])

    def check_create_complete(self, data):
        return self._wait(*data)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-read properties (count may change) and restart the wait."""
        if prop_diff:
            self.properties = json_snippet.properties(self.properties_schema,
                                                      self.context)

        handle = self._get_handle_resource()
        started_at = timeutils.utcnow()
        return handle, started_at, float(self.properties[self.TIMEOUT])

    def check_update_complete(self, data):
        return self._wait(*data)

    def handle_delete(self):
        """Clear accumulated signal metadata on the handle, if it exists."""
        handle = self._get_handle_resource()
        if handle:
            handle.metadata_set({})

    def _resolve_attribute(self, key):
        """Resolve the 'data' attribute from the handle's signal metadata."""
        handle = self._get_handle_resource()
        if key == self.DATA:
            meta = handle.metadata_get(refresh=True)
            res = {k: meta[k][handle.DATA] for k in meta}
            # Lazy %-style args: the message is only formatted if debug
            # logging is enabled (was eagerly formatted with '%' before).
            LOG.debug('%(name)s.GetAtt(%(key)s) == %(res)s', {
                'name': self.name,
                'key': key,
                'res': res
            })

            return six.text_type(jsonutils.dumps(res))
# Exemple #28
# 0
class RandomString(resource.Resource):
    """A resource which generates a random string.

    This is useful for configuring passwords and secrets on services. The
    generated string is exposed through the ``value`` attribute and via the
    resource reference, and is stored redacted in the resource data.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        LENGTH,
        SEQUENCE,
        CHARACTER_CLASSES,
        CHARACTER_SEQUENCES,
        SALT,
    ) = (
        'length',
        'sequence',
        'character_classes',
        'character_sequences',
        'salt',
    )

    _CHARACTER_CLASSES_KEYS = (
        CHARACTER_CLASSES_CLASS,
        CHARACTER_CLASSES_MIN,
    ) = (
        'class',
        'min',
    )

    _CHARACTER_SEQUENCES = (
        CHARACTER_SEQUENCES_SEQUENCE,
        CHARACTER_SEQUENCES_MIN,
    ) = (
        'sequence',
        'min',
    )

    ATTRIBUTES = (VALUE, ) = ('value', )

    properties_schema = {
        LENGTH:
        properties.Schema(properties.Schema.INTEGER,
                          _('Length of the string to generate.'),
                          default=32,
                          constraints=[
                              constraints.Range(1, 512),
                          ]),
        SEQUENCE:
        properties.Schema(
            properties.Schema.STRING,
            _('Sequence of characters to build the random string from.'),
            constraints=[
                constraints.AllowedValues([
                    'lettersdigits', 'letters', 'lowercase', 'uppercase',
                    'digits', 'hexdigits', 'octdigits'
                ]),
            ],
            support_status=support.SupportStatus(
                support.DEPRECATED,
                _('Use property %s.') % CHARACTER_CLASSES)),
        CHARACTER_CLASSES:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of character class and their constraints to generate '
              'the random string from.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CHARACTER_CLASSES_CLASS:
                    properties.Schema(
                        properties.Schema.STRING,
                        (_('A character class and its corresponding %(min)s '
                           'constraint to generate the random string from.') %
                         {
                             'min': CHARACTER_CLASSES_MIN
                         }),
                        constraints=[
                            constraints.AllowedValues([
                                'lettersdigits', 'letters', 'lowercase',
                                'uppercase', 'digits', 'hexdigits', 'octdigits'
                            ]),
                        ],
                        default='lettersdigits'),
                    CHARACTER_CLASSES_MIN:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('The minimum number of characters from this '
                          'character class that will be in the generated '
                          'string.'),
                        default=1,
                        constraints=[
                            constraints.Range(1, 512),
                        ])
                })),
        CHARACTER_SEQUENCES:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of character sequences and their constraints to '
              'generate the random string from.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CHARACTER_SEQUENCES_SEQUENCE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('A character sequence and its corresponding %(min)s '
                          'constraint to generate the random string '
                          'from.') % {'min': CHARACTER_SEQUENCES_MIN},
                        required=True),
                    CHARACTER_SEQUENCES_MIN:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('The minimum number of characters from this '
                          'sequence that will be in the generated '
                          'string.'),
                        default=1,
                        constraints=[
                            constraints.Range(1, 512),
                        ])
                })),
        SALT:
        properties.Schema(
            properties.Schema.STRING,
            _('Value which can be set or changed on stack update to trigger '
              'the resource for replacement with a new random string . The '
              'salt value itself is ignored by the random generator.')),
    }

    attributes_schema = {
        VALUE:
        attributes.Schema(_(
            'The random string generated by this resource. This value is '
            'also available by referencing the resource.'),
                          cache_mode=attributes.Schema.CACHE_NONE),
    }

    # Character pools keyed by the class names accepted in the schemas above.
    # NOTE: 'hexdigits' is deliberately uppercase-only (not string.hexdigits).
    _sequences = {
        'lettersdigits': string.ascii_letters + string.digits,
        'letters': string.ascii_letters,
        'lowercase': string.ascii_lowercase,
        'uppercase': string.ascii_uppercase,
        'digits': string.digits,
        'hexdigits': string.digits + 'ABCDEF',
        'octdigits': string.octdigits
    }

    @staticmethod
    def _deprecated_random_string(sequence, length):
        """Generate `length` chars from `sequence` (deprecated code path)."""
        rand = random.SystemRandom()
        return ''.join(rand.choice(sequence) for x in six.moves.xrange(length))

    def _generate_random_string(self, char_sequences, char_classes, length):
        """Build a random string honouring the per-class/sequence minimums.

        :param char_sequences: list of {sequence, min} property maps (or None)
        :param char_classes: list of {class, min} property maps (or None)
        :param length: total length of the string to generate
        :returns: a shuffled random string of exactly `length` characters

        Uses random.SystemRandom so the result is suitable for secrets,
        matching _deprecated_random_string (the plain `random` module is a
        non-cryptographic PRNG and must not be used for passwords). Also
        uses six.moves.xrange for Python 3 compatibility; the bare builtin
        `xrange` does not exist there.
        """
        rand = random.SystemRandom()
        random_string = ""

        # Add the minimum number of chars from each char sequence & char class
        if char_sequences:
            for char_seq in char_sequences:
                seq = char_seq[self.CHARACTER_SEQUENCES_SEQUENCE]
                seq_min = char_seq[self.CHARACTER_SEQUENCES_MIN]
                for i in six.moves.xrange(seq_min):
                    random_string += rand.choice(seq)

        if char_classes:
            for char_class in char_classes:
                cclass_class = char_class[self.CHARACTER_CLASSES_CLASS]
                cclass_seq = self._sequences[cclass_class]
                cclass_min = char_class[self.CHARACTER_CLASSES_MIN]
                for i in six.moves.xrange(cclass_min):
                    random_string += rand.choice(cclass_seq)

        def random_class_char():
            # One random char from a randomly-picked character class.
            cclass_dict = rand.choice(char_classes)
            cclass_class = cclass_dict[self.CHARACTER_CLASSES_CLASS]
            cclass_seq = self._sequences[cclass_class]
            return rand.choice(cclass_seq)

        def random_seq_char():
            # One random char from a randomly-picked character sequence.
            seq_dict = rand.choice(char_sequences)
            seq = seq_dict[self.CHARACTER_SEQUENCES_SEQUENCE]
            return rand.choice(seq)

        # Fill up rest with random chars from provided sequences & classes,
        # weighting the pick by how many of each kind were supplied.
        if char_sequences and char_classes:
            weighted_choices = ([True] * len(char_classes) +
                                [False] * len(char_sequences))
            while len(random_string) < length:
                if rand.choice(weighted_choices):
                    random_string += random_class_char()
                else:
                    random_string += random_seq_char()

        elif char_sequences:
            while len(random_string) < length:
                random_string += random_seq_char()

        else:
            while len(random_string) < length:
                random_string += random_class_char()

        # Randomize string so the guaranteed minimums are not clustered
        # at the front.
        random_string = ''.join(
            rand.sample(random_string, len(random_string)))
        return random_string

    def validate(self):
        """Reject mixing the deprecated `sequence` property with the new
        character_* properties, and lengths smaller than the combined
        minimums.
        """
        super(RandomString, self).validate()
        sequence = self.properties.get(self.SEQUENCE)
        char_sequences = self.properties.get(self.CHARACTER_SEQUENCES)
        char_classes = self.properties.get(self.CHARACTER_CLASSES)

        if sequence and (char_sequences or char_classes):
            msg = (_("Cannot use deprecated '%(seq)s' property along with "
                     "'%(char_seqs)s' or '%(char_classes)s' properties") % {
                         'seq': self.SEQUENCE,
                         'char_seqs': self.CHARACTER_SEQUENCES,
                         'char_classes': self.CHARACTER_CLASSES
                     })
            raise exception.StackValidationFailed(message=msg)

        def char_min(char_dicts, min_prop):
            # Sum of the `min` constraints over a list of property maps.
            if char_dicts:
                return sum(char_dict[min_prop] for char_dict in char_dicts)
            return 0

        length = self.properties.get(self.LENGTH)
        min_length = (char_min(char_sequences, self.CHARACTER_SEQUENCES_MIN) +
                      char_min(char_classes, self.CHARACTER_CLASSES_MIN))
        if min_length > length:
            msg = _("Length property cannot be smaller than combined "
                    "character class and character sequence minimums")
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Generate the string and persist it (redacted) in resource data."""
        char_sequences = self.properties.get(self.CHARACTER_SEQUENCES)
        char_classes = self.properties.get(self.CHARACTER_CLASSES)
        length = self.properties.get(self.LENGTH)

        if char_sequences or char_classes:
            random_string = self._generate_random_string(
                char_sequences, char_classes, length)
        else:
            sequence = self.properties.get(self.SEQUENCE)
            if not sequence:  # Deprecated property not provided, use a default
                sequence = "lettersdigits"

            char_seq = self._sequences[sequence]
            random_string = self._deprecated_random_string(char_seq, length)

        self.data_set('value', random_string, redact=True)
        self.resource_id_set(self.physical_resource_name())

    def _resolve_attribute(self, name):
        # Only the VALUE attribute is supported; anything else is None.
        if name == self.VALUE:
            return self.data().get(self.VALUE)

    def FnGetRefId(self):
        """Referencing the resource yields the generated value once created,
        otherwise the resource name."""
        if self.resource_id is not None:
            return self.data().get('value')
        else:
            return six.text_type(self.name)
# --- Example #29 ---
class LoadBalancer(stack_resource.StackResource):
    """Implements a HAProxy-bearing instance as a nested stack.

    The template for the nested stack can be redefined with
    ``loadbalancer_template`` option in ``heat.conf``.

    Generally the image used for the instance must have the following
    packages installed or available for installation at runtime::

        - heat-cfntools and its dependencies like python-psutil
        - cronie
        - socat
        - haproxy

    Current default builtin template uses Fedora 21 x86_64 base cloud image
    (https://getfedora.org/cloud/download/)
    and apart from installing packages goes through some hoops
    around SELinux due to pecularities of heat-cfntools.

    """

    PROPERTIES = (
        AVAILABILITY_ZONES,
        HEALTH_CHECK,
        INSTANCES,
        LISTENERS,
        APP_COOKIE_STICKINESS_POLICY,
        LBCOOKIE_STICKINESS_POLICY,
        SECURITY_GROUPS,
        SUBNETS,
    ) = (
        'AvailabilityZones',
        'HealthCheck',
        'Instances',
        'Listeners',
        'AppCookieStickinessPolicy',
        'LBCookieStickinessPolicy',
        'SecurityGroups',
        'Subnets',
    )

    _HEALTH_CHECK_KEYS = (
        HEALTH_CHECK_HEALTHY_THRESHOLD,
        HEALTH_CHECK_INTERVAL,
        HEALTH_CHECK_TARGET,
        HEALTH_CHECK_TIMEOUT,
        HEALTH_CHECK_UNHEALTHY_THRESHOLD,
    ) = (
        'HealthyThreshold',
        'Interval',
        'Target',
        'Timeout',
        'UnhealthyThreshold',
    )

    _LISTENER_KEYS = (
        LISTENER_INSTANCE_PORT,
        LISTENER_LOAD_BALANCER_PORT,
        LISTENER_PROTOCOL,
        LISTENER_SSLCERTIFICATE_ID,
        LISTENER_POLICY_NAMES,
    ) = (
        'InstancePort',
        'LoadBalancerPort',
        'Protocol',
        'SSLCertificateId',
        'PolicyNames',
    )

    ATTRIBUTES = (
        CANONICAL_HOSTED_ZONE_NAME,
        CANONICAL_HOSTED_ZONE_NAME_ID,
        DNS_NAME,
        SOURCE_SECURITY_GROUP_GROUP_NAME,
        SOURCE_SECURITY_GROUP_OWNER_ALIAS,
    ) = (
        'CanonicalHostedZoneName',
        'CanonicalHostedZoneNameID',
        'DNSName',
        'SourceSecurityGroup.GroupName',
        'SourceSecurityGroup.OwnerAlias',
    )

    properties_schema = {
        AVAILABILITY_ZONES:
        properties.Schema(
            properties.Schema.LIST,
            _('The Availability Zones in which to create the load balancer.'),
            required=True),
        HEALTH_CHECK:
        properties.Schema(
            properties.Schema.MAP,
            _('An application health check for the instances.'),
            schema={
                HEALTH_CHECK_HEALTHY_THRESHOLD:
                properties.Schema(
                    properties.Schema.NUMBER,
                    _('The number of consecutive health probe successes '
                      'required before moving the instance to the '
                      'healthy state.'),
                    required=True),
                HEALTH_CHECK_INTERVAL:
                properties.Schema(
                    properties.Schema.NUMBER,
                    _('The approximate interval, in seconds, between '
                      'health checks of an individual instance.'),
                    required=True),
                HEALTH_CHECK_TARGET:
                properties.Schema(properties.Schema.STRING,
                                  _('The port being checked.'),
                                  required=True),
                HEALTH_CHECK_TIMEOUT:
                properties.Schema(properties.Schema.NUMBER,
                                  _('Health probe timeout, in seconds.'),
                                  required=True),
                HEALTH_CHECK_UNHEALTHY_THRESHOLD:
                properties.Schema(
                    properties.Schema.NUMBER,
                    _('The number of consecutive health probe failures '
                      'required before moving the instance to the '
                      'unhealthy state'),
                    required=True),
            }),
        INSTANCES:
        properties.Schema(properties.Schema.LIST,
                          _('The list of instance IDs load balanced.'),
                          update_allowed=True),
        LISTENERS:
        properties.Schema(
            properties.Schema.LIST,
            _('One or more listeners for this load balancer.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    LISTENER_INSTANCE_PORT:
                    properties.Schema(
                        properties.Schema.NUMBER,
                        _('TCP port on which the instance server is '
                          'listening.'),
                        required=True),
                    LISTENER_LOAD_BALANCER_PORT:
                    properties.Schema(
                        properties.Schema.NUMBER,
                        _('The external load balancer port number.'),
                        required=True),
                    LISTENER_PROTOCOL:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The load balancer transport protocol to use.'),
                        required=True,
                        constraints=[
                            constraints.AllowedValues(['TCP', 'HTTP']),
                        ]),
                    LISTENER_SSLCERTIFICATE_ID:
                    properties.Schema(properties.Schema.STRING,
                                      _('Not Implemented.'),
                                      implemented=False),
                    LISTENER_POLICY_NAMES:
                    properties.Schema(properties.Schema.LIST,
                                      _('Not Implemented.'),
                                      implemented=False),
                },
            ),
            required=True),
        APP_COOKIE_STICKINESS_POLICY:
        properties.Schema(properties.Schema.STRING,
                          _('Not Implemented.'),
                          implemented=False),
        LBCOOKIE_STICKINESS_POLICY:
        properties.Schema(properties.Schema.STRING,
                          _('Not Implemented.'),
                          implemented=False),
        SECURITY_GROUPS:
        properties.Schema(properties.Schema.STRING,
                          _('Not Implemented.'),
                          implemented=False),
        SUBNETS:
        properties.Schema(properties.Schema.LIST,
                          _('Not Implemented.'),
                          implemented=False),
    }

    attributes_schema = {
        CANONICAL_HOSTED_ZONE_NAME:
        attributes.Schema(
            _("The name of the hosted zone that is associated with the "
              "LoadBalancer.")),
        CANONICAL_HOSTED_ZONE_NAME_ID:
        attributes.Schema(
            _("The ID of the hosted zone name that is associated with the "
              "LoadBalancer.")),
        DNS_NAME:
        attributes.Schema(_("The DNS name for the LoadBalancer.")),
        SOURCE_SECURITY_GROUP_GROUP_NAME:
        attributes.Schema(
            _("The security group that you can use as part of your inbound "
              "rules for your LoadBalancer's back-end instances.")),
        SOURCE_SECURITY_GROUP_OWNER_ALIAS:
        attributes.Schema(_("Owner of the source security group.")),
    }

    def _haproxy_config_global(self):
        """Return the static 'global' and 'defaults' haproxy.cfg sections."""
        return '''
global
    daemon
    maxconn 256
    stats socket /tmp/.haproxy-stats

defaults
    mode http
    timeout connect 5000ms
    timeout client 50000ms
    timeout server 50000ms
'''

    def _haproxy_config_frontend(self):
        """Return the 'frontend' section bound to the first listener's
        external load balancer port."""
        listener = self.properties[self.LISTENERS][0]
        lb_port = listener[self.LISTENER_LOAD_BALANCER_PORT]
        return '''
frontend http
    bind *:%s
    default_backend servers
''' % (lb_port)

    def _haproxy_config_backend(self):
        """Return the 'backend' section header, with a 'timeout check'
        directive when a health check is configured."""
        health_chk = self.properties[self.HEALTH_CHECK]
        if health_chk:
            timeout = int(health_chk[self.HEALTH_CHECK_TIMEOUT])
            timeout_check = 'timeout check %ds' % timeout
            spaces = '    '
        else:
            timeout_check = ''
            spaces = ''

        return '''
backend servers
    balance roundrobin
    option http-server-close
    option forwardfor
    option httpchk
%s%s
''' % (spaces, timeout_check)

    def _haproxy_config_servers(self, instances):
        """Return one 'server' line per instance, resolving each instance
        ID to its IP address via nova (0.0.0.0 when unresolvable)."""
        listener = self.properties[self.LISTENERS][0]
        inst_port = listener[self.LISTENER_INSTANCE_PORT]
        spaces = '    '
        check = ''
        health_chk = self.properties[self.HEALTH_CHECK]
        if health_chk:
            check = ' check inter %ss fall %s rise %s' % (
                health_chk[self.HEALTH_CHECK_INTERVAL],
                health_chk[self.HEALTH_CHECK_UNHEALTHY_THRESHOLD],
                health_chk[self.HEALTH_CHECK_HEALTHY_THRESHOLD])

        servers = []
        n = 1
        nova_cp = self.client_plugin('nova')
        for i in instances or []:
            ip = nova_cp.server_to_ipaddress(i) or '0.0.0.0'
            # Lazy %-args: only interpolated if debug logging is enabled.
            LOG.debug('haproxy server:%s', ip)
            servers.append('%sserver server%d %s:%s%s' %
                           (spaces, n, ip, inst_port, check))
            n = n + 1
        return '\n'.join(servers)

    def _haproxy_config(self, instances):
        """Assemble the complete haproxy.cfg contents for `instances`."""
        # initial simplifications:
        # - only one Listener
        # - only http (no tcp or ssl)
        #
        # option httpchk HEAD /check.txt HTTP/1.0
        return '%s%s%s%s\n' % (self._haproxy_config_global(),
                               self._haproxy_config_frontend(),
                               self._haproxy_config_backend(),
                               self._haproxy_config_servers(instances))

    def get_parsed_template(self):
        """Return the nested-stack template, preferring the operator's
        custom template from cfg.CONF.loadbalancer_template."""
        if cfg.CONF.loadbalancer_template:
            with open(cfg.CONF.loadbalancer_template) as templ_fd:
                LOG.info(_LI('Using custom loadbalancer template %s'),
                         cfg.CONF.loadbalancer_template)
                contents = templ_fd.read()
        else:
            contents = lb_template_default
        return template_format.parse(contents)

    def child_params(self):
        """Parameters for the nested stack."""
        params = {}

        # If the owning stack defines KeyName, we use that key for the nested
        # template, otherwise use no key
        if 'KeyName' in self.stack.parameters:
            params['KeyName'] = self.stack.parameters['KeyName']

        return params

    def child_template(self):
        """Template for the nested stack, with KeyName stripped when the
        owning stack does not define one."""
        templ = self.get_parsed_template()

        # If the owning stack defines KeyName, we use that key for the nested
        # template, otherwise use no key
        if 'KeyName' not in self.stack.parameters:
            del templ['Resources']['LB_instance']['Properties']['KeyName']
            del templ['Parameters']['KeyName']

        return templ

    def handle_create(self):
        """Inject the generated haproxy.cfg into the nested template's
        cfn-init metadata and create the nested stack."""
        templ = self.child_template()
        params = self.child_params()

        if self.properties[self.INSTANCES]:
            md = templ['Resources']['LB_instance']['Metadata']
            files = md['AWS::CloudFormation::Init']['config']['files']
            # NOTE: named haproxy_cfg (not cfg) to avoid shadowing the
            # oslo.config `cfg` module used elsewhere in this class.
            haproxy_cfg = self._haproxy_config(self.properties[self.INSTANCES])
            files['/etc/haproxy/haproxy.cfg']['content'] = haproxy_cfg

        return self.create_with_template(templ, params)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-generate the Metadata, save it to the db, and rely on
        cfn-hup to reconfigure HAProxy on the instance."""
        new_props = json_snippet.properties(self.properties_schema,
                                            self.context)

        # Valid use cases are:
        # - Membership controlled by members property in template
        # - Empty members property in template; membership controlled by
        #   "updates" triggered from autoscaling group.
        # Mixing the two will lead to undefined behaviour.
        if (self.INSTANCES in prop_diff
                and (self.properties[self.INSTANCES] is not None
                     or new_props[self.INSTANCES] is not None)):
            haproxy_cfg = self._haproxy_config(prop_diff[self.INSTANCES])

            md = self.nested()['LB_instance'].metadata_get()
            files = md['AWS::CloudFormation::Init']['config']['files']
            files['/etc/haproxy/haproxy.cfg']['content'] = haproxy_cfg

            self.nested()['LB_instance'].metadata_set(md)

    def handle_delete(self):
        """Delete the nested stack."""
        return self.delete_nested()

    def validate(self):
        """Validate any of the provided params.

        Checks that a configured custom template file is readable and that
        the health check interval exceeds its timeout.
        """
        res = super(LoadBalancer, self).validate()
        if res:
            return res

        if (cfg.CONF.loadbalancer_template
                and not os.access(cfg.CONF.loadbalancer_template, os.R_OK)):
            msg = _('Custom LoadBalancer template can not be found')
            raise exception.StackValidationFailed(message=msg)

        health_chk = self.properties[self.HEALTH_CHECK]
        if health_chk:
            interval = float(health_chk[self.HEALTH_CHECK_INTERVAL])
            timeout = float(health_chk[self.HEALTH_CHECK_TIMEOUT])
            if interval < timeout:
                # Legacy-style error return kept for caller compatibility.
                return {'Error': 'Interval must be larger than Timeout'}

    def FnGetRefId(self):
        return six.text_type(self.name)

    def _resolve_attribute(self, name):
        """We don't really support any of these yet; DNSName resolves to
        the nested stack's PublicIp output, the rest to ''."""
        if name == self.DNS_NAME:
            return self.get_output('PublicIp')
        elif name in self.attributes_schema:
            # Not sure if we should return anything for the other attribs
            # since they aren't really supported in any meaningful way
            return ''
class SaharaCluster(resource.Resource):
    """A resource for managing a Sahara (Hadoop) cluster.

    Launches a cluster from an existing cluster template and tracks its
    lifecycle through the Sahara API.
    """

    PROPERTIES = (
        NAME, PLUGIN_NAME, HADOOP_VERSION, CLUSTER_TEMPLATE_ID,
        KEY_NAME, IMAGE, MANAGEMENT_NETWORK, IMAGE_ID,
        USE_AUTOCONFIG, SHARES
    ) = (
        'name', 'plugin_name', 'hadoop_version', 'cluster_template_id',
        'key_name', 'image', 'neutron_management_network', 'default_image_id',
        'use_autoconfig', 'shares'
    )

    _SHARE_KEYS = (
        SHARE_ID, PATH, ACCESS_LEVEL
    ) = (
        'id', 'path', 'access_level'
    )

    ATTRIBUTES = (
        STATUS, INFO,
    ) = (
        "status", "info",
    )

    # Terminal / target states reported by the Sahara API.
    CLUSTER_STATUSES = (
        CLUSTER_ACTIVE, CLUSTER_ERROR
    ) = (
        'Active', 'Error'
    )
    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Hadoop cluster name.'),
        ),
        PLUGIN_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('sahara.plugin')
            ]
        ),
        HADOOP_VERSION: properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
        ),
        CLUSTER_TEMPLATE_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of the Cluster Template used for '
              'Node Groups and configurations.'),
            required=True,
        ),
        KEY_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Keypair added to instances to make them accessible for user.'),
            constraints=[
                constraints.CustomConstraint('nova.keypair')
            ],
        ),
        IMAGE: properties.Schema(
            properties.Schema.STRING,
            _('Name or UUID of the image used to boot Hadoop nodes.'),
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_('Use property %s.') % IMAGE_ID,
                    version='2015.1',
                    previous_status=support.SupportStatus(version='2014.2'))
            ),
            constraints=[
                constraints.CustomConstraint('glance.image')
            ],
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _('Default name or UUID of the image used to boot Hadoop nodes.'),
            constraints=[
                constraints.CustomConstraint('sahara.image'),
            ],
            support_status=support.SupportStatus(version='2015.1')
        ),
        MANAGEMENT_NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('Name or UUID of network.'),
            constraints=[
                constraints.CustomConstraint('neutron.network')
            ],
        ),
        USE_AUTOCONFIG: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Configure most important configs automatically."),
            support_status=support.SupportStatus(version='5.0.0')
        ),
        SHARES: properties.Schema(
            properties.Schema.LIST,
            _("List of manila shares to be mounted."),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    SHARE_ID: properties.Schema(
                        properties.Schema.STRING,
                        _("Id of the manila share."),
                        required=True
                    ),
                    PATH: properties.Schema(
                        properties.Schema.STRING,
                        _("Local path on each cluster node on which to mount "
                          "the share. Defaults to '/mnt/{share_id}'.")
                    ),
                    ACCESS_LEVEL: properties.Schema(
                        properties.Schema.STRING,
                        _("Governs permissions set in manila for the cluster "
                          "ips."),
                        constraints=[
                            constraints.AllowedValues(['rw', 'ro']),
                        ],
                        default='rw'
                    )
                }
            ),
            support_status=support.SupportStatus(version='6.0.0')
        )
    }

    attributes_schema = {
        STATUS: attributes.Schema(
            _("Cluster status."),
            type=attributes.Schema.STRING
        ),
        INFO: attributes.Schema(
            _("Cluster information."),
            type=attributes.Schema.MAP
        ),
    }

    default_client_name = 'sahara'

    entity = 'clusters'

    def translation_rules(self, props):
        """Map the deprecated IMAGE property onto IMAGE_ID."""
        return [properties.TranslationRule(
            props,
            properties.TranslationRule.REPLACE,
            [self.IMAGE_ID],
            value_path=[self.IMAGE]
        )]

    def _validate_depr_keys(self, properties, key, depr_key):
        """Raise if both a property and its deprecated alias are set.

        The conflict is reported by property *name*; previously the
        property values were passed to the exception, which produced a
        misleading message (and could leak the values themselves).
        """
        value = properties.get(key)
        depr_value = properties.get(depr_key)
        if value and depr_value:
            raise exception.ResourcePropertyConflict(key, depr_key)

    def _cluster_name(self):
        """User-supplied name, falling back to the generated physical name."""
        name = self.properties[self.NAME]
        if name:
            return name
        return self.physical_resource_name()

    def handle_create(self):
        """Create the Sahara cluster and return its id for polling."""
        plugin_name = self.properties[self.PLUGIN_NAME]
        hadoop_version = self.properties[self.HADOOP_VERSION]
        cluster_template_id = self.properties[self.CLUSTER_TEMPLATE_ID]
        image_id = (self.properties[self.IMAGE_ID] or
                    self.properties[self.IMAGE])
        if image_id:
            image_id = self.client_plugin('glance').get_image_id(image_id)

        # check that image is provided in case when
        # cluster template is missing one
        cluster_template = self.client().cluster_templates.get(
            cluster_template_id)
        if cluster_template.default_image_id is None and not image_id:
            msg = _("%(img)s must be provided: Referenced cluster template "
                    "%(tmpl)s has no default_image_id defined.") % {
                        'img': self.IMAGE, 'tmpl': cluster_template_id}
            raise exception.StackValidationFailed(message=msg)

        key_name = self.properties[self.KEY_NAME]
        net_id = self.properties[self.MANAGEMENT_NETWORK]
        if net_id:
            # Resolve the network reference through whichever networking
            # service this deployment uses.
            if self.is_using_neutron():
                net_id = self.client_plugin('neutron').find_neutron_resource(
                    self.properties, self.MANAGEMENT_NETWORK, 'network')
            else:
                net_id = self.client_plugin('nova').get_nova_network_id(
                    net_id)
        use_autoconfig = self.properties[self.USE_AUTOCONFIG]
        shares = self.properties[self.SHARES]

        cluster = self.client().clusters.create(
            self._cluster_name(),
            plugin_name, hadoop_version,
            cluster_template_id=cluster_template_id,
            user_keypair_id=key_name,
            default_image_id=image_id,
            net_id=net_id,
            use_autoconfig=use_autoconfig,
            shares=shares)
        LOG.info(_LI('Cluster "%s" is being started.'), cluster.name)
        self.resource_id_set(cluster.id)
        return self.resource_id

    def check_create_complete(self, cluster_id):
        """Poll until the cluster is Active; raise on Error state."""
        cluster = self.client().clusters.get(cluster_id)
        if cluster.status == self.CLUSTER_ERROR:
            raise exception.ResourceInError(resource_status=cluster.status)

        if cluster.status != self.CLUSTER_ACTIVE:
            return False

        LOG.info(_LI("Cluster '%s' has been created"), cluster.name)
        return True

    def check_delete_complete(self, resource_id):
        """Poll until the cluster is gone; raise if it lands in Error."""
        if not resource_id:
            return True

        try:
            cluster = self.client().clusters.get(resource_id)
        except Exception as ex:
            # NotFound means deletion finished; anything else re-raises.
            self.client_plugin().ignore_not_found(ex)
            LOG.info(_LI("Cluster '%s' has been deleted"),
                     self._cluster_name())
            return True
        else:
            if cluster.status == self.CLUSTER_ERROR:
                raise exception.ResourceInError(resource_status=cluster.status)

        return False

    def _resolve_attribute(self, name):
        # Attributes map directly onto fields of the Sahara cluster object.
        cluster = self.client().clusters.get(self.resource_id)
        return getattr(cluster, name, None)

    def validate(self):
        """Validate property combinations and the plugin/version pair."""
        res = super(SaharaCluster, self).validate()

        if res:
            return res

        self._validate_depr_keys(self.properties, self.IMAGE_ID, self.IMAGE)
        # check if running on neutron and MANAGEMENT_NETWORK missing
        if (self.is_using_neutron() and
                not self.properties[self.MANAGEMENT_NETWORK]):
            msg = _("%s must be provided"
                    ) % self.MANAGEMENT_NETWORK
            raise exception.StackValidationFailed(message=msg)

        self.client_plugin().validate_hadoop_version(
            self.properties[self.PLUGIN_NAME],
            self.properties[self.HADOOP_VERSION]
        )