Example #1
class OSDBInstance(resource.Resource):
    '''
    OpenStack cloud database instance resource.
    '''

    support_status = support.SupportStatus(version='2014.1')

    TROVE_STATUS = (
        ERROR,
        FAILED,
        ACTIVE,
    ) = (
        'ERROR',
        'FAILED',
        'ACTIVE',
    )

    TROVE_STATUS_REASON = {
        FAILED:
        _('The database instance was created, but heat failed to set '
          'up the datastore. If a database instance is in the FAILED '
          'state, it should be deleted and a new one should be '
          'created.'),
        ERROR:
        _('The last operation for the database instance failed due to '
          'an error.'),
    }

    BAD_STATUSES = (ERROR, FAILED)
    PROPERTIES = (
        NAME,
        FLAVOR,
        SIZE,
        DATABASES,
        USERS,
        AVAILABILITY_ZONE,
        RESTORE_POINT,
        DATASTORE_TYPE,
        DATASTORE_VERSION,
        NICS,
        REPLICA_OF,
        REPLICA_COUNT,
    ) = ('name', 'flavor', 'size', 'databases', 'users', 'availability_zone',
         'restore_point', 'datastore_type', 'datastore_version', 'networks',
         'replica_of', 'replica_count')

    _DATABASE_KEYS = (
        DATABASE_CHARACTER_SET,
        DATABASE_COLLATE,
        DATABASE_NAME,
    ) = (
        'character_set',
        'collate',
        'name',
    )

    _USER_KEYS = (
        USER_NAME,
        USER_PASSWORD,
        USER_HOST,
        USER_DATABASES,
    ) = (
        'name',
        'password',
        'host',
        'databases',
    )

    _NICS_KEYS = (NET, PORT, V4_FIXED_IP) = ('network', 'port', 'fixed_ip')

    ATTRIBUTES = (
        HOSTNAME,
        HREF,
    ) = (
        'hostname',
        'href',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the DB instance to create.'),
                          constraints=[
                              constraints.Length(max=255),
                          ]),
        FLAVOR:
        properties.Schema(
            properties.Schema.STRING,
            _('Reference to a flavor for creating DB instance.'),
            required=True,
            constraints=[constraints.CustomConstraint('trove.flavor')]),
        DATASTORE_TYPE:
        properties.Schema(properties.Schema.STRING,
                          _("Name of registered datastore type."),
                          constraints=[constraints.Length(max=255)]),
        DATASTORE_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _("Name of the registered datastore version. "
              "It must exist for provided datastore type. "
              "Defaults to using single active version. "
              "If several active versions exist for provided datastore type, "
              "explicit value for this parameter must be specified."),
            constraints=[constraints.Length(max=255)]),
        SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Database volume size in GB.'),
                          required=True,
                          constraints=[
                              constraints.Range(1, 150),
                          ]),
        NICS:
        properties.Schema(
            properties.Schema.LIST,
            _("List of network interfaces to create on instance."),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NET:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of the network to attach this NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT,
                              'net': NET
                          },
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ]),
                    PORT:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of Neutron port to attach this '
                          'NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT,
                              'net': NET
                          },
                        constraints=[
                            constraints.CustomConstraint('neutron.port')
                        ],
                    ),
                    V4_FIXED_IP:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Fixed IPv4 address for this NIC.'),
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                },
            ),
        ),
        DATABASES:
        properties.Schema(
            properties.Schema.LIST,
            _('List of databases to be created on DB instance creation.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    DATABASE_CHARACTER_SET:
                    properties.Schema(properties.Schema.STRING,
                                      _('Set of symbols and encodings.'),
                                      default='utf8'),
                    DATABASE_COLLATE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Set of rules for comparing characters in a '
                          'character set.'),
                        default='utf8_general_ci'),
                    DATABASE_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Specifies database names for creating '
                          'databases on instance creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=64),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                },
            )),
        USERS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of users to be created on DB instance creation.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    USER_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('User name to create a user on instance '
                          'creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=16),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                    USER_PASSWORD:
                    properties.Schema(properties.Schema.STRING,
                                      _('Password for those users on instance '
                                        'creation.'),
                                      required=True,
                                      constraints=[
                                          constraints.AllowedPattern(
                                              r'[a-zA-Z0-9_]+'
                                              r'[a-zA-Z0-9_@?#\s]*'
                                              r'[a-zA-Z0-9_]+'),
                                      ]),
                    USER_HOST:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The host from which a user is allowed to '
                          'connect to the database.'),
                        default='%'),
                    USER_DATABASES:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('Names of databases that those users can '
                          'access on instance creation.'),
                        schema=properties.Schema(properties.Schema.STRING, ),
                        required=True,
                        constraints=[
                            constraints.Length(min=1),
                        ]),
                },
            )),
        AVAILABILITY_ZONE:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the availability zone for DB instance.')),
        RESTORE_POINT:
        properties.Schema(properties.Schema.STRING,
                          _('DB instance restore point.')),
        REPLICA_OF:
        properties.Schema(
            properties.Schema.STRING,
            _('Identifier of the source instance to replicate.'),
            support_status=support.SupportStatus(version='5.0.0')),
        REPLICA_COUNT:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The number of replicas to be created.'),
            support_status=support.SupportStatus(version='5.0.0')),
    }

    attributes_schema = {
        HOSTNAME:
        attributes.Schema(_("Hostname of the instance."),
                          type=attributes.Schema.STRING),
        HREF:
        attributes.Schema(_("Api endpoint reference of the instance."),
                          type=attributes.Schema.STRING),
    }

    default_client_name = 'trove'

    entity = 'instances'

    def __init__(self, name, json_snippet, stack):
        super(OSDBInstance, self).__init__(name, json_snippet, stack)
        self._href = None
        self._dbinstance = None

    @property
    def dbinstance(self):
        """Get the trove dbinstance."""
        if not self._dbinstance and self.resource_id:
            self._dbinstance = self.client().instances.get(self.resource_id)

        return self._dbinstance

    def _dbinstance_name(self):
        name = self.properties[self.NAME]
        if name:
            return name

        return self.physical_resource_name()

    def handle_create(self):
        '''
        Create cloud database instance.
        '''
        self.flavor = self.client_plugin().get_flavor_id(
            self.properties[self.FLAVOR])
        self.volume = {'size': self.properties[self.SIZE]}
        self.databases = self.properties[self.DATABASES]
        self.users = self.properties[self.USERS]
        restore_point = self.properties[self.RESTORE_POINT]
        if restore_point:
            restore_point = {"backupRef": restore_point}
        zone = self.properties[self.AVAILABILITY_ZONE]
        self.datastore_type = self.properties[self.DATASTORE_TYPE]
        self.datastore_version = self.properties[self.DATASTORE_VERSION]
        replica_of = self.properties[self.REPLICA_OF]
        replica_count = self.properties[self.REPLICA_COUNT]

        # convert user databases to format required for troveclient.
        # that is, list of database dictionaries
        for user in self.users:
            dbs = [{'name': db} for db in user.get(self.USER_DATABASES, [])]
            user[self.USER_DATABASES] = dbs

        # convert networks to format required by troveclient
        nics = []
        for nic in self.properties[self.NICS]:
            nic_dict = {}
            net = nic.get(self.NET)
            if net:
                if self.is_using_neutron():
                    net_id = (
                        self.client_plugin('neutron').find_neutron_resource(
                            nic, self.NET, 'network'))
                else:
                    net_id = (
                        self.client_plugin('nova').get_nova_network_id(net))
                nic_dict['net-id'] = net_id
            port = nic.get(self.PORT)
            if port:
                neutron = self.client_plugin('neutron')
                nic_dict['port-id'] = neutron.find_neutron_resource(
                    nic, self.PORT, 'port')
            ip = nic.get(self.V4_FIXED_IP)
            if ip:
                nic_dict['v4-fixed-ip'] = ip
            nics.append(nic_dict)

        # create db instance
        instance = self.client().instances.create(
            self._dbinstance_name(),
            self.flavor,
            volume=self.volume,
            databases=self.databases,
            users=self.users,
            restorePoint=restore_point,
            availability_zone=zone,
            datastore=self.datastore_type,
            datastore_version=self.datastore_version,
            nics=nics,
            replica_of=replica_of,
            replica_count=replica_count)
        self.resource_id_set(instance.id)

        return instance.id

    def _refresh_instance(self, instance_id):
        try:
            instance = self.client().instances.get(instance_id)
            return instance
        except Exception as exc:
            if self.client_plugin().is_over_limit(exc):
                LOG.warn(
                    _LW("Stack %(name)s (%(id)s) received an "
                        "OverLimit response during instance.get():"
                        " %(exception)s"), {
                            'name': self.stack.name,
                            'id': self.stack.id,
                            'exception': exc
                        })
                return None
            else:
                raise

    def check_create_complete(self, instance_id):
        '''
        Check if cloud DB instance creation is complete.
        '''
        instance = self._refresh_instance(instance_id)  # refresh attributes
        if instance is None:
            return False
        if instance.status in self.BAD_STATUSES:
            raise exception.ResourceInError(
                resource_status=instance.status,
                status_reason=self.TROVE_STATUS_REASON.get(
                    instance.status, _("Unknown")))

        if instance.status != self.ACTIVE:
            return False
        LOG.info(
            _LI("Database instance %(database)s created (flavor:%("
                "flavor)s,volume:%(volume)s, datastore:%("
                "datastore_type)s, datastore_version:%("
                "datastore_version)s)"), {
                    'database': self._dbinstance_name(),
                    'flavor': self.flavor,
                    'volume': self.volume,
                    'datastore_type': self.datastore_type,
                    'datastore_version': self.datastore_version
                })
        return True

    def handle_check(self):
        instance = self.client().instances.get(self.resource_id)
        status = instance.status
        checks = [
            {
                'attr': 'status',
                'expected': self.ACTIVE,
                'current': status
            },
        ]
        self._verify_check_conditions(checks)

    def handle_delete(self):
        '''
        Delete a cloud database instance.
        '''
        if not self.resource_id:
            return

        try:
            instance = self.client().instances.get(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            instance.delete()
            return instance.id

    def check_delete_complete(self, instance_id):
        '''
        Check for completion of cloud DB instance deletion
        '''
        if not instance_id:
            return True

        try:
            # For some time trove instance may continue to live
            self._refresh_instance(instance_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return True

        return False

    def validate(self):
        '''
        Validate any of the provided params
        '''
        res = super(OSDBInstance, self).validate()
        if res:
            return res

        datastore_type = self.properties[self.DATASTORE_TYPE]
        datastore_version = self.properties[self.DATASTORE_VERSION]

        self.client_plugin().validate_datastore(datastore_type,
                                                datastore_version,
                                                self.DATASTORE_TYPE,
                                                self.DATASTORE_VERSION)

        # check validity of user and databases
        users = self.properties[self.USERS]
        if users:
            databases = self.properties[self.DATABASES]
            if not databases:
                msg = _('Databases property is required if users property '
                        'is provided for resource %s.') % self.name
                raise exception.StackValidationFailed(message=msg)

            db_names = set([db[self.DATABASE_NAME] for db in databases])
            for user in users:
                missing_db = [
                    db_name for db_name in user[self.USER_DATABASES]
                    if db_name not in db_names
                ]

                if missing_db:
                    msg = (_('Database %(dbs)s specified for user does '
                             'not exist in databases for resource %(name)s.') %
                           {
                               'dbs': missing_db,
                               'name': self.name
                           })
                    raise exception.StackValidationFailed(message=msg)

        # check validity of NICS
        is_neutron = self.is_using_neutron()
        nics = self.properties[self.NICS]
        for nic in nics:
            if not is_neutron and nic.get(self.PORT):
                msg = _("Can not use %s property on Nova-network.") % self.PORT
                raise exception.StackValidationFailed(message=msg)

            if bool(nic.get(self.NET)) == bool(nic.get(self.PORT)):
                msg = _("Either %(net)s or %(port)s must be provided.") % {
                    'net': self.NET,
                    'port': self.PORT
                }
                raise exception.StackValidationFailed(message=msg)

    def href(self):
        if not self._href and self.dbinstance:
            if not self.dbinstance.links:
                self._href = None
            else:
                for link in self.dbinstance.links:
                    if link['rel'] == 'self':
                        self._href = link[self.HREF]
                        break

        return self._href

    def _resolve_attribute(self, name):
        if name == self.HOSTNAME:
            return self.dbinstance.hostname
        elif name == self.HREF:
            return self.href()
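
Note: the key tuples in OSDBInstance (TROVE_STATUS, PROPERTIES, _DATABASE_KEYS, and so on) use Python's chained-assignment unpacking, so each name on the left becomes a class constant while the outer name keeps the whole tuple for iteration and schema keys. A minimal, self-contained sketch of the idiom, reusing two of the names above:

PROPERTIES = (
    NAME,
    FLAVOR,
) = (
    'name',
    'flavor',
)

# The right-hand tuple is assigned to PROPERTIES and simultaneously unpacked
# into the individual constants.
assert PROPERTIES == ('name', 'flavor')
assert NAME == 'name' and FLAVOR == 'flavor'
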
Example #2
class TroveCluster(resource.Resource):

    support_status = support.SupportStatus(version='2015.1')

    TROVE_STATUS = (
        ERROR,
        FAILED,
        ACTIVE,
    ) = (
        'ERROR',
        'FAILED',
        'ACTIVE',
    )

    TROVE_STATUS_REASON = {
        FAILED:
        _('The database instance was created, but heat failed to set '
          'up the datastore. If a database instance is in the FAILED '
          'state, it should be deleted and a new one should be '
          'created.'),
        ERROR:
        _('The last operation for the database instance failed due to '
          'an error.'),
    }

    BAD_STATUSES = (ERROR, FAILED)

    PROPERTIES = (
        NAME,
        DATASTORE_TYPE,
        DATASTORE_VERSION,
        INSTANCES,
    ) = (
        'name',
        'datastore_type',
        'datastore_version',
        'instances',
    )

    _INSTANCE_KEYS = (
        FLAVOR,
        VOLUME_SIZE,
    ) = (
        'flavor',
        'volume_size',
    )

    ATTRIBUTES = (INSTANCES_ATTR, IP) = ('instances', 'ip')

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the cluster to create.'),
                          constraints=[
                              constraints.Length(max=255),
                          ]),
        DATASTORE_TYPE:
        properties.Schema(properties.Schema.STRING,
                          _("Name of registered datastore type."),
                          required=True,
                          constraints=[constraints.Length(max=255)]),
        DATASTORE_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _("Name of the registered datastore version. "
              "It must exist for provided datastore type. "
              "Defaults to using single active version. "
              "If several active versions exist for provided datastore type, "
              "explicit value for this parameter must be specified."),
            required=True,
            constraints=[constraints.Length(max=255)]),
        INSTANCES:
        properties.Schema(
            properties.Schema.LIST,
            _("List of database instances."),
            required=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    FLAVOR:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Flavor of the instance.'),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('trove.flavor')
                        ]),
                    VOLUME_SIZE:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('Size of the instance disk volume in GB.'),
                        required=True,
                        constraints=[
                            constraints.Range(1, 150),
                        ])
                }))
    }

    attributes_schema = {
        INSTANCES: attributes.Schema(_("A list of instances ids.")),
        IP: attributes.Schema(_("IP of the cluster."))
    }

    default_client_name = 'trove'

    def _cluster_name(self):
        return self.properties[self.NAME] or self.physical_resource_name()

    def handle_create(self):
        datastore_type = self.properties[self.DATASTORE_TYPE]
        datastore_version = self.properties[self.DATASTORE_VERSION]

        # convert instances to format required by troveclient
        instances = []
        for instance in self.properties[self.INSTANCES]:
            instances.append({
                'flavorRef':
                self.client_plugin().get_flavor_id(instance[self.FLAVOR]),
                'volume': {
                    'size': instance[self.VOLUME_SIZE]
                }
            })

        args = {
            'name': self._cluster_name(),
            'datastore': datastore_type,
            'datastore_version': datastore_version,
            'instances': instances
        }
        cluster = self.client().clusters.create(**args)
        self.resource_id_set(cluster.id)
        return cluster.id

    def _refresh_cluster(self, cluster_id):
        try:
            cluster = self.client().clusters.get(cluster_id)
            return cluster
        except Exception as exc:
            if self.client_plugin().is_over_limit(exc):
                LOG.warn(
                    _LW("Stack %(name)s (%(id)s) received an "
                        "OverLimit response during clusters.get():"
                        " %(exception)s"), {
                            'name': self.stack.name,
                            'id': self.stack.id,
                            'exception': exc
                        })
                return None
            else:
                raise

    def check_create_complete(self, cluster_id):
        cluster = self._refresh_cluster(cluster_id)

        if cluster is None:
            return False

        for instance in cluster.instances:
            if instance['status'] in self.BAD_STATUSES:
                raise exception.ResourceInError(
                    resource_status=instance['status'],
                    status_reason=self.TROVE_STATUS_REASON.get(
                        instance['status'], _("Unknown")))

            if instance['status'] != self.ACTIVE:
                return False

        LOG.info(_LI("Cluster '%s' has been created"), cluster.name)
        return True

    def handle_delete(self):
        if not self.resource_id:
            return

        try:
            cluster = self.client().clusters.get(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            cluster.delete()
            return cluster.id

    def check_delete_complete(self, cluster_id):
        if not cluster_id:
            return True

        try:
            # For some time trove cluster may continue to live
            self._refresh_cluster(cluster_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return True

        return False

    def validate(self):
        res = super(TroveCluster, self).validate()
        if res:
            return res

        datastore_type = self.properties[self.DATASTORE_TYPE]
        datastore_version = self.properties[self.DATASTORE_VERSION]

        self.client_plugin().validate_datastore(datastore_type,
                                                datastore_version,
                                                self.DATASTORE_TYPE,
                                                self.DATASTORE_VERSION)

    def _resolve_attribute(self, name):
        if name == self.INSTANCES_ATTR:
            instances = []
            cluster = self.client().clusters.get(self.resource_id)
            for instance in cluster.instances:
                instances.append(instance['id'])
            return instances
        elif name == self.IP:
            cluster = self.client().clusters.get(self.resource_id)
            return cluster.ip
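
Both Trove examples follow the same create lifecycle: handle_create() issues the API call and returns a token (the instance or cluster id), and check_create_complete(token) is then polled until it returns True or raises on a bad status. A rough, hypothetical sketch of that polling contract (Heat's own task scheduler drives this, not a sleep loop):

import time

def wait_for_create(resource, poll_interval=2):
    # handle_create() starts the backend operation and returns a token.
    token = resource.handle_create()
    # check_create_complete() reports False while still building, True once
    # ACTIVE, and raises if the resource lands in a bad status.
    while not resource.check_create_complete(token):
        time.sleep(poll_interval)
    return token
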
Example #3
File: order.py  Project: kanaka/heat
class Order(resource.Resource):

    PROPERTIES = (
        NAME, PAYLOAD_CONTENT_TYPE, MODE, EXPIRATION,
        ALGORITHM, BIT_LENGTH,
    ) = (
        'name', 'payload_content_type', 'mode', 'expiration',
        'algorithm', 'bit_length',
    )

    ATTRIBUTES = (
        STATUS, ORDER_REF, SECRET_REF,
    ) = (
        'status', 'order_ref', 'secret_ref',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Human readable name for the secret.'),
        ),
        PAYLOAD_CONTENT_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('The type/format the secret data is provided in.'),
            default='application/octet-stream',
            constraints=[
                constraints.AllowedValues([
                    'application/octet-stream',
                ]),
            ],
        ),
        EXPIRATION: properties.Schema(
            properties.Schema.STRING,
            _('The expiration date for the secret in ISO-8601 format.'),
            constraints=[
                constraints.CustomConstraint('iso_8601'),
            ],
        ),
        ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('The algorithm type used to generate the secret.'),
            default='aes',
            constraints=[
                constraints.AllowedValues([
                    'aes',
                ]),
            ],
        ),
        BIT_LENGTH: properties.Schema(
            properties.Schema.NUMBER,
            _('The bit-length of the secret.'),
            constraints=[
                constraints.AllowedValues([
                    128,
                    196,
                    256,
                ]),
            ],
        ),
        MODE: properties.Schema(
            properties.Schema.STRING,
            _('The type/mode of the algorithm associated with the secret '
              'information.'),
            default='cbc',
            constraints=[
                constraints.AllowedValues([
                    'cbc',
                ]),
            ],
        ),
    }

    attributes_schema = {
        STATUS: attributes.Schema(_('The status of the order.')),
        ORDER_REF: attributes.Schema(_('The URI to the order.')),
        SECRET_REF: attributes.Schema(_('The URI to the created secret.')),
    }

    def barbican(self):
        return self.client('barbican')

    def handle_create(self):
        info = dict(self.properties)
        order = self.barbican().orders.create_key(**info)
        order_ref = order.submit()
        self.resource_id_set(order_ref)
        return order_ref

    def check_create_complete(self, order_href):
        order = self.barbican().orders.get(order_href)

        if order.status == 'ERROR':
            reason = order.error_reason
            code = order.error_status_code
            msg = (_("Order '%(name)s' failed: %(code)s - %(reason)s")
                   % {'name': self.name, 'code': code, 'reason': reason})
            raise exception.Error(msg)

        return order.status == 'ACTIVE'

    def handle_delete(self):
        if not self.resource_id:
            return

        client = self.barbican()
        try:
            client.orders.delete(self.resource_id)
        except client.barbican_client.HTTPClientError as exc:
            # This is the only exception the client raises
            # Inspecting the message to see if it's a 'Not Found'
            if 'Not Found' not in six.text_type(exc):
                raise

    def _resolve_attribute(self, name):
        order = self.barbican().orders.get(self.resource_id)
        return getattr(order, name)
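
handle_delete() above tolerates an order that is already gone by inspecting the error message of the only exception type the client raises. A self-contained sketch of that pattern; the exception class and client object here are stand-ins for illustration, not the real barbicanclient API:

class HTTPClientError(Exception):
    """Stand-in for the single error type the client raises."""

def delete_order(client, order_ref):
    try:
        client.orders.delete(order_ref)
    except HTTPClientError as exc:
        # Treat 'Not Found' as success; re-raise anything else.
        if 'Not Found' not in str(exc):
            raise
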
Example #4
class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        AVAILABILITY_ZONES, LAUNCH_CONFIGURATION_NAME, MAX_SIZE, MIN_SIZE,
        COOLDOWN, DESIRED_CAPACITY, HEALTH_CHECK_GRACE_PERIOD,
        HEALTH_CHECK_TYPE, LOAD_BALANCER_NAMES, VPCZONE_IDENTIFIER, TAGS,
        INSTANCE_ID,
    ) = (
        'AvailabilityZones', 'LaunchConfigurationName', 'MaxSize', 'MinSize',
        'Cooldown', 'DesiredCapacity', 'HealthCheckGracePeriod',
        'HealthCheckType', 'LoadBalancerNames', 'VPCZoneIdentifier', 'Tags',
        'InstanceId',
    )

    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )

    _UPDATE_POLICY_SCHEMA_KEYS = (
        ROLLING_UPDATE
    ) = (
        'AutoScalingRollingUpdate'
    )

    _ROLLING_UPDATE_SCHEMA_KEYS = (
        MIN_INSTANCES_IN_SERVICE, MAX_BATCH_SIZE, PAUSE_TIME
    ) = (
        'MinInstancesInService', 'MaxBatchSize', 'PauseTime'
    )

    ATTRIBUTES = (
        INSTANCE_LIST,
    ) = (
        'InstanceList',
    )

    properties_schema = {
        AVAILABILITY_ZONES: properties.Schema(
            properties.Schema.LIST,
            _('Not Implemented.'),
            required=True
        ),
        LAUNCH_CONFIGURATION_NAME: properties.Schema(
            properties.Schema.STRING,
            _('The reference to a LaunchConfiguration resource.'),
            update_allowed=True
        ),
        INSTANCE_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of an existing instance to use to '
              'create the Auto Scaling group. If this property is '
              'specified, the group will be created from the existing '
              'instance instead of a launch configuration.'),
            constraints=[
                constraints.CustomConstraint("nova.server")
            ]
        ),
        MAX_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of instances in the group.'),
            required=True,
            update_allowed=True
        ),
        MIN_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Minimum number of instances in the group.'),
            required=True,
            update_allowed=True
        ),
        COOLDOWN: properties.Schema(
            properties.Schema.NUMBER,
            _('Cooldown period, in seconds.'),
            update_allowed=True
        ),
        DESIRED_CAPACITY: properties.Schema(
            properties.Schema.INTEGER,
            _('Desired initial number of instances.'),
            update_allowed=True
        ),
        HEALTH_CHECK_GRACE_PERIOD: properties.Schema(
            properties.Schema.INTEGER,
            _('Not Implemented.'),
            implemented=False
        ),
        HEALTH_CHECK_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Not Implemented.'),
            constraints=[
                constraints.AllowedValues(['EC2', 'ELB']),
            ],
            implemented=False
        ),
        LOAD_BALANCER_NAMES: properties.Schema(
            properties.Schema.LIST,
            _('List of LoadBalancer resources.')
        ),
        VPCZONE_IDENTIFIER: properties.Schema(
            properties.Schema.LIST,
            _('Use only with Neutron, to list the internal subnet to '
              'which the instance will be attached; '
              'needed only if multiple exist; '
              'list length must be exactly 1.'),
            schema=properties.Schema(
                properties.Schema.STRING,
                _('UUID of the internal subnet to which the instance '
                  'will be attached.')
            )
        ),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            _('Tags to attach to this group.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    TAG_KEY: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                    TAG_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                },
            )
        ),
    }

    attributes_schema = {
        INSTANCE_LIST: attributes.Schema(
            _("A comma-delimited list of server ip addresses. "
              "(Heat extension).")
        ),
    }

    rolling_update_schema = {
        MIN_INSTANCES_IN_SERVICE: properties.Schema(properties.Schema.INTEGER,
                                                    default=0),
        MAX_BATCH_SIZE: properties.Schema(properties.Schema.INTEGER,
                                          default=1),
        PAUSE_TIME: properties.Schema(properties.Schema.STRING,
                                      default='PT0S')
    }

    update_policy_schema = {
        ROLLING_UPDATE: properties.Schema(properties.Schema.MAP,
                                          schema=rolling_update_schema)
    }

    def handle_create(self):
        self.validate_launchconfig()
        return self.create_with_template(self.child_template())

    def _make_launch_config_resource(self, name, props):
        lc_res_type = 'AWS::AutoScaling::LaunchConfiguration'
        lc_res_def = rsrc_defn.ResourceDefinition(name,
                                                  lc_res_type,
                                                  props)
        lc_res = resource.Resource(name, lc_res_def, self.stack)
        return lc_res

    def _get_conf_properties(self):
        instance_id = self.properties.get(self.INSTANCE_ID)
        if instance_id:
            server = self.client_plugin('nova').get_server(instance_id)
            instance_props = {
                'ImageId': server.image['id'],
                'InstanceType': server.flavor['id'],
                'KeyName': server.key_name,
                'SecurityGroups': [sg['name']
                                   for sg in server.security_groups]
            }
            conf = self._make_launch_config_resource(self.name,
                                                     instance_props)
            props = function.resolve(conf.properties.data)
        else:
            conf, props = super(AutoScalingGroup, self)._get_conf_properties()

        vpc_zone_ids = self.properties.get(self.VPCZONE_IDENTIFIER)
        if vpc_zone_ids:
            props['SubnetId'] = vpc_zone_ids[0]

        return conf, props

    def check_create_complete(self, task):
        """Invoke the cooldown after creation succeeds."""
        done = super(AutoScalingGroup, self).check_create_complete(task)
        if done:
            self._cooldown_timestamp(
                "%s : %s" % (EXACT_CAPACITY, grouputils.get_size(self)))
        return done

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """
        If Properties has changed, update self.properties, so we get the new
        values during any subsequent adjustment.
        """
        if tmpl_diff:
            # parse update policy
            if 'UpdatePolicy' in tmpl_diff:
                up = json_snippet.update_policy(self.update_policy_schema,
                                                self.context)
                self.update_policy = up

        self.properties = json_snippet.properties(self.properties_schema,
                                                  self.context)
        if prop_diff:
            # Replace instances first if launch configuration has changed
            self._try_rolling_update(prop_diff)

        if self.properties[self.DESIRED_CAPACITY] is not None:
            self.adjust(self.properties[self.DESIRED_CAPACITY],
                        adjustment_type=EXACT_CAPACITY)
        else:
            current_capacity = grouputils.get_size(self)
            self.adjust(current_capacity, adjustment_type=EXACT_CAPACITY)

    def adjust(self, adjustment, adjustment_type=CHANGE_IN_CAPACITY):
        """
        Adjust the size of the scaling group if the cooldown permits.
        """
        if self._cooldown_inprogress():
            LOG.info(_LI("%(name)s NOT performing scaling adjustment, "
                         "cooldown %(cooldown)s"),
                     {'name': self.name,
                      'cooldown': self.properties[self.COOLDOWN]})
            return

        capacity = grouputils.get_size(self)
        lower = self.properties[self.MIN_SIZE]
        upper = self.properties[self.MAX_SIZE]

        new_capacity = _calculate_new_capacity(capacity, adjustment,
                                               adjustment_type, lower, upper)

        # send a notification before, on-error and on-success.
        notif = {
            'stack': self.stack,
            'adjustment': adjustment,
            'adjustment_type': adjustment_type,
            'capacity': capacity,
            'groupname': self.FnGetRefId(),
            'message': _("Start resizing the group %(group)s") % {
                'group': self.FnGetRefId()},
            'suffix': 'start',
        }
        notification.send(**notif)
        try:
            self.resize(new_capacity)
        except Exception as resize_ex:
            with excutils.save_and_reraise_exception():
                try:
                    notif.update({'suffix': 'error',
                                  'message': six.text_type(resize_ex),
                                  })
                    notification.send(**notif)
                except Exception:
                    LOG.exception(_LE('Failed sending error notification'))
        else:
            notif.update({
                'suffix': 'end',
                'capacity': new_capacity,
                'message': _("End resizing the group %(group)s") % {
                    'group': notif['groupname']},
            })
            notification.send(**notif)

        self._cooldown_timestamp("%s : %s" % (adjustment_type, adjustment))

    def _tags(self):
        """Add Identifing Tags to all servers in the group.

        This is so the Dimensions received from cfn-push-stats all include
        the groupname and stack id.
        Note: the group name must match what is returned from FnGetRefId
        """
        autoscaling_tag = [{self.TAG_KEY: 'metering.AutoScalingGroupName',
                            self.TAG_VALUE: self.FnGetRefId()}]
        return super(AutoScalingGroup, self)._tags() + autoscaling_tag

    def validate(self):
        # check validity of group size
        min_size = self.properties[self.MIN_SIZE]
        max_size = self.properties[self.MAX_SIZE]

        if max_size < min_size:
            msg = _("MinSize can not be greater than MaxSize")
            raise exception.StackValidationFailed(message=msg)

        if min_size < 0:
            msg = _("The size of AutoScalingGroup can not be less than zero")
            raise exception.StackValidationFailed(message=msg)

        if self.properties[self.DESIRED_CAPACITY] is not None:
            desired_capacity = self.properties[self.DESIRED_CAPACITY]
            if desired_capacity < min_size or desired_capacity > max_size:
                msg = _("DesiredCapacity must be between MinSize and MaxSize")
                raise exception.StackValidationFailed(message=msg)

        # TODO(pasquier-s): once Neutron is able to assign subnets to
        # availability zones, it will be possible to specify multiple subnets.
        # For now, only one subnet can be specified. The bug #1096017 tracks
        # this issue.
        if (self.properties.get(self.VPCZONE_IDENTIFIER) and
                len(self.properties[self.VPCZONE_IDENTIFIER]) != 1):
            raise exception.NotSupported(feature=_("Anything other than one "
                                         "VPCZoneIdentifier"))
        # validate properties InstanceId and LaunchConfigurationName
        # for aws auto scaling group.
        # should provide just only one of
        if self.type() == 'AWS::AutoScaling::AutoScalingGroup':
            instanceId = self.properties.get(self.INSTANCE_ID)
            launch_config = self.properties.get(
                self.LAUNCH_CONFIGURATION_NAME)
            if bool(instanceId) == bool(launch_config):
                msg = _("Either 'InstanceId' or 'LaunchConfigurationName' "
                        "must be provided.")
                raise exception.StackValidationFailed(message=msg)

        super(AutoScalingGroup, self).validate()

    def _resolve_attribute(self, name):
        '''
        Heat extension: "InstanceList" returns a comma-delimited list of
        server IP addresses.
        '''
        if name == self.INSTANCE_LIST:
            return u','.join(inst.FnGetAtt('PublicIp')
                             for inst in grouputils.get_members(self)) or None

    def child_template(self):
        if self.properties[self.DESIRED_CAPACITY]:
            num_instances = self.properties[self.DESIRED_CAPACITY]
        else:
            num_instances = self.properties[self.MIN_SIZE]
        return self._create_template(num_instances)
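
adjust() above delegates the arithmetic to _calculate_new_capacity(), which is defined elsewhere in the module. As a hedged illustration only (the constant values and rounding here are assumptions, not Heat's exact implementation), such a helper applies the adjustment according to its type and clamps the result between MinSize and MaxSize:

EXACT_CAPACITY = 'ExactCapacity'
CHANGE_IN_CAPACITY = 'ChangeInCapacity'
PERCENT_CHANGE_IN_CAPACITY = 'PercentChangeInCapacity'

def calculate_new_capacity(current, adjustment, adjustment_type, lower, upper):
    if adjustment_type == EXACT_CAPACITY:
        new_capacity = adjustment
    elif adjustment_type == CHANGE_IN_CAPACITY:
        new_capacity = current + adjustment
    elif adjustment_type == PERCENT_CHANGE_IN_CAPACITY:
        new_capacity = current + int(round(current * adjustment / 100.0))
    else:
        raise ValueError('Unknown adjustment type: %s' % adjustment_type)
    # Clamp to the configured group bounds.
    return max(lower, min(upper, new_capacity))
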
Example #5
class Pool(neutron.NeutronResource):
    """A resource for managing load balancer pools in Neutron."""

    required_service_extension = 'lbaas'

    PROPERTIES = (
        PROTOCOL, SUBNET_ID, SUBNET, LB_METHOD, NAME, DESCRIPTION,
        ADMIN_STATE_UP, VIP, MONITORS, PROVIDER,
    ) = (
        'protocol', 'subnet_id', 'subnet', 'lb_method', 'name', 'description',
        'admin_state_up', 'vip', 'monitors', 'provider',
    )

    _VIP_KEYS = (
        VIP_NAME, VIP_DESCRIPTION, VIP_SUBNET, VIP_ADDRESS,
        VIP_CONNECTION_LIMIT, VIP_PROTOCOL_PORT,
        VIP_SESSION_PERSISTENCE, VIP_ADMIN_STATE_UP,
    ) = (
        'name', 'description', 'subnet', 'address',
        'connection_limit', 'protocol_port',
        'session_persistence', 'admin_state_up',
    )

    _VIP_SESSION_PERSISTENCE_KEYS = (
        VIP_SESSION_PERSISTENCE_TYPE, VIP_SESSION_PERSISTENCE_COOKIE_NAME,
    ) = (
        'type', 'cookie_name',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, NAME_ATTR, PROTOCOL_ATTR, SUBNET_ID_ATTR,
        LB_METHOD_ATTR, DESCRIPTION_ATTR, TENANT_ID, VIP_ATTR, PROVIDER_ATTR,
    ) = (
        'admin_state_up', 'name', 'protocol', 'subnet_id',
        'lb_method', 'description', 'tenant_id', 'vip', 'provider',
    )

    properties_schema = {
        PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            _('Protocol for balancing.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['TCP', 'HTTP', 'HTTPS']),
            ]
        ),
        SUBNET_ID: properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                message=_('Use property %s.') % SUBNET,
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.2'
                )
            ),
            constraints=[
                constraints.CustomConstraint('neutron.subnet')
            ]
        ),
        SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('The subnet for the port on which the members '
              'of the pool will be connected.'),
            support_status=support.SupportStatus(version='2014.2'),
            constraints=[
                constraints.CustomConstraint('neutron.subnet')
            ]
        ),
        LB_METHOD: properties.Schema(
            properties.Schema.STRING,
            _('The algorithm used to distribute load between the members of '
              'the pool.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ROUND_ROBIN',
                                           'LEAST_CONNECTIONS', 'SOURCE_IP']),
            ],
            update_allowed=True
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the pool.')
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the pool.'),
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of this pool.'),
            default=True,
            update_allowed=True
        ),
        PROVIDER: properties.Schema(
            properties.Schema.STRING,
            _('LBaaS provider to implement this load balancer instance.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        VIP: properties.Schema(
            properties.Schema.MAP,
            _('IP address and port of the pool.'),
            schema={
                VIP_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of the vip.')
                ),
                VIP_DESCRIPTION: properties.Schema(
                    properties.Schema.STRING,
                    _('Description of the vip.')
                ),
                VIP_SUBNET: properties.Schema(
                    properties.Schema.STRING,
                    _('Subnet of the vip.'),
                    constraints=[
                        constraints.CustomConstraint('neutron.subnet')
                    ]
                ),
                VIP_ADDRESS: properties.Schema(
                    properties.Schema.STRING,
                    _('IP address of the vip.'),
                    constraints=[
                        constraints.CustomConstraint('ip_addr')
                    ]
                ),
                VIP_CONNECTION_LIMIT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('The maximum number of connections per second '
                      'allowed for the vip.')
                ),
                VIP_PROTOCOL_PORT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('TCP port on which to listen for client traffic '
                      'that is associated with the vip address.'),
                    required=True
                ),
                VIP_SESSION_PERSISTENCE: properties.Schema(
                    properties.Schema.MAP,
                    _('Configuration of session persistence.'),
                    schema={
                        VIP_SESSION_PERSISTENCE_TYPE: properties.Schema(
                            properties.Schema.STRING,
                            _('Method of implementation of session '
                              'persistence feature.'),
                            required=True,
                            constraints=[constraints.AllowedValues(
                                ['SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE']
                            )]
                        ),
                        VIP_SESSION_PERSISTENCE_COOKIE_NAME: properties.Schema(
                            properties.Schema.STRING,
                            _('Name of the cookie, '
                              'required if type is APP_COOKIE.')
                        )
                    }
                ),
                VIP_ADMIN_STATE_UP: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('The administrative state of this vip.'),
                    default=True
                ),
            },
            required=True
        ),
        MONITORS: properties.Schema(
            properties.Schema.LIST,
            _('List of health monitors associated with the pool.'),
            default=[],
            update_allowed=True
        ),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _('The administrative state of this pool.'),
            type=attributes.Schema.STRING
        ),
        NAME_ATTR: attributes.Schema(
            _('Name of the pool.'),
            type=attributes.Schema.STRING
        ),
        PROTOCOL_ATTR: attributes.Schema(
            _('Protocol to balance.'),
            type=attributes.Schema.STRING
        ),
        SUBNET_ID_ATTR: attributes.Schema(
            _('The subnet for the port on which the members of the pool '
              'will be connected.'),
            type=attributes.Schema.STRING
        ),
        LB_METHOD_ATTR: attributes.Schema(
            _('The algorithm used to distribute load between the members '
              'of the pool.'),
            type=attributes.Schema.STRING
        ),
        DESCRIPTION_ATTR: attributes.Schema(
            _('Description of the pool.'),
            type=attributes.Schema.STRING
        ),
        TENANT_ID: attributes.Schema(
            _('Tenant owning the pool.'),
            type=attributes.Schema.STRING
        ),
        VIP_ATTR: attributes.Schema(
            _('Vip associated with the pool.'),
            type=attributes.Schema.MAP
        ),
        PROVIDER_ATTR: attributes.Schema(
            _('Provider implementing this load balancer instance.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING,
        ),
    }

    def translation_rules(self):
        return [
            properties.TranslationRule(
                self.properties,
                properties.TranslationRule.REPLACE,
                [self.SUBNET],
                value_path=[self.SUBNET_ID]
            )
        ]

    def validate(self):
        res = super(Pool, self).validate()
        if res:
            return res
        self._validate_depr_property_required(
            self.properties, self.SUBNET, self.SUBNET_ID)
        session_p = self.properties[self.VIP].get(self.VIP_SESSION_PERSISTENCE)
        if session_p is None:
            # session persistence is not configured, skip validation
            return

        persistence_type = session_p[self.VIP_SESSION_PERSISTENCE_TYPE]
        if persistence_type == 'APP_COOKIE':
            if session_p.get(self.VIP_SESSION_PERSISTENCE_COOKIE_NAME):
                return

            msg = _('Property cookie_name is required, when '
                    'session_persistence type is set to APP_COOKIE.')
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        self.client_plugin().resolve_subnet(
            properties, self.SUBNET, 'subnet_id')
        vip_properties = properties.pop(self.VIP)
        monitors = properties.pop(self.MONITORS)

        pool = self.client().create_pool({'pool': properties})['pool']
        self.resource_id_set(pool['id'])

        for monitor in monitors:
            self.client().associate_health_monitor(
                pool['id'], {'health_monitor': {'id': monitor}})

        vip_arguments = self.prepare_properties(
            vip_properties,
            '%s.vip' % (self.name,))

        session_p = vip_arguments.get(self.VIP_SESSION_PERSISTENCE)
        if session_p is not None:
            prepared_props = self.prepare_properties(session_p, None)
            vip_arguments['session_persistence'] = prepared_props

        vip_arguments['protocol'] = self.properties[self.PROTOCOL]

        if vip_arguments.get(self.VIP_SUBNET) is None:
            vip_arguments['subnet_id'] = properties[self.SUBNET_ID]
        else:
            vip_arguments['subnet_id'] = self.client_plugin().resolve_subnet(
                vip_arguments, self.VIP_SUBNET, 'subnet_id')

        vip_arguments['pool_id'] = pool['id']
        vip = self.client().create_vip({'vip': vip_arguments})['vip']

        self.metadata_set({'vip': vip['id']})

    def _show_resource(self):
        return self.client().show_pool(self.resource_id)['pool']

    def check_create_complete(self, data):
        attributes = self._show_resource()
        status = attributes['status']
        if status == 'PENDING_CREATE':
            return False
        elif status == 'ACTIVE':
            vip_attributes = self.client().show_vip(
                self.metadata_get()['vip'])['vip']
            vip_status = vip_attributes['status']
            if vip_status == 'PENDING_CREATE':
                return False
            if vip_status == 'ACTIVE':
                return True
            if vip_status == 'ERROR':
                raise exception.ResourceInError(
                    resource_status=vip_status,
                    status_reason=_('error in vip'))
            raise exception.ResourceUnknownStatus(
                resource_status=vip_status,
                result=_('Pool creation failed due to vip'))
        elif status == 'ERROR':
            raise exception.ResourceInError(
                resource_status=status,
                status_reason=_('error in pool'))
        else:
            raise exception.ResourceUnknownStatus(
                resource_status=status,
                result=_('Pool creation failed'))

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            if self.MONITORS in prop_diff:
                monitors = set(prop_diff.pop(self.MONITORS))
                old_monitors = set(self.properties[self.MONITORS])
                for monitor in old_monitors - monitors:
                    self.client().disassociate_health_monitor(
                        self.resource_id, monitor)
                for monitor in monitors - old_monitors:
                    self.client().associate_health_monitor(
                        self.resource_id, {'health_monitor': {'id': monitor}})

            if prop_diff:
                self.client().update_pool(self.resource_id,
                                          {'pool': prop_diff})

    def _resolve_attribute(self, name):
        if name == self.VIP_ATTR:
            return self.client().show_vip(self.metadata_get()['vip'])['vip']
        return super(Pool, self)._resolve_attribute(name)

    def handle_delete(self):
        if not self.resource_id:
            prg = progress.PoolDeleteProgress(True)
            return prg

        prg = progress.PoolDeleteProgress()
        if not self.metadata_get():
            prg.vip['delete_called'] = True
            prg.vip['deleted'] = True
        return prg

    def _delete_vip(self):
        return self._not_found_in_call(
            self.client().delete_vip, self.metadata_get()['vip'])

    def _check_vip_deleted(self):
        return self._not_found_in_call(
            self.client().show_vip, self.metadata_get()['vip'])

    def _delete_pool(self):
        return self._not_found_in_call(
            self.client().delete_pool, self.resource_id)

    def check_delete_complete(self, prg):
        if not prg.vip['delete_called']:
            prg.vip['deleted'] = self._delete_vip()
            prg.vip['delete_called'] = True
            return False
        if not prg.vip['deleted']:
            prg.vip['deleted'] = self._check_vip_deleted()
            return False
        if not prg.pool['delete_called']:
            prg.pool['deleted'] = self._delete_pool()
            prg.pool['delete_called'] = True
            return prg.pool['deleted']
        if not prg.pool['deleted']:
            prg.pool['deleted'] = super(Pool, self).check_delete_complete(True)
            return prg.pool['deleted']
        return True
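
The two-phase teardown above (VIP first, then the pool) is driven by a small progress object. Below is a minimal, hedged sketch of such an object whose field layout simply mirrors how check_delete_complete() reads it; the real class lives elsewhere in Heat, so treat this as an illustration, not the actual implementation.

# Hedged sketch: a progress holder shaped the way check_delete_complete()
# expects. Constructing it with task_complete=True (as handle_delete() does
# when there is no resource_id) marks both phases finished, so deletion
# completes on the first poll.
class PoolDeleteProgressSketch(object):
    def __init__(self, task_complete=False):
        self.vip = {'delete_called': task_complete, 'deleted': task_complete}
        self.pool = {'delete_called': task_complete, 'deleted': task_complete}
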
Example #6
class SaharaCluster(resource.Resource):

    PROPERTIES = (
        NAME,
        PLUGIN_NAME,
        HADOOP_VERSION,
        CLUSTER_TEMPLATE_ID,
        KEY_NAME,
        IMAGE,
        MANAGEMENT_NETWORK,
    ) = (
        'name',
        'plugin_name',
        'hadoop_version',
        'cluster_template_id',
        'key_name',
        'image',
        'neutron_management_network',
    )

    ATTRIBUTES = (
        STATUS,
        INFO,
    ) = (
        "status",
        "info",
    )

    CLUSTER_STATUSES = (CLUSTER_ACTIVE, CLUSTER_ERROR) = ('Active', 'Error')
    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Hadoop cluster name.'),
        ),
        PLUGIN_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
        ),
        HADOOP_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
        ),
        CLUSTER_TEMPLATE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of the Cluster Template used for '
              'Node Groups and configurations.'),
            required=True,
        ),
        KEY_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Keypair added to instances to make them accessible '
              'to the user.'),
            constraints=[constraints.CustomConstraint('nova.keypair')],
        ),
        IMAGE:
        properties.Schema(
            properties.Schema.STRING,
            _('Name or UUID of the image used to boot Hadoop nodes.'),
            constraints=[constraints.CustomConstraint('glance.image')],
        ),
        MANAGEMENT_NETWORK:
        properties.Schema(
            properties.Schema.STRING,
            _('Name or UUID of Neutron network.'),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
    }

    attributes_schema = {
        STATUS: attributes.Schema(_("Cluster status."), ),
        INFO: attributes.Schema(_("Cluster information."), ),
    }

    default_client_name = 'sahara'

    def _cluster_name(self):
        name = self.properties.get(self.NAME)
        if name:
            return name
        return self.physical_resource_name()

    def handle_create(self):
        plugin_name = self.properties[self.PLUGIN_NAME]
        hadoop_version = self.properties[self.HADOOP_VERSION]
        cluster_template_id = self.properties[self.CLUSTER_TEMPLATE_ID]
        image_id = self.properties.get(self.IMAGE)
        if image_id:
            image_id = self.client_plugin('glance').get_image_id(image_id)

        # check that an image is provided when the cluster
        # template does not define a default one
        cluster_template = self.client().cluster_templates.get(
            cluster_template_id)
        if cluster_template.default_image_id is None and not image_id:
            msg = _("%(img)s must be provided: Referenced cluster template "
                    "%(tmpl)s has no default_image_id defined.") % {
                        'img': self.IMAGE,
                        'tmpl': cluster_template_id
                    }
            raise exception.StackValidationFailed(message=msg)

        key_name = self.properties.get(self.KEY_NAME)
        net_id = self.properties.get(self.MANAGEMENT_NETWORK)
        if net_id:
            net_id = self.client_plugin('neutron').find_neutron_resource(
                self.properties, self.MANAGEMENT_NETWORK, 'network')

        cluster = self.client().clusters.create(
            self._cluster_name(),
            plugin_name,
            hadoop_version,
            cluster_template_id=cluster_template_id,
            user_keypair_id=key_name,
            default_image_id=image_id,
            net_id=net_id)
        LOG.info(_LI('Cluster "%s" is being started.'), cluster.name)
        self.resource_id_set(cluster.id)
        return self.resource_id

    def check_create_complete(self, cluster_id):
        cluster = self.client().clusters.get(cluster_id)
        if cluster.status == self.CLUSTER_ERROR:
            raise resource.ResourceInError(resource_status=cluster.status)

        if cluster.status != self.CLUSTER_ACTIVE:
            return False

        LOG.info(_LI("Cluster '%s' has been created"), cluster.name)
        return True

    def handle_delete(self):
        if not self.resource_id:
            return

        try:
            self.client().clusters.delete(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return None

        return self.resource_id

    def check_delete_complete(self, resource_id):
        if not resource_id:
            return True

        try:
            cluster = self.client().clusters.get(resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            LOG.info(_LI("Cluster '%s' has been deleted"),
                     self._cluster_name())
            return True
        else:
            if cluster.status == self.CLUSTER_ERROR:
                raise resource.ResourceInError(resource_status=cluster.status)

        return False

    def _resolve_attribute(self, name):
        cluster = self.client().clusters.get(self.resource_id)
        return getattr(cluster, name, None)

    def validate(self):
        res = super(SaharaCluster, self).validate()
        if res:
            return res

        # check if running on neutron and MANAGEMENT_NETWORK missing
        # NOTE(pshchelo): on nova-network with MANAGEMENT_NETWORK present
        # overall stack validation will fail due to the neutron.network
        # constraint, although the message will not be very relevant.
        if (self.is_using_neutron()
                and not self.properties.get(self.MANAGEMENT_NETWORK)):
            msg = _("%s must be provided") % self.MANAGEMENT_NETWORK
            raise exception.StackValidationFailed(message=msg)
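
For clarity, here is a rough sketch (not Heat's actual scheduler) of how the create handlers above are meant to be driven: handle_create() returns the cluster id, and check_create_complete() is polled with that id until the cluster is Active or an error is raised. The driver function and poll_interval are illustrative assumptions.

import time

def drive_cluster_create(resource, poll_interval=5):
    """Illustrative driver loop for the SaharaCluster handlers above."""
    cluster_id = resource.handle_create()                   # starts the cluster
    while not resource.check_create_complete(cluster_id):   # False while building
        time.sleep(poll_interval)
    return cluster_id                                        # cluster is Active
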
Example #7
class ElasticIp(resource.Resource):
    PROPERTIES = (
        DOMAIN,
        INSTANCE_ID,
    ) = (
        'Domain',
        'InstanceId',
    )

    ATTRIBUTES = (ALLOCATION_ID, ) = ('AllocationId', )

    properties_schema = {
        DOMAIN:
        properties.Schema(
            properties.Schema.STRING,
            _('Set to "vpc" to have IP address allocation associated to your '
              'VPC.'),
            constraints=[
                constraints.AllowedValues(['vpc']),
            ]),
        INSTANCE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('Instance ID to associate with EIP.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('nova.server')]),
    }

    attributes_schema = {
        ALLOCATION_ID:
        attributes.Schema(
            _('ID that AWS assigns to represent the allocation of the address '
              'for use with Amazon VPC. Returned only for VPC elastic IP '
              'addresses.')),
    }

    def __init__(self, name, json_snippet, stack):
        super(ElasticIp, self).__init__(name, json_snippet, stack)
        self.ipaddress = None

    def _ipaddress(self):
        if self.ipaddress is None and self.resource_id is not None:
            if self.properties[self.DOMAIN]:
                try:
                    ips = self.neutron().show_floatingip(self.resource_id)
                except Exception as ex:
                    self.client_plugin('neutron').ignore_not_found(ex)
                else:
                    self.ipaddress = ips['floatingip']['floating_ip_address']
            else:
                try:
                    ips = self.nova().floating_ips.get(self.resource_id)
                except Exception as e:
                    self.client_plugin('nova').ignore_not_found(e)
                else:
                    self.ipaddress = ips.ip
        return self.ipaddress or ''

    def handle_create(self):
        """Allocate a floating IP for the current tenant."""
        ips = None
        if self.properties[self.DOMAIN]:
            from heat.engine.resources import internet_gateway

            ext_net = internet_gateway.InternetGateway.get_external_network_id(
                self.neutron())
            props = {'floating_network_id': ext_net}
            ips = self.neutron().create_floatingip({'floatingip':
                                                    props})['floatingip']
            self.ipaddress = ips['floating_ip_address']
            self.resource_id_set(ips['id'])
            LOG.info(_LI('ElasticIp create %s'), str(ips))
        else:
            try:
                ips = self.nova().floating_ips.create()
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    if self.client_plugin('nova').is_not_found(e):
                        LOG.error(
                            _LE("No default floating IP pool configured."
                                " Set 'default_floating_pool' in "
                                "nova.conf."))

            if ips:
                self.ipaddress = ips.ip
                self.resource_id_set(ips.id)
                LOG.info(_LI('ElasticIp create %s'), str(ips))

        instance_id = self.properties[self.INSTANCE_ID]
        if instance_id:
            server = self.nova().servers.get(instance_id)
            server.add_floating_ip(self._ipaddress())

    def handle_delete(self):
        if self.resource_id is None:
            return
        # The EIP may have been created without an association, or the
        # association may have failed during creation. In either case there
        # is no association to remove, and attempting to disassociate raises
        # an exception, so catch and ignore it before deallocating the EIP.
        instance_id = self.properties[self.INSTANCE_ID]
        if instance_id:
            try:
                server = self.nova().servers.get(instance_id)
                if server:
                    server.remove_floating_ip(self._ipaddress())
            except Exception as e:
                is_not_found = self.client_plugin('nova').is_not_found(e)
                is_unprocessable_entity = self.client_plugin(
                    'nova').is_unprocessable_entity(e)

                if (not is_not_found and not is_unprocessable_entity):
                    raise

        # deallocate the eip
        if self.properties[self.DOMAIN]:
            try:
                self.neutron().delete_floatingip(self.resource_id)
            except Exception as ex:
                self.client_plugin('neutron').ignore_not_found(ex)
        else:
            try:
                self.nova().floating_ips.delete(self.resource_id)
            except Exception as e:
                self.client_plugin('nova').ignore_not_found(e)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            if self.INSTANCE_ID in prop_diff:
                instance_id = prop_diff.get(self.INSTANCE_ID)
                if instance_id:
                    # no need to remove the floating ip from the old instance,
                    # nova does this automatically when calling
                    # add_floating_ip().
                    server = self.nova().servers.get(instance_id)
                    server.add_floating_ip(self._ipaddress())
                else:
                    # remove the floating IP from the old instance
                    instance_id_old = self.properties[self.INSTANCE_ID]
                    if instance_id_old:
                        server = self.nova().servers.get(instance_id_old)
                        server.remove_floating_ip(self._ipaddress())

    def FnGetRefId(self):
        return six.text_type(self._ipaddress())

    def _resolve_attribute(self, name):
        if name == self.ALLOCATION_ID:
            return six.text_type(self.resource_id)
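
As a small illustration of the two identifiers the resource exposes, the helper below (hypothetical, not part of Heat) contrasts the stack reference, which resolves to the floating IP address, with the AllocationId attribute, which resolves to the underlying allocation id.

def describe_eip(eip):
    """Hypothetical helper; eip is assumed to be a created ElasticIp."""
    return {
        'ref': eip.FnGetRefId(),                               # the IP address
        'allocation_id': eip._resolve_attribute(eip.ALLOCATION_ID),
    }
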
Example #8
class Order(resource.Resource):
    """A resource allowing for the generation secret material by Barbican.

    The resource allows to generate some secret material. It can be, for
    example, some key or certificate. The order encapsulates the workflow
    and history for the creation of a secret. The time to generate a secret can
    vary depending on the type of secret.
    """

    support_status = support.SupportStatus(version='2014.2')

    default_client_name = 'barbican'

    entity = 'orders'

    PROPERTIES = (NAME, PAYLOAD_CONTENT_TYPE, MODE, EXPIRATION, ALGORITHM,
                  BIT_LENGTH, TYPE, REQUEST_TYPE, SUBJECT_DN,
                  SOURCE_CONTAINER_REF, CA_ID, PROFILE, REQUEST_DATA,
                  PASS_PHRASE) = ('name', 'payload_content_type', 'mode',
                                  'expiration', 'algorithm', 'bit_length',
                                  'type', 'request_type', 'subject_dn',
                                  'source_container_ref', 'ca_id', 'profile',
                                  'request_data', 'pass_phrase')

    ATTRIBUTES = (STATUS, ORDER_REF, SECRET_REF, PUBLIC_KEY, PRIVATE_KEY,
                  CERTIFICATE, INTERMEDIATES,
                  CONTAINER_REF) = ('status', 'order_ref', 'secret_ref',
                                    'public_key', 'private_key', 'certificate',
                                    'intermediates', 'container_ref')

    ORDER_TYPES = (KEY, ASYMMETRIC, CERTIFICATE) = ('key', 'asymmetric',
                                                    'certificate')

    # full-cmc is declared but not yet supported in barbican
    REQUEST_TYPES = (STORED_KEY, SIMPLE_CMC, CUSTOM) = ('stored-key',
                                                        'simple-cmc', 'custom')

    ALLOWED_PROPERTIES_FOR_TYPE = {
        KEY:
        [NAME, ALGORITHM, BIT_LENGTH, MODE, PAYLOAD_CONTENT_TYPE, EXPIRATION],
        ASYMMETRIC: [
            NAME, ALGORITHM, BIT_LENGTH, MODE, PASS_PHRASE,
            PAYLOAD_CONTENT_TYPE, EXPIRATION
        ],
        CERTIFICATE: [
            NAME, REQUEST_TYPE, SUBJECT_DN, SOURCE_CONTAINER_REF, CA_ID,
            PROFILE, REQUEST_DATA
        ]
    }

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Human readable name for the secret.'),
        ),
        PAYLOAD_CONTENT_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('The type/format the secret data is provided in.'),
        ),
        EXPIRATION:
        properties.Schema(
            properties.Schema.STRING,
            _('The expiration date for the secret in ISO-8601 format.'),
            constraints=[
                constraints.CustomConstraint('expiration'),
            ],
        ),
        ALGORITHM:
        properties.Schema(
            properties.Schema.STRING,
            _('The algorithm type used to generate the secret. '
              'Required for key and asymmetric types of order.'),
        ),
        BIT_LENGTH:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The bit-length of the secret. Required for key and '
              'asymmetric types of order.'),
        ),
        MODE:
        properties.Schema(
            properties.Schema.STRING,
            _('The type/mode of the algorithm associated with the secret '
              'information.'),
        ),
        TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('The type of the order.'),
            constraints=[
                constraints.AllowedValues(ORDER_TYPES),
            ],
            required=True,
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        REQUEST_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('The type of the certificate request.'),
            support_status=support.SupportStatus(version='5.0.0'),
            constraints=[constraints.AllowedValues(REQUEST_TYPES)]),
        SUBJECT_DN:
        properties.Schema(
            properties.Schema.STRING,
            _('The subject of the certificate request.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        SOURCE_CONTAINER_REF:
        properties.Schema(
            properties.Schema.STRING,
            _('The source of the certificate request.'),
            support_status=support.SupportStatus(version='5.0.0'),
            constraints=[constraints.CustomConstraint('barbican.container')],
        ),
        CA_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The identifier of the CA to use.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        PROFILE:
        properties.Schema(
            properties.Schema.STRING,
            _('The profile of certificate to use.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        REQUEST_DATA:
        properties.Schema(
            properties.Schema.STRING,
            _('The content of the CSR. Only for certificate orders.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        PASS_PHRASE:
        properties.Schema(
            properties.Schema.STRING,
            _('The passphrase of the created key. Can be set only '
              'for asymmetric type of order.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
    }

    attributes_schema = {
        STATUS:
        attributes.Schema(_('The status of the order.'),
                          type=attributes.Schema.STRING),
        ORDER_REF:
        attributes.Schema(_('The URI to the order.'),
                          type=attributes.Schema.STRING),
        SECRET_REF:
        attributes.Schema(_('The URI to the created secret.'),
                          type=attributes.Schema.STRING),
        CONTAINER_REF:
        attributes.Schema(
            _('The URI to the created container.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
        PUBLIC_KEY:
        attributes.Schema(
            _('The payload of the created public key, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
        PRIVATE_KEY:
        attributes.Schema(
            _('The payload of the created private key, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
        CERTIFICATE:
        attributes.Schema(
            _('The payload of the created certificate, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
        INTERMEDIATES:
        attributes.Schema(
            _('The payload of the created intermediates, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
    }

    def handle_create(self):
        info = dict(
            (k, v) for k, v in self.properties.items() if v is not None)
        order = self.client().orders.create(**info)
        order_ref = order.submit()
        self.resource_id_set(order_ref)
        # NOTE(pshchelo): order_ref is a HATEOAS reference, i.e. a string;
        # it does not need to be fixed per LP bug #1393268
        return order_ref

    def validate(self):
        super(Order, self).validate()
        if self.properties[self.TYPE] != self.CERTIFICATE:
            if (self.properties[self.ALGORITHM] is None
                    or self.properties[self.BIT_LENGTH] is None):
                msg = _("Properties %(algorithm)s and %(bit_length)s are "
                        "required for %(type)s type of order.") % {
                            'algorithm': self.ALGORITHM,
                            'bit_length': self.BIT_LENGTH,
                            'type': self.properties[self.TYPE]
                        }
                raise exception.StackValidationFailed(message=msg)
        else:
            if (self.properties[self.PROFILE]
                    and not self.properties[self.CA_ID]):
                raise exception.ResourcePropertyDependency(prop1=self.PROFILE,
                                                           prop2=self.CA_ID)
        declared_props = sorted([
            k for k, v in self.properties.items()
            if k != self.TYPE and v is not None
        ])
        allowed_props = sorted(
            self.ALLOWED_PROPERTIES_FOR_TYPE[self.properties[self.TYPE]])
        diff = sorted(set(declared_props) - set(allowed_props))
        if diff:
            msg = _("Unexpected properties: %(unexpected)s. Only these "
                    "properties are allowed for %(type)s type of order: "
                    "%(allowed)s.") % {
                        'unexpected': ', '.join(diff),
                        'type': self.properties[self.TYPE],
                        'allowed': ', '.join(allowed_props)
                    }
            raise exception.StackValidationFailed(message=msg)

    def check_create_complete(self, order_href):
        order = self.client().orders.get(order_href)

        if order.status == 'ERROR':
            reason = order.error_reason
            code = order.error_status_code
            msg = (_("Order '%(name)s' failed: %(code)s - %(reason)s") % {
                'name': self.name,
                'code': code,
                'reason': reason
            })
            raise exception.Error(msg)

        return order.status == 'ACTIVE'

    def _resolve_attribute(self, name):
        if self.resource_id is None:
            return
        client = self.client()
        order = client.orders.get(self.resource_id)
        if name in (self.PUBLIC_KEY, self.PRIVATE_KEY, self.CERTIFICATE,
                    self.INTERMEDIATES):
            container = client.containers.get(order.container_ref)
            secret = getattr(container, name)
            return secret.payload

        return getattr(order, name)
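
The per-type allow-list check in validate() can be summarised as a standalone function; the sketch below is illustrative only and simply mirrors the set arithmetic used above.

def unexpected_order_properties(declared, order_type, allowed_map):
    """Return properties set on the order that its type does not allow.

    declared    -- names of properties with non-None values
    order_type  -- one of 'key', 'asymmetric', 'certificate'
    allowed_map -- mapping like Order.ALLOWED_PROPERTIES_FOR_TYPE
    """
    return sorted((set(declared) - {'type'}) - set(allowed_map[order_type]))

# e.g. a 'key' order that also sets 'subject_dn' would be rejected:
# unexpected_order_properties({'name', 'algorithm', 'bit_length', 'subject_dn'},
#                             'key', Order.ALLOWED_PROPERTIES_FOR_TYPE)
# -> ['subject_dn']
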
Example #9
class Router(neutron.NeutronResource):
    """A resource that implements Neutron router.

    Router is a physical or virtual network device that passes network traffic
    between different networks.
    """

    required_service_extension = 'router'

    entity = 'router'

    PROPERTIES = (
        NAME,
        EXTERNAL_GATEWAY,
        VALUE_SPECS,
        ADMIN_STATE_UP,
        L3_AGENT_ID,
        L3_AGENT_IDS,
        DISTRIBUTED,
        HA,
        TAGS,
    ) = (
        'name',
        'external_gateway_info',
        'value_specs',
        'admin_state_up',
        'l3_agent_id',
        'l3_agent_ids',
        'distributed',
        'ha',
        'tags',
    )

    _EXTERNAL_GATEWAY_KEYS = (
        EXTERNAL_GATEWAY_NETWORK,
        EXTERNAL_GATEWAY_ENABLE_SNAT,
        EXTERNAL_GATEWAY_FIXED_IPS,
    ) = (
        'network',
        'enable_snat',
        'external_fixed_ips',
    )

    _EXTERNAL_GATEWAY_FIXED_IPS_KEYS = (IP_ADDRESS, SUBNET) = ('ip_address',
                                                               'subnet')

    ATTRIBUTES = (
        STATUS,
        EXTERNAL_GATEWAY_INFO_ATTR,
        NAME_ATTR,
        ADMIN_STATE_UP_ATTR,
        TENANT_ID,
    ) = (
        'status',
        'external_gateway_info',
        'name',
        'admin_state_up',
        'tenant_id',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('The name of the router.'),
                          update_allowed=True),
        EXTERNAL_GATEWAY:
        properties.Schema(
            properties.Schema.MAP,
            _('External network gateway configuration for a router.'),
            schema={
                EXTERNAL_GATEWAY_NETWORK:
                properties.Schema(
                    properties.Schema.STRING,
                    _('ID or name of the external network for the gateway.'),
                    required=True,
                    update_allowed=True),
                EXTERNAL_GATEWAY_ENABLE_SNAT:
                properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('Enables Source NAT on the router gateway. NOTE: The '
                      'default policy setting in Neutron restricts usage of '
                      'this property to administrative users only.'),
                    update_allowed=True),
                EXTERNAL_GATEWAY_FIXED_IPS:
                properties.Schema(
                    properties.Schema.LIST,
                    _('External fixed IP addresses for the gateway.'),
                    schema=properties.Schema(
                        properties.Schema.MAP,
                        schema={
                            IP_ADDRESS:
                            properties.Schema(
                                properties.Schema.STRING,
                                _('External fixed IP address.'),
                                constraints=[
                                    constraints.CustomConstraint('ip_addr'),
                                ]),
                            SUBNET:
                            properties.Schema(
                                properties.Schema.STRING,
                                _('Subnet of external fixed IP address.'),
                                constraints=[
                                    constraints.CustomConstraint(
                                        'neutron.subnet')
                                ]),
                        }),
                    update_allowed=True,
                    support_status=support.SupportStatus(version='6.0.0')),
            },
            update_allowed=True),
        VALUE_SPECS:
        properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the creation request.'),
            default={},
            update_allowed=True),
        ADMIN_STATE_UP:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('The administrative state of the router.'),
                          default=True,
                          update_allowed=True),
        L3_AGENT_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of the L3 agent. NOTE: The default policy setting in '
              'Neutron restricts usage of this property to administrative '
              'users only.'),
            update_allowed=True,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2015.1',
                    message=_('Use property %s.') % L3_AGENT_IDS,
                    previous_status=support.SupportStatus(version='2014.1'))),
        ),
        L3_AGENT_IDS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of L3 agent IDs. Users can specify multiple agents '
              'for a highly available router. NOTE: The default policy '
              'setting in Neutron restricts usage of this property to '
              'administrative users only.'),
            schema=properties.Schema(properties.Schema.STRING, ),
            update_allowed=True,
            support_status=support.SupportStatus(version='2015.1')),
        DISTRIBUTED:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Indicates whether or not to create a distributed router. '
              'NOTE: The default policy setting in Neutron restricts usage '
              'of this property to administrative users only. This property '
              'cannot be used in conjunction with the L3 agent ID.'),
            support_status=support.SupportStatus(version='2015.1')),
        HA:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Indicates whether or not to create a highly available router. '
              'NOTE: The default policy setting in Neutron restricts usage '
              'of this property to administrative users only. Neutron does '
              'not currently support distributed and HA at the same time.'),
            support_status=support.SupportStatus(version='2015.1')),
        TAGS:
        properties.Schema(
            properties.Schema.LIST,
            _('The tags to be added to the router.'),
            schema=properties.Schema(properties.Schema.STRING),
            update_allowed=True,
            support_status=support.SupportStatus(version='9.0.0')),
    }

    attributes_schema = {
        STATUS:
        attributes.Schema(_("The status of the router."),
                          type=attributes.Schema.STRING),
        EXTERNAL_GATEWAY_INFO_ATTR:
        attributes.Schema(_("Gateway network for the router."),
                          type=attributes.Schema.MAP),
        NAME_ATTR:
        attributes.Schema(_("Friendly name of the router."),
                          type=attributes.Schema.STRING),
        ADMIN_STATE_UP_ATTR:
        attributes.Schema(_("Administrative state of the router."),
                          type=attributes.Schema.STRING),
        TENANT_ID:
        attributes.Schema(_("Tenant owning the router."),
                          type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        rules = [
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.EXTERNAL_GATEWAY, self.EXTERNAL_GATEWAY_NETWORK],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='network'),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE, [
                    self.EXTERNAL_GATEWAY, self.EXTERNAL_GATEWAY_FIXED_IPS,
                    self.SUBNET
                ],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='subnet')
        ]
        if props.get(self.L3_AGENT_ID):
            rules.extend([
                translation.TranslationRule(props,
                                            translation.TranslationRule.ADD,
                                            [self.L3_AGENT_IDS],
                                            [props.get(self.L3_AGENT_ID)]),
                translation.TranslationRule(props,
                                            translation.TranslationRule.DELETE,
                                            [self.L3_AGENT_ID])
            ])
        return rules

    def validate(self):
        super(Router, self).validate()
        is_distributed = self.properties[self.DISTRIBUTED]
        l3_agent_id = self.properties[self.L3_AGENT_ID]
        l3_agent_ids = self.properties[self.L3_AGENT_IDS]
        is_ha = self.properties[self.HA]
        if l3_agent_id and l3_agent_ids:
            raise exception.ResourcePropertyConflict(self.L3_AGENT_ID,
                                                     self.L3_AGENT_IDS)
        # do not specify an l3 agent when creating a distributed router
        if is_distributed and (l3_agent_id or l3_agent_ids):
            raise exception.ResourcePropertyConflict(
                self.DISTRIBUTED,
                "/".join([self.L3_AGENT_ID, self.L3_AGENT_IDS]))
        if is_ha and is_distributed:
            raise exception.ResourcePropertyConflict(self.DISTRIBUTED, self.HA)
        if not is_ha and l3_agent_ids and len(l3_agent_ids) > 1:
            msg = _('Non HA routers can only have one L3 agent.')
            raise exception.StackValidationFailed(message=msg)

    def add_dependencies(self, deps):
        super(Router, self).add_dependencies(deps)
        external_gw = self.properties[self.EXTERNAL_GATEWAY]
        if external_gw:
            external_gw_net = external_gw.get(self.EXTERNAL_GATEWAY_NETWORK)
            for res in six.itervalues(self.stack):
                if res.has_interface('OS::Neutron::Subnet'):
                    try:
                        subnet_net = res.properties.get(subnet.Subnet.NETWORK)
                    except (ValueError, TypeError):
                        # Properties errors will be caught later in validation,
                        # where we can report them in their proper context.
                        continue
                    if subnet_net == external_gw_net:
                        deps += (self, res)

    def _resolve_gateway(self, props):
        gateway = props.get(self.EXTERNAL_GATEWAY)
        if gateway:
            gateway['network_id'] = gateway.pop(self.EXTERNAL_GATEWAY_NETWORK)
            if gateway[self.EXTERNAL_GATEWAY_ENABLE_SNAT] is None:
                del gateway[self.EXTERNAL_GATEWAY_ENABLE_SNAT]
            if gateway[self.EXTERNAL_GATEWAY_FIXED_IPS] is None:
                del gateway[self.EXTERNAL_GATEWAY_FIXED_IPS]
            else:
                self._resolve_subnet(gateway)
        return props

    def _get_l3_agent_list(self, props):
        l3_agent_id = props.pop(self.L3_AGENT_ID, None)
        l3_agent_ids = props.pop(self.L3_AGENT_IDS, None)
        if not l3_agent_ids and l3_agent_id:
            l3_agent_ids = [l3_agent_id]

        return l3_agent_ids

    def _resolve_subnet(self, gateway):
        external_gw_fixed_ips = gateway[self.EXTERNAL_GATEWAY_FIXED_IPS]
        for fixed_ip in external_gw_fixed_ips:
            for key, value in fixed_ip.copy().items():
                if value is None:
                    fixed_ip.pop(key)
            if self.SUBNET in fixed_ip:
                fixed_ip['subnet_id'] = fixed_ip.pop(self.SUBNET)

    def handle_create(self):
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        self._resolve_gateway(props)
        l3_agent_ids = self._get_l3_agent_list(props)
        tags = props.pop(self.TAGS, [])

        router = self.client().create_router({'router': props})['router']
        self.resource_id_set(router['id'])

        if l3_agent_ids:
            self._replace_agent(l3_agent_ids)
        if tags:
            self.set_tags(tags)

    def check_create_complete(self, *args):
        attributes = self._show_resource()
        return self.is_built(attributes)

    def handle_delete(self):
        try:
            self.client().delete_router(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if self.EXTERNAL_GATEWAY in prop_diff:
            self._resolve_gateway(prop_diff)

        if self.L3_AGENT_IDS in prop_diff or self.L3_AGENT_ID in prop_diff:
            l3_agent_ids = self._get_l3_agent_list(prop_diff)
            self._replace_agent(l3_agent_ids)

        if self.TAGS in prop_diff:
            tags = prop_diff.pop(self.TAGS)
            self.set_tags(tags)

        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_router(self.resource_id,
                                        {'router': prop_diff})

    def _replace_agent(self, l3_agent_ids=None):
        ret = self.client().list_l3_agent_hosting_routers(self.resource_id)
        for agent in ret['agents']:
            self.client().remove_router_from_l3_agent(agent['id'],
                                                      self.resource_id)
        if l3_agent_ids:
            for l3_agent_id in l3_agent_ids:
                self.client().add_router_to_l3_agent(
                    l3_agent_id, {'router_id': self.resource_id})

    def parse_live_resource_data(self, resource_properties, resource_data):
        result = super(Router,
                       self).parse_live_resource_data(resource_properties,
                                                      resource_data)

        try:
            ret = self.client().list_l3_agent_hosting_routers(self.resource_id)
            if ret:
                result[self.L3_AGENT_IDS] = list(agent['id']
                                                 for agent in ret['agents'])
        except self.client_plugin().exceptions.Forbidden:
            # Just pass if forbidden
            pass

        gateway = resource_data.get(self.EXTERNAL_GATEWAY)
        if gateway is not None:
            result[self.EXTERNAL_GATEWAY] = {
                self.EXTERNAL_GATEWAY_NETWORK: gateway.get('network_id'),
                self.EXTERNAL_GATEWAY_ENABLE_SNAT: gateway.get('enable_snat')
            }
        return result
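
The net effect of the translation rules above for the deprecated l3_agent_id property can be pictured with plain dicts. The helper below is illustrative only; the real conversion is performed by Heat's TranslationRule machinery before validation runs.

def fold_l3_agent_property(props):
    """Illustrative: fold the deprecated l3_agent_id into l3_agent_ids."""
    props = dict(props)
    agent_id = props.pop('l3_agent_id', None)
    if agent_id:
        props.setdefault('l3_agent_ids', []).append(agent_id)
    return props

# fold_l3_agent_property({'name': 'router1', 'l3_agent_id': 'agent-uuid'})
# -> {'name': 'router1', 'l3_agent_ids': ['agent-uuid']}
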
Example #10
File: volume.py Project: frank6866/heat
class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):

    PROPERTIES = (
        AVAILABILITY_ZONE,
        SIZE,
        SNAPSHOT_ID,
        BACKUP_ID,
        NAME,
        DESCRIPTION,
        VOLUME_TYPE,
        METADATA,
        IMAGE_REF,
        IMAGE,
        SOURCE_VOLID,
        CINDER_SCHEDULER_HINTS,
        READ_ONLY,
        MULTI_ATTACH,
    ) = (
        'availability_zone',
        'size',
        'snapshot_id',
        'backup_id',
        'name',
        'description',
        'volume_type',
        'metadata',
        'imageRef',
        'image',
        'source_volid',
        'scheduler_hints',
        'read_only',
        'multiattach',
    )

    ATTRIBUTES = (
        AVAILABILITY_ZONE_ATTR,
        SIZE_ATTR,
        SNAPSHOT_ID_ATTR,
        DISPLAY_NAME_ATTR,
        DISPLAY_DESCRIPTION_ATTR,
        VOLUME_TYPE_ATTR,
        METADATA_ATTR,
        SOURCE_VOLID_ATTR,
        STATUS,
        CREATED_AT,
        BOOTABLE,
        METADATA_VALUES_ATTR,
        ENCRYPTED_ATTR,
        ATTACHMENTS,
        MULTI_ATTACH_ATTR,
    ) = (
        'availability_zone',
        'size',
        'snapshot_id',
        'display_name',
        'display_description',
        'volume_type',
        'metadata',
        'source_volid',
        'status',
        'created_at',
        'bootable',
        'metadata_values',
        'encrypted',
        'attachments',
        'multiattach',
    )

    properties_schema = {
        AVAILABILITY_ZONE:
        properties.Schema(
            properties.Schema.STRING,
            _('The availability zone in which the volume will be created.')),
        SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('The size of the volume in GB. '
                            'On update only increase in size is supported.'),
                          update_allowed=True,
                          constraints=[
                              constraints.Range(min=1),
                          ]),
        SNAPSHOT_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('If specified, the snapshot to create the volume from.'),
            constraints=[constraints.CustomConstraint('cinder.snapshot')]),
        BACKUP_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('If specified, the backup to create the volume from.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('cinder.backup')]),
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('A name used to distinguish the volume.'),
            update_allowed=True,
        ),
        DESCRIPTION:
        properties.Schema(
            properties.Schema.STRING,
            _('A description of the volume.'),
            update_allowed=True,
        ),
        VOLUME_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('If specified, the type of volume to use, mapping to a '
              'specific backend.'),
            constraints=[constraints.CustomConstraint('cinder.vtype')],
            update_allowed=True),
        METADATA:
        properties.Schema(
            properties.Schema.MAP,
            _('Key/value pairs to associate with the volume.'),
            update_allowed=True,
        ),
        IMAGE_REF:
        properties.Schema(properties.Schema.STRING,
                          _('The ID of the image to create the volume from.'),
                          support_status=support.SupportStatus(
                              status=support.HIDDEN,
                              message=_('Use property %s.') % IMAGE,
                              version='5.0.0',
                              previous_status=support.SupportStatus(
                                  status=support.DEPRECATED,
                                  version='2014.1'))),
        IMAGE:
        properties.Schema(
            properties.Schema.STRING,
            _('If specified, the name or ID of the image to create the '
              'volume from.'),
            constraints=[constraints.CustomConstraint('glance.image')]),
        SOURCE_VOLID:
        properties.Schema(
            properties.Schema.STRING,
            _('If specified, the volume to use as source.'),
            constraints=[constraints.CustomConstraint('cinder.volume')]),
        CINDER_SCHEDULER_HINTS:
        properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary key-value pairs specified by the client to help '
              'the Cinder scheduler create a volume.'),
            support_status=support.SupportStatus(version='2015.1')),
        READ_ONLY:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Enables or disables read-only access mode of volume.'),
            support_status=support.SupportStatus(version='5.0.0'),
            update_allowed=True,
        ),
        MULTI_ATTACH:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether to allow the volume to be attached more than once. '
              'This property is only supported from Cinder API v2.'),
            support_status=support.SupportStatus(version='6.0.0'),
        ),
    }

    attributes_schema = {
        AVAILABILITY_ZONE_ATTR:
        attributes.Schema(
            _('The availability zone in which the volume is located.'),
            type=attributes.Schema.STRING),
        SIZE_ATTR:
        attributes.Schema(_('The size of the volume in GB.'),
                          type=attributes.Schema.STRING),
        SNAPSHOT_ID_ATTR:
        attributes.Schema(
            _('The snapshot the volume was created from, if any.'),
            type=attributes.Schema.STRING),
        DISPLAY_NAME_ATTR:
        attributes.Schema(_('Name of the volume.'),
                          type=attributes.Schema.STRING),
        DISPLAY_DESCRIPTION_ATTR:
        attributes.Schema(_('Description of the volume.'),
                          type=attributes.Schema.STRING),
        VOLUME_TYPE_ATTR:
        attributes.Schema(
            _('The type of the volume mapping to a backend, if any.'),
            type=attributes.Schema.STRING),
        METADATA_ATTR:
        attributes.Schema(_('Key/value pairs associated with the volume.'),
                          type=attributes.Schema.STRING),
        SOURCE_VOLID_ATTR:
        attributes.Schema(_('The volume used as source, if any.'),
                          type=attributes.Schema.STRING),
        STATUS:
        attributes.Schema(_('The current status of the volume.'),
                          type=attributes.Schema.STRING),
        CREATED_AT:
        attributes.Schema(_('The timestamp indicating volume creation.'),
                          type=attributes.Schema.STRING),
        BOOTABLE:
        attributes.Schema(
            _('Boolean indicating if the volume can be booted or not.'),
            type=attributes.Schema.STRING),
        METADATA_VALUES_ATTR:
        attributes.Schema(
            _('Key/value pairs associated with the volume in raw dict form.'),
            type=attributes.Schema.MAP),
        ENCRYPTED_ATTR:
        attributes.Schema(
            _('Boolean indicating if the volume is encrypted or not.'),
            type=attributes.Schema.STRING),
        ATTACHMENTS:
        attributes.Schema(_('The list of attachments of the volume.'),
                          type=attributes.Schema.STRING),
        MULTI_ATTACH_ATTR:
        attributes.Schema(
            _('Boolean indicating whether the volume can be attached '
              'more than once.'),
            type=attributes.Schema.BOOLEAN,
            support_status=support.SupportStatus(version='6.0.0'),
        ),
    }

    _volume_creating_status = ['creating', 'restoring-backup', 'downloading']

    entity = 'volumes'

    def translation_rules(self):
        return [
            properties.TranslationRule(self.properties,
                                       properties.TranslationRule.REPLACE,
                                       [self.IMAGE],
                                       value_path=[self.IMAGE_REF])
        ]

    def _name(self):
        name = self.properties[self.NAME]
        if name:
            return name
        return super(CinderVolume, self)._name()

    def _description(self):
        return self.properties[self.DESCRIPTION]

    def _create_arguments(self):
        arguments = {
            'size': self.properties[self.SIZE],
            'availability_zone': self.properties[self.AVAILABILITY_ZONE],
        }

        scheduler_hints = self._scheduler_hints(
            self.properties[self.CINDER_SCHEDULER_HINTS])
        if scheduler_hints:
            arguments[self.CINDER_SCHEDULER_HINTS] = scheduler_hints

        if self.properties[self.IMAGE]:
            arguments['imageRef'] = self.client_plugin('glance').get_image_id(
                self.properties[self.IMAGE])
        elif self.properties[self.IMAGE_REF]:
            arguments['imageRef'] = self.properties[self.IMAGE_REF]

        optionals = (self.SNAPSHOT_ID, self.VOLUME_TYPE, self.SOURCE_VOLID,
                     self.METADATA, self.MULTI_ATTACH)

        arguments.update((prop, self.properties[prop]) for prop in optionals
                         if self.properties[prop] is not None)

        return arguments

    def _resolve_attribute(self, name):
        cinder = self.client()
        vol = cinder.volumes.get(self.resource_id)
        if name == self.METADATA_ATTR:
            return six.text_type(jsonutils.dumps(vol.metadata))
        elif name == self.METADATA_VALUES_ATTR:
            return vol.metadata
        if cinder.volume_api_version >= 2:
            if name == self.DISPLAY_NAME_ATTR:
                return vol.name
            elif name == self.DISPLAY_DESCRIPTION_ATTR:
                return vol.description
        return six.text_type(getattr(vol, name))

    # TODO(huangtianhua): remove this method when bug #1479641 is fixed.
    def _show_resource(self):
        volume = self.client().volumes.get(self.resource_id)
        return volume._info

    def handle_create(self):
        vol_id = super(CinderVolume, self).handle_create()
        read_only_flag = self.properties.get(self.READ_ONLY)
        if read_only_flag is not None:
            self.client().volumes.update_readonly_flag(vol_id, read_only_flag)

        return vol_id

    def _extend_volume(self, new_size):
        try:
            self.client().volumes.extend(self.resource_id, new_size)
        except Exception as ex:
            if self.client_plugin().is_client_exception(ex):
                raise exception.Error(
                    _("Failed to extend volume %(vol)s - %(err)s") % {
                        'vol': self.resource_id,
                        'err': str(ex)
                    })
            else:
                raise
        return True

    def _check_extend_volume_complete(self):
        vol = self.client().volumes.get(self.resource_id)
        if vol.status == 'extending':
            LOG.debug("Volume %s is being extended" % vol.id)
            return False

        if vol.status != 'available':
            LOG.info(
                _LI("Resize failed: Volume %(vol)s "
                    "is in %(status)s state."), {
                        'vol': vol.id,
                        'status': vol.status
                    })
            raise exception.ResourceUnknownStatus(
                resource_status=vol.status, result=_('Volume resize failed'))

        LOG.info(_LI('Volume %(id)s resize complete'), {'id': vol.id})
        return True

    def _backup_restore(self, vol_id, backup_id):
        try:
            self.client().restores.restore(backup_id, vol_id)
        except Exception as ex:
            if self.client_plugin().is_client_exception(ex):
                raise exception.Error(
                    _("Failed to restore volume %(vol)s from backup %(backup)s "
                      "- %(err)s") % {
                          'vol': vol_id,
                          'backup': backup_id,
                          'err': ex
                      })
            else:
                raise
        return True

    def _check_backup_restore_complete(self):
        vol = self.client().volumes.get(self.resource_id)
        if vol.status == 'restoring-backup':
            LOG.debug("Volume %s is being restoring from backup" % vol.id)
            return False

        if vol.status != 'available':
            LOG.info(
                _LI("Restore failed: Volume %(vol)s is in %(status)s "
                    "state."), {
                        'vol': vol.id,
                        'status': vol.status
                    })
            raise exception.ResourceUnknownStatus(
                resource_status=vol.status,
                result=_('Volume backup restore failed'))

        LOG.info(_LI('Volume %(id)s backup restore complete'), {'id': vol.id})
        return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        vol = None
        cinder = self.client()
        prg_resize = None
        prg_attach = None
        prg_detach = None
        prg_backup_restore = None
        # update the name and description for cinder volume
        if self.NAME in prop_diff or self.DESCRIPTION in prop_diff:
            vol = cinder.volumes.get(self.resource_id)
            update_name = (prop_diff.get(self.NAME)
                           or self.properties[self.NAME])
            update_description = (prop_diff.get(self.DESCRIPTION)
                                  or self.properties[self.DESCRIPTION])
            kwargs = self._fetch_name_and_description(
                cinder.volume_api_version, update_name, update_description)
            cinder.volumes.update(vol, **kwargs)
        # update the metadata for cinder volume
        if self.METADATA in prop_diff:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)
            metadata = prop_diff.get(self.METADATA)
            cinder.volumes.update_all_metadata(vol, metadata)
        # retype
        if self.VOLUME_TYPE in prop_diff:
            if cinder.volume_api_version == 1:
                LOG.info(
                    _LI('Volume type update not supported '
                        'by Cinder API V1.'))
                raise exception.NotSupported(
                    feature=_('Using Cinder API V1, volume_type update'))
            else:
                if not vol:
                    vol = cinder.volumes.get(self.resource_id)
                new_vol_type = prop_diff.get(self.VOLUME_TYPE)
                cinder.volumes.retype(vol, new_vol_type, 'never')
        # update read_only access mode
        if self.READ_ONLY in prop_diff:
            flag = prop_diff.get(self.READ_ONLY)
            cinder.volumes.update_readonly_flag(self.resource_id, flag)
        # restore the volume from backup
        if self.BACKUP_ID in prop_diff:
            prg_backup_restore = progress.VolumeBackupRestoreProgress(
                vol_id=self.resource_id,
                backup_id=prop_diff.get(self.BACKUP_ID))
        # extend volume size
        if self.SIZE in prop_diff:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)

            new_size = prop_diff[self.SIZE]
            if new_size < vol.size:
                raise exception.NotSupported(feature=_("Shrinking volume"))

            elif new_size > vol.size:
                prg_resize = progress.VolumeResizeProgress(size=new_size)
                if vol.attachments:
                    # NOTE(pshchelo):
                    # this relies on the current behavior of cinder
                    # attachments, i.e. a volume's attachments list has
                    # len <= 1, so the volume can be attached to at most
                    # one instance, and the id of the attachment is the
                    # same as the id of the volume it describes, so
                    # detaching and re-attaching the same volume will not
                    # change the volume attachment id.
                    server_id = vol.attachments[0]['server_id']
                    device = vol.attachments[0]['device']
                    attachment_id = vol.attachments[0]['id']
                    prg_detach = progress.VolumeDetachProgress(
                        server_id, vol.id, attachment_id)
                    prg_attach = progress.VolumeAttachProgress(
                        server_id, vol.id, device)

        return prg_backup_restore, prg_detach, prg_resize, prg_attach

    def _detach_volume_to_complete(self, prg_detach):
        if not prg_detach.called:
            self.client_plugin('nova').detach_volume(prg_detach.srv_id,
                                                     prg_detach.attach_id)
            prg_detach.called = True
            return False
        if not prg_detach.cinder_complete:
            cinder_complete_res = self.client_plugin(
            ).check_detach_volume_complete(prg_detach.vol_id)
            prg_detach.cinder_complete = cinder_complete_res
            return False
        if not prg_detach.nova_complete:
            prg_detach.nova_complete = self.client_plugin(
                'nova').check_detach_volume_complete(prg_detach.srv_id,
                                                     prg_detach.attach_id)
            return False

    def _attach_volume_to_complete(self, prg_attach):
        if not prg_attach.called:
            prg_attach.called = self.client_plugin('nova').attach_volume(
                prg_attach.srv_id, prg_attach.vol_id, prg_attach.device)
            return False
        if not prg_attach.complete:
            prg_attach.complete = self.client_plugin(
            ).check_attach_volume_complete(prg_attach.vol_id)
            return prg_attach.complete

    def check_update_complete(self, checkers):
        prg_backup_restore, prg_detach, prg_resize, prg_attach = checkers
        if prg_backup_restore:
            if not prg_backup_restore.called:
                prg_backup_restore.called = self._backup_restore(
                    prg_backup_restore.vol_id, prg_backup_restore.backup_id)
                return False
            if not prg_backup_restore.complete:
                prg_backup_restore.complete = \
                    self._check_backup_restore_complete()
                return prg_backup_restore.complete and not prg_resize
        if not prg_resize:
            return True
        # detach volume
        if prg_detach:
            if not prg_detach.nova_complete:
                self._detach_volume_to_complete(prg_detach)
                return False
        # resize volume
        if not prg_resize.called:
            prg_resize.called = self._extend_volume(prg_resize.size)
            return False
        if not prg_resize.complete:
            prg_resize.complete = self._check_extend_volume_complete()
            return prg_resize.complete and not prg_attach
        # reattach volume back
        if prg_attach:
            return self._attach_volume_to_complete(prg_attach)
        return True

    def handle_snapshot(self):
        backup = self.client().backups.create(self.resource_id)
        self.data_set('backup_id', backup.id)
        return backup.id

    def check_snapshot_complete(self, backup_id):
        backup = self.client().backups.get(backup_id)
        if backup.status == 'creating':
            return False
        if backup.status == 'available':
            return True
        raise exception.Error(backup.fail_reason)

    def handle_delete_snapshot(self, snapshot):
        backup_id = snapshot['resource_data'].get('backup_id')
        if not backup_id:
            return
        try:
            self.client().backups.delete(backup_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return
        else:
            return backup_id

    def check_delete_snapshot_complete(self, backup_id):
        if not backup_id:
            return True
        try:
            self.client().backups.get(backup_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return True
        else:
            return False

    def _build_exclusive_options(self):
        exclusive_options = []
        if self.properties.get(self.SNAPSHOT_ID):
            exclusive_options.append(self.SNAPSHOT_ID)
        if self.properties.get(self.SOURCE_VOLID):
            exclusive_options.append(self.SOURCE_VOLID)
        if self.properties.get(self.IMAGE):
            exclusive_options.append(self.IMAGE)
        if self.properties.get(self.IMAGE_REF):
            exclusive_options.append(self.IMAGE_REF)
        return exclusive_options

    def _validate_create_sources(self):
        exclusive_options = self._build_exclusive_options()
        size = self.properties.get(self.SIZE)
        if size is None and len(exclusive_options) != 1:
            msg = (_('If neither "%(backup_id)s" nor "%(size)s" is '
                     'provided, one and only one of '
                     '"%(image)s", "%(image_ref)s", "%(source_vol)s", '
                     '"%(snapshot_id)s" must be specified, but currently '
                     'specified options: %(exclusive_options)s.') % {
                         'backup_id': self.BACKUP_ID,
                         'size': self.SIZE,
                         'image': self.IMAGE,
                         'image_ref': self.IMAGE_REF,
                         'source_vol': self.SOURCE_VOLID,
                         'snapshot_id': self.SNAPSHOT_ID,
                         'exclusive_options': exclusive_options
                     })
            raise exception.StackValidationFailed(message=msg)
        elif size and len(exclusive_options) > 1:
            msg = (_('If "%(size)s" is provided, only one of '
                     '"%(image)s", "%(image_ref)s", "%(source_vol)s", '
                     '"%(snapshot_id)s" can be specified, but currently '
                     'specified options: %(exclusive_options)s.') % {
                         'size': self.SIZE,
                         'image': self.IMAGE,
                         'image_ref': self.IMAGE_REF,
                         'source_vol': self.SOURCE_VOLID,
                         'snapshot_id': self.SNAPSHOT_ID,
                         'exclusive_options': exclusive_options
                     })
            raise exception.StackValidationFailed(message=msg)

    def validate(self):
        """Validate provided params."""
        res = super(CinderVolume, self).validate()
        if res is not None:
            return res

        # Scheduler hints are only supported from Cinder API v2
        if (self.properties[self.CINDER_SCHEDULER_HINTS]
                and self.client().volume_api_version == 1):
            raise exception.StackValidationFailed(
                message=_('Scheduler hints are not supported by the current '
                          'volume API.'))
        # Multi attach is only supported from Cinder API v2
        if (self.properties[self.MULTI_ATTACH]
                and self.client().volume_api_version == 1):
            raise exception.StackValidationFailed(
                message=_('Multiple attach is not supported by the current '
                          'volume API. This property requires Cinder API '
                          'v2 or later.'))
        # cannot specify both image and imageRef
        image = self.properties.get(self.IMAGE)
        imageRef = self.properties.get(self.IMAGE_REF)
        if image and imageRef:
            raise exception.ResourcePropertyConflict(self.IMAGE,
                                                     self.IMAGE_REF)
        # if not creating from a backup, check the other create sources
        if not self.properties.get(self.BACKUP_ID):
            self._validate_create_sources()

    def handle_restore(self, defn, restore_data):
        backup_id = restore_data['resource_data']['backup_id']
        # We can't ignore the 'size' property: if the user updated the
        # volume size after the snapshot was taken, we need to switch
        # back to the old size when restoring the volume.
        ignore_props = (self.IMAGE_REF, self.IMAGE, self.SOURCE_VOLID)
        props = dict((key, value) for (
            key,
            value) in six.iteritems(defn.properties(self.properties_schema))
                     if key not in ignore_props and value is not None)
        props[self.BACKUP_ID] = backup_id
        return defn.freeze(properties=props)
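
The update path above threads a tuple of progress trackers from handle_update() into check_update_complete(), which is re-entered until every stage reports done. As a rough sketch (not the actual heat progress module; the field names simply mirror the attributes the example reads, everything else is assumed), the trackers could look like this:

class VolumeDetachProgress(object):
    """Hypothetical stand-in for progress.VolumeDetachProgress."""

    def __init__(self, srv_id, vol_id, attach_id, task_complete=False):
        self.srv_id = srv_id                  # server the volume detaches from
        self.vol_id = vol_id                  # volume being detached
        self.attach_id = attach_id            # nova volume-attachment id
        self.called = task_complete           # detach request sent to nova
        self.cinder_complete = task_complete  # cinder reports the volume free
        self.nova_complete = task_complete    # nova no longer lists the attachment


class VolumeAttachProgress(object):
    """Hypothetical stand-in for progress.VolumeAttachProgress."""

    def __init__(self, srv_id, vol_id, device, task_complete=False):
        self.srv_id = srv_id           # server to attach to
        self.vol_id = vol_id           # volume to attach
        self.device = device           # requested device name (may be None)
        self.called = task_complete    # attach request sent to nova
        self.complete = task_complete  # cinder reports the volume in-use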
Example #11
File: volume.py Project: frank6866/heat
class CinderVolumeAttachment(vb.BaseVolumeAttachment):

    PROPERTIES = (
        INSTANCE_ID,
        VOLUME_ID,
        DEVICE,
    ) = (
        'instance_uuid',
        'volume_id',
        'mountpoint',
    )

    properties_schema = {
        INSTANCE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID of the server to which the volume attaches.'),
            required=True,
            update_allowed=True),
        VOLUME_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID of the volume to be attached.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.CustomConstraint('cinder.volume')]),
        DEVICE:
        properties.Schema(
            properties.Schema.STRING,
            _('The location where the volume is exposed on the instance. This '
              'assignment may not be honored and it is advised that the path '
              '/dev/disk/by-id/virtio-<VolumeId> be used instead.'),
            update_allowed=True),
    }

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        prg_attach = None
        prg_detach = None
        if prop_diff:
            # Even though some combinations of changed properties
            # could be handled by replacing the resource (UpdateReplace),
            # we still detach the old resource first so that
            # self.resource_id is not replaced prematurely
            volume_id = self.properties[self.VOLUME_ID]
            server_id = self._stored_properties_data.get(self.INSTANCE_ID)
            self.client_plugin('nova').detach_volume(server_id,
                                                     self.resource_id)
            prg_detach = progress.VolumeDetachProgress(server_id, volume_id,
                                                       self.resource_id)
            prg_detach.called = True

            if self.VOLUME_ID in prop_diff:
                volume_id = prop_diff.get(self.VOLUME_ID)

            device = (self.properties[self.DEVICE]
                      if self.properties[self.DEVICE] else None)
            if self.DEVICE in prop_diff:
                device = (prop_diff[self.DEVICE]
                          if prop_diff[self.DEVICE] else None)

            if self.INSTANCE_ID in prop_diff:
                server_id = prop_diff.get(self.INSTANCE_ID)
            prg_attach = progress.VolumeAttachProgress(server_id, volume_id,
                                                       device)

        return prg_detach, prg_attach

    def check_update_complete(self, checkers):
        prg_detach, prg_attach = checkers
        if not (prg_detach and prg_attach):
            return True
        if not prg_detach.cinder_complete:
            prg_detach.cinder_complete = self.client_plugin(
            ).check_detach_volume_complete(prg_detach.vol_id)
            return False
        if not prg_detach.nova_complete:
            prg_detach.nova_complete = self.client_plugin(
                'nova').check_detach_volume_complete(prg_detach.srv_id,
                                                     self.resource_id)
            return False
        if not prg_attach.called:
            prg_attach.called = self.client_plugin('nova').attach_volume(
                prg_attach.srv_id, prg_attach.vol_id, prg_attach.device)
            return False
        if not prg_attach.complete:
            prg_attach.complete = self.client_plugin(
            ).check_attach_volume_complete(prg_attach.vol_id)
            if prg_attach.complete:
                self.resource_id_set(prg_attach.called)
            return prg_attach.complete
        return True
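
Each call to check_update_complete() above advances at most one step (cinder detach confirmed, nova detach confirmed, attach requested, attach confirmed) and returns False until the whole chain is finished. A simplified illustration of how a caller could poll it, assuming a resource object exposing that method (the real engine schedules cooperative tasks rather than sleeping):

import time


def poll_update(resource, checkers, interval=2.0, timeout=600):
    # `resource` stands in for the CinderVolumeAttachment instance and
    # `checkers` for the (prg_detach, prg_attach) tuple returned by
    # handle_update(); both names are assumptions for this sketch.
    deadline = time.time() + timeout
    while not resource.check_update_complete(checkers):
        if time.time() > deadline:
            raise RuntimeError('volume attachment update timed out')
        time.sleep(interval)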
Example #12
class NetworkGateway(neutron.NeutronResource):
    """Network Gateway resource in Neutron Network Gateway."""

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        NAME,
        DEVICES,
        CONNECTIONS,
    ) = (
        'name',
        'devices',
        'connections',
    )

    ATTRIBUTES = (DEFAULT, ) = ('default', )

    _DEVICES_KEYS = (
        ID,
        INTERFACE_NAME,
    ) = (
        'id',
        'interface_name',
    )

    _CONNECTIONS_KEYS = (
        NETWORK_ID,
        NETWORK,
        SEGMENTATION_TYPE,
        SEGMENTATION_ID,
    ) = (
        'network_id',
        'network',
        'segmentation_type',
        'segmentation_id',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          description=_('The name of the network gateway.'),
                          update_allowed=True),
        DEVICES:
        properties.Schema(
            properties.Schema.LIST,
            description=_('Device info for this network gateway.'),
            required=True,
            constraints=[constraints.Length(min=1)],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ID:
                    properties.Schema(properties.Schema.STRING,
                                      description=_(
                                          'The device id for the network '
                                          'gateway.'),
                                      required=True),
                    INTERFACE_NAME:
                    properties.Schema(properties.Schema.STRING,
                                      description=_(
                                          'The interface name for the '
                                          'network gateway.'),
                                      required=True)
                })),
        CONNECTIONS:
        properties.Schema(
            properties.Schema.LIST,
            description=_('Connection info for this network gateway.'),
            default=[],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NETWORK_ID:
                    properties.Schema(
                        properties.Schema.STRING,
                        support_status=support.SupportStatus(
                            status=support.HIDDEN,
                            message=_('Use property %s.') % NETWORK,
                            version='5.0.0',
                            previous_status=support.SupportStatus(
                                status=support.DEPRECATED, version='2014.2')),
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ],
                    ),
                    NETWORK:
                    properties.Schema(
                        properties.Schema.STRING,
                        description=_('The internal network to connect on '
                                      'the network gateway.'),
                        support_status=support.SupportStatus(version='2014.2'),
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ],
                    ),
                    SEGMENTATION_TYPE:
                    properties.Schema(
                        properties.Schema.STRING,
                        description=_(
                            'L2 segmentation strategy on the external '
                            'side of the network gateway.'),
                        default='flat',
                        constraints=[
                            constraints.AllowedValues(('flat', 'vlan'))
                        ]),
                    SEGMENTATION_ID:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        description=_(
                            'The id for L2 segment on the external side '
                            'of the network gateway. Must be specified '
                            'when using vlan.'),
                        constraints=[constraints.Range(0, 4094)])
                }))
    }

    attributes_schema = {
        DEFAULT:
        attributes.Schema(_("A boolean value of default flag."),
                          type=attributes.Schema.STRING),
    }

    def translation_rules(self):
        return [
            properties.TranslationRule(self.properties,
                                       properties.TranslationRule.REPLACE,
                                       [self.CONNECTIONS, self.NETWORK],
                                       value_name=self.NETWORK_ID)
        ]

    def _show_resource(self):
        return self.client().show_network_gateway(
            self.resource_id)['network_gateway']

    def validate(self):
        """Validate any of the provided params."""
        super(NetworkGateway, self).validate()
        connections = self.properties[self.CONNECTIONS]

        for connection in connections:
            self._validate_depr_property_required(connection, self.NETWORK,
                                                  self.NETWORK_ID)
            segmentation_type = connection[self.SEGMENTATION_TYPE]
            segmentation_id = connection.get(self.SEGMENTATION_ID)

            if segmentation_type == 'vlan' and segmentation_id is None:
                msg = _("segmentation_id must be specified for using vlan")
                raise exception.StackValidationFailed(message=msg)

            if segmentation_type == 'flat' and segmentation_id:
                msg = _("segmentation_id cannot be specified except 0 for "
                        "using flat")
                raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())

        connections = props.pop(self.CONNECTIONS)
        ret = self.client().create_network_gateway({'network_gateway':
                                                    props})['network_gateway']

        self.resource_id_set(ret['id'])

        for connection in connections:
            self.client_plugin().resolve_network(connection, self.NETWORK,
                                                 'network_id')
            if self.NETWORK in six.iterkeys(connection):
                connection.pop(self.NETWORK)
            self.client().connect_network_gateway(ret['id'], connection)

    def handle_delete(self):
        if not self.resource_id:
            return

        connections = self.properties[self.CONNECTIONS]
        for connection in connections:
            try:
                self.client_plugin().resolve_network(connection, self.NETWORK,
                                                     'network_id')
                if self.NETWORK in six.iterkeys(connection):
                    connection.pop(self.NETWORK)
                self.client().disconnect_network_gateway(
                    self.resource_id, connection)
            except Exception as ex:
                self.client_plugin().ignore_not_found(ex)

        try:
            self.client().delete_network_gateway(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        props = self.prepare_update_properties(json_snippet)
        connections = props.pop(self.CONNECTIONS)

        if self.DEVICES in prop_diff:
            self.handle_delete()
            self.properties.data.update(props)
            self.handle_create()
            return
        else:
            props.pop(self.DEVICES, None)

        if self.NAME in prop_diff:
            self.client().update_network_gateway(self.resource_id,
                                                 {'network_gateway': props})

        if self.CONNECTIONS in prop_diff:
            for connection in self.properties[self.CONNECTIONS]:
                try:
                    self.client_plugin().resolve_network(
                        connection, self.NETWORK, 'network_id')
                    if self.NETWORK in six.iterkeys(connection):
                        connection.pop(self.NETWORK)
                    self.client().disconnect_network_gateway(
                        self.resource_id, connection)
                except Exception as ex:
                    self.client_plugin().ignore_not_found(ex)
            for connection in connections:
                self.client_plugin().resolve_network(connection, self.NETWORK,
                                                     'network_id')
                if self.NETWORK in six.iterkeys(connection):
                    connection.pop(self.NETWORK)
                self.client().connect_network_gateway(self.resource_id,
                                                      connection)
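
Both handle_create() and handle_update() resolve a connection's network reference into a network_id and drop the NETWORK key before calling connect_network_gateway(). A made-up example of the resulting connection body (the id is a placeholder, not a real network):

connection = {
    'network_id': 'aaaa1111-2222-3333-4444-555566667777',  # resolved from 'network'
    'segmentation_type': 'vlan',
    'segmentation_id': 101,
}
# which would then be passed roughly as:
# client.connect_network_gateway(gateway_id, connection)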
Example #13
class KeystoneRoleAssignmentMixin(object):
    """Implements role assignments between user/groups and project/domain.

    heat_template_version: 2013-05-23

    parameters:
      ... Group or User parameters
      group_role:
        type: string
        description: role
      group_role_domain:
        type: string
        description: group role domain
      group_role_project:
        type: string
        description: group role project

    resources:
      admin_group:
        type: OS::Keystone::Group OR OS::Keystone::User
        properties:
          ... Group or User properties
          roles:
            - role: {get_param: group_role}
              domain: {get_param: group_role_domain}
            - role: {get_param: group_role}
              project: {get_param: group_role_project}
    """

    PROPERTIES = (ROLES,) = ('roles',)

    _ROLES_MAPPING_PROPERTIES = (ROLE, DOMAIN, PROJECT) = ('role', 'domain',
                                                           'project')

    mixin_properties_schema = {
        ROLES:
        properties.Schema(
            properties.Schema.LIST,
            _('List of role assignments.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                _('Map between role with either project or domain.'),
                schema={
                    ROLE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Keystone role'),
                        required=True,
                        constraints=([
                            constraints.CustomConstraint('keystone.role')
                        ])),
                    PROJECT:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Keystone project'),
                        constraints=([
                            constraints.CustomConstraint('keystone.project')
                        ])),
                    DOMAIN:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Keystone domain'),
                        constraints=([
                            constraints.CustomConstraint('keystone.domain')
                        ])),
                }),
            update_allowed=True)
    }

    def _add_role_assignments_to_group(self, group_id, role_assignments):
        for role_assignment in self._normalize_to_id(role_assignments):
            if role_assignment.get(self.PROJECT) is not None:
                self.client().client.roles.grant(
                    role=role_assignment.get(self.ROLE),
                    project=role_assignment.get(self.PROJECT),
                    group=group_id)
            elif role_assignment.get(self.DOMAIN) is not None:
                self.client().client.roles.grant(
                    role=role_assignment.get(self.ROLE),
                    domain=role_assignment.get(self.DOMAIN),
                    group=group_id)

    def _add_role_assignments_to_user(self, user_id, role_assignments):
        for role_assignment in self._normalize_to_id(role_assignments):
            if role_assignment.get(self.PROJECT) is not None:
                self.client().client.roles.grant(
                    role=role_assignment.get(self.ROLE),
                    project=role_assignment.get(self.PROJECT),
                    user=user_id)
            elif role_assignment.get(self.DOMAIN) is not None:
                self.client().client.roles.grant(
                    role=role_assignment.get(self.ROLE),
                    domain=role_assignment.get(self.DOMAIN),
                    user=user_id)

    def _remove_role_assignments_from_group(self, group_id, role_assignments):
        for role_assignment in self._normalize_to_id(role_assignments):
            if role_assignment.get(self.PROJECT) is not None:
                self.client().client.roles.revoke(
                    role=role_assignment.get(self.ROLE),
                    project=role_assignment.get(self.PROJECT),
                    group=group_id)
            elif role_assignment.get(self.DOMAIN) is not None:
                self.client().client.roles.revoke(
                    role=role_assignment.get(self.ROLE),
                    domain=role_assignment.get(self.DOMAIN),
                    group=group_id)

    def _remove_role_assignments_from_user(self, user_id, role_assignments):
        for role_assignment in self._normalize_to_id(role_assignments):
            if role_assignment.get(self.PROJECT) is not None:
                self.client().client.roles.revoke(
                    role=role_assignment.get(self.ROLE),
                    project=role_assignment.get(self.PROJECT),
                    user=user_id)
            elif role_assignment.get(self.DOMAIN) is not None:
                self.client().client.roles.revoke(
                    role=role_assignment.get(self.ROLE),
                    domain=role_assignment.get(self.DOMAIN),
                    user=user_id)

    def _normalize_to_id(self, role_assignment_prps):
        role_assignments = []
        if role_assignment_prps is None:
            return role_assignments

        for role_assignment in role_assignment_prps:
            role = role_assignment.get(self.ROLE)
            project = role_assignment.get(self.PROJECT)
            domain = role_assignment.get(self.DOMAIN)

            role_assignments.append({
                self.ROLE:
                self.client_plugin().get_role_id(role),
                self.PROJECT: (self.client_plugin().get_project_id(project))
                if project else None,
                self.DOMAIN: (self.client_plugin().get_domain_id(domain))
                if domain else None
            })
        return role_assignments

    def _find_differences(self, updated_prps, stored_prps):
        updated_role_project_assignments = []
        updated_role_domain_assignments = []

        # Split the updated properties into two sets of role
        # assignments (project-scoped and domain-scoped)
        for role_assignment in updated_prps or []:
            if role_assignment.get(self.PROJECT) is not None:
                updated_role_project_assignments.append(
                    '%s:%s' % (role_assignment[self.ROLE],
                               role_assignment[self.PROJECT]))
            elif (role_assignment.get(self.DOMAIN) is not None):
                updated_role_domain_assignments.append(
                    '%s:%s' %
                    (role_assignment[self.ROLE], role_assignment[self.DOMAIN]))

        stored_role_project_assignments = []
        stored_role_domain_assignments = []

        # Split the stored properties into two sets of role
        # assignments (project-scoped and domain-scoped)
        for role_assignment in (stored_prps or []):
            if role_assignment.get(self.PROJECT) is not None:
                stored_role_project_assignments.append(
                    '%s:%s' % (role_assignment[self.ROLE],
                               role_assignment[self.PROJECT]))
            elif (role_assignment.get(self.DOMAIN) is not None):
                stored_role_domain_assignments.append(
                    '%s:%s' %
                    (role_assignment[self.ROLE], role_assignment[self.DOMAIN]))

        new_role_assignments = []
        removed_role_assignments = []
        # NOTE: finding the diff between lists of strings is easier with
        #       'set', which is why the assignments were flattened to
        #       strings in the sections above
        # New items
        for item in (set(updated_role_project_assignments) -
                     set(stored_role_project_assignments)):
            new_role_assignments.append({
                self.ROLE: item[:item.find(':')],
                self.PROJECT: item[item.find(':') + 1:]
            })

        for item in (set(updated_role_domain_assignments) -
                     set(stored_role_domain_assignments)):
            new_role_assignments.append({
                self.ROLE: item[:item.find(':')],
                self.DOMAIN: item[item.find(':') + 1:]
            })

        # Old items
        for item in (set(stored_role_project_assignments) -
                     set(updated_role_project_assignments)):
            removed_role_assignments.append({
                self.ROLE:
                item[:item.find(':')],
                self.PROJECT:
                item[item.find(':') + 1:]
            })
        for item in (set(stored_role_domain_assignments) -
                     set(updated_role_domain_assignments)):
            removed_role_assignments.append({
                self.ROLE:
                item[:item.find(':')],
                self.DOMAIN:
                item[item.find(':') + 1:]
            })

        return new_role_assignments, removed_role_assignments

    def create_assignment(self, user_id=None, group_id=None):
        if self.properties.get(self.ROLES) is not None:
            if user_id is not None:
                self._add_role_assignments_to_user(
                    user_id, self.properties.get(self.ROLES))
            elif group_id is not None:
                self._add_role_assignments_to_group(
                    group_id, self.properties.get(self.ROLES))

    def update_assignment(self, prop_diff, user_id=None, group_id=None):
        (new_role_assignments,
         removed_role_assignments) = self._find_differences(
             prop_diff.get(self.ROLES),
             self._stored_properties_data.get(self.ROLES))

        if len(new_role_assignments) > 0:
            if user_id is not None:
                self._add_role_assignments_to_user(user_id,
                                                   new_role_assignments)
            elif group_id is not None:
                self._add_role_assignments_to_group(group_id,
                                                    new_role_assignments)

        if len(removed_role_assignments) > 0:
            if user_id is not None:
                self._remove_role_assignments_from_user(
                    user_id, removed_role_assignments)
            elif group_id is not None:
                self._remove_role_assignments_from_group(
                    group_id, removed_role_assignments)

    def delete_assignment(self, user_id=None, group_id=None):
        if self._stored_properties_data.get(self.ROLES) is not None:
            if user_id is not None:
                self._remove_role_assignments_from_user(
                    user_id, (self._stored_properties_data.get(self.ROLES)))
            elif group_id is not None:
                self._remove_role_assignments_from_group(
                    group_id, (self._stored_properties_data.get(self.ROLES)))

    def validate_assignment_properties(self):
        if self.properties.get(self.ROLES) is not None:
            for role_assignment in self.properties.get(self.ROLES):
                project = role_assignment.get(self.PROJECT)
                domain = role_assignment.get(self.DOMAIN)

                if project is not None and domain is not None:
                    raise exception.ResourcePropertyConflict(
                        self.PROJECT, self.DOMAIN)

                if project is None and domain is None:
                    msg = _('Either project or domain must be specified for'
                            ' role %s') % role_assignment.get(self.ROLE)
                    raise exception.StackValidationFailed(message=msg)
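
The diff logic in _find_differences() above flattens each assignment to a 'role:target' string so that plain set arithmetic yields what was added and what was removed. A self-contained sketch of the same idea, with invented role and project ids:

updated = ['admin:project-a', 'member:project-b']
stored = ['admin:project-a', 'member:project-c']

added = set(updated) - set(stored)     # {'member:project-b'}
removed = set(stored) - set(updated)   # {'member:project-c'}

new_assignments = [{'role': s.split(':', 1)[0], 'project': s.split(':', 1)[1]}
                   for s in added]
removed_assignments = [{'role': s.split(':', 1)[0], 'project': s.split(':', 1)[1]}
                       for s in removed]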
Example #14
class FirewallRule(neutron.NeutronResource):
    """A resource for the FirewallRule resource in Neutron FWaaS.

    FirewallRule represents a collection of attributes like ports,
    ip addresses etc. which define match criteria and action (allow, or deny)
    that needs to be taken on the matched data traffic.
    """

    required_service_extension = 'fwaas'

    PROPERTIES = (
        NAME,
        DESCRIPTION,
        SHARED,
        PROTOCOL,
        IP_VERSION,
        SOURCE_IP_ADDRESS,
        DESTINATION_IP_ADDRESS,
        SOURCE_PORT,
        DESTINATION_PORT,
        ACTION,
        ENABLED,
    ) = (
        'name',
        'description',
        'shared',
        'protocol',
        'ip_version',
        'source_ip_address',
        'destination_ip_address',
        'source_port',
        'destination_port',
        'action',
        'enabled',
    )

    ATTRIBUTES = (
        NAME_ATTR,
        DESCRIPTION_ATTR,
        FIREWALL_POLICY_ID,
        SHARED_ATTR,
        PROTOCOL_ATTR,
        IP_VERSION_ATTR,
        SOURCE_IP_ADDRESS_ATTR,
        DESTINATION_IP_ADDRESS_ATTR,
        SOURCE_PORT_ATTR,
        DESTINATION_PORT_ATTR,
        ACTION_ATTR,
        ENABLED_ATTR,
        POSITION,
        TENANT_ID,
    ) = (
        'name',
        'description',
        'firewall_policy_id',
        'shared',
        'protocol',
        'ip_version',
        'source_ip_address',
        'destination_ip_address',
        'source_port',
        'destination_port',
        'action',
        'enabled',
        'position',
        'tenant_id',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name for the firewall rule.'),
                          update_allowed=True),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description for the firewall rule.'),
                          update_allowed=True),
        SHARED:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether this rule should be shared across all tenants.'),
            default=False,
            update_allowed=True),
        PROTOCOL:
        properties.Schema(
            properties.Schema.STRING,
            _('Protocol for the firewall rule.'),
            constraints=[
                constraints.AllowedValues(['tcp', 'udp', 'icmp', 'any']),
            ],
            default='any',
            update_allowed=True,
        ),
        IP_VERSION:
        properties.Schema(properties.Schema.STRING,
                          _('Internet protocol version.'),
                          default='4',
                          constraints=[
                              constraints.AllowedValues(['4', '6']),
                          ],
                          update_allowed=True),
        SOURCE_IP_ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('Source IP address or CIDR.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('net_cidr')]),
        DESTINATION_IP_ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('Destination IP address or CIDR.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('net_cidr')]),
        SOURCE_PORT:
        properties.Schema(properties.Schema.STRING,
                          _('Source port number or a range.'),
                          update_allowed=True),
        DESTINATION_PORT:
        properties.Schema(properties.Schema.STRING,
                          _('Destination port number or a range.'),
                          update_allowed=True),
        ACTION:
        properties.Schema(
            properties.Schema.STRING,
            _('Action to be performed on the traffic matching the rule.'),
            default='deny',
            constraints=[
                constraints.AllowedValues(['allow', 'deny']),
            ],
            update_allowed=True),
        ENABLED:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('Whether this rule should be enabled.'),
                          default=True,
                          update_allowed=True),
    }

    attributes_schema = {
        NAME_ATTR:
        attributes.Schema(_('Name for the firewall rule.'),
                          type=attributes.Schema.STRING),
        DESCRIPTION_ATTR:
        attributes.Schema(_('Description of the firewall rule.'),
                          type=attributes.Schema.STRING),
        FIREWALL_POLICY_ID:
        attributes.Schema(_(
            'Unique identifier of the firewall policy to which this '
            'firewall rule belongs.'),
                          type=attributes.Schema.STRING),
        SHARED_ATTR:
        attributes.Schema(_('Shared status of this firewall rule.'),
                          type=attributes.Schema.STRING),
        PROTOCOL_ATTR:
        attributes.Schema(_('Protocol value for this firewall rule.'),
                          type=attributes.Schema.STRING),
        IP_VERSION_ATTR:
        attributes.Schema(_('Ip_version for this firewall rule.'),
                          type=attributes.Schema.STRING),
        SOURCE_IP_ADDRESS_ATTR:
        attributes.Schema(_('Source ip_address for this firewall rule.'),
                          type=attributes.Schema.STRING),
        DESTINATION_IP_ADDRESS_ATTR:
        attributes.Schema(_('Destination ip_address for this firewall rule.'),
                          type=attributes.Schema.STRING),
        SOURCE_PORT_ATTR:
        attributes.Schema(_('Source port range for this firewall rule.'),
                          type=attributes.Schema.STRING),
        DESTINATION_PORT_ATTR:
        attributes.Schema(_('Destination port range for this firewall rule.'),
                          type=attributes.Schema.STRING),
        ACTION_ATTR:
        attributes.Schema(_('Allow or deny action for this firewall rule.'),
                          type=attributes.Schema.STRING),
        ENABLED_ATTR:
        attributes.Schema(
            _('Indicates whether this firewall rule is enabled or not.'),
            type=attributes.Schema.STRING),
        POSITION:
        attributes.Schema(
            _('Position of the rule within the firewall policy.'),
            type=attributes.Schema.STRING),
        TENANT_ID:
        attributes.Schema(_('Id of the tenant owning the firewall rule.'),
                          type=attributes.Schema.STRING),
    }

    def _show_resource(self):
        return self.client().show_firewall_rule(
            self.resource_id)['firewall_rule']

    def handle_create(self):
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        if props.get(self.PROTOCOL) == 'any':
            props[self.PROTOCOL] = None
        firewall_rule = self.client().create_firewall_rule(
            {'firewall_rule': props})['firewall_rule']
        self.resource_id_set(firewall_rule['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            if prop_diff.get(self.PROTOCOL) == 'any':
                prop_diff[self.PROTOCOL] = None
            self.client().update_firewall_rule(self.resource_id,
                                               {'firewall_rule': prop_diff})

    def handle_delete(self):
        try:
            self.client().delete_firewall_rule(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
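
Note how handle_create() and handle_update() above map the template-level protocol value 'any' to None before calling Neutron, since FWaaS represents "match any protocol" as a null value. A small illustration with invented property values:

props = {'name': 'allow-https', 'protocol': 'any', 'action': 'allow',
         'destination_port': '443', 'enabled': True}
if props.get('protocol') == 'any':
    props['protocol'] = None   # Neutron FWaaS expects null for "any protocol"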
Example #15
class SaharaJob(signal_responder.SignalResponder, resource.Resource):
    """A resource for creating Sahara Job.

    A job specifies the type of the job and lists all of the individual
    job binary objects. Can be launched using resource-signal.
    """

    support_status = support.SupportStatus(version='8.0.0')

    PROPERTIES = (NAME, TYPE, MAINS, LIBS, DESCRIPTION, DEFAULT_EXECUTION_DATA,
                  IS_PUBLIC,
                  IS_PROTECTED) = ('name', 'type', 'mains', 'libs',
                                   'description', 'default_execution_data',
                                   'is_public', 'is_protected')

    _EXECUTION_DATA_KEYS = (CLUSTER, INPUT, OUTPUT, CONFIGS, PARAMS, ARGS,
                            IS_PUBLIC,
                            INTERFACE) = ('cluster', 'input', 'output',
                                          'configs', 'params', 'args',
                                          'is_public', 'interface')

    ATTRIBUTES = (EXECUTIONS,
                  DEFAULT_EXECUTION_URL) = ('executions',
                                            'default_execution_url')

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _("Name of the job."),
                          constraints=[
                              constraints.Length(min=1, max=50),
                              constraints.AllowedPattern(SAHARA_NAME_REGEX),
                          ],
                          update_allowed=True),
        TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _("Type of the job."),
            constraints=[constraints.CustomConstraint('sahara.job_type')],
            required=True),
        MAINS:
        properties.Schema(
            properties.Schema.LIST,
            _("IDs or names of job's main job binary. In case of specific "
              "Sahara service, this property designed as a list, but accepts "
              "only one item."),
            schema=properties.Schema(
                properties.Schema.STRING,
                _("ID of job's main job binary."),
                constraints=[
                    constraints.CustomConstraint('sahara.job_binary')
                ]),
            constraints=[constraints.Length(max=1)],
            default=[]),
        LIBS:
        properties.Schema(
            properties.Schema.LIST,
            _("IDs or names of job's lib job binaries."),
            schema=properties.Schema(
                properties.Schema.STRING,
                constraints=[
                    constraints.CustomConstraint('sahara.job_binary')
                ]),
            default=[]),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description of the job.'),
                          update_allowed=True),
        IS_PUBLIC:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('If True, job will be shared across the tenants.'),
                          update_allowed=True,
                          default=False),
        IS_PROTECTED:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('If True, job will be protected from modifications and '
              'can not be deleted until this property is set to False.'),
            update_allowed=True,
            default=False),
        DEFAULT_EXECUTION_DATA:
        properties.Schema(
            properties.Schema.MAP,
            _('Default execution data to use when the job is launched via '
              'a signal.'),
            schema={
                CLUSTER:
                properties.Schema(
                    properties.Schema.STRING,
                    _('ID or name of the cluster to run the job in.'),
                    constraints=[
                        constraints.CustomConstraint('sahara.cluster')
                    ],
                    required=True),
                INPUT:
                properties.Schema(
                    properties.Schema.STRING,
                    _('ID or name of the input data source.'),
                    constraints=[
                        constraints.CustomConstraint('sahara.data_source')
                    ]),
                OUTPUT:
                properties.Schema(
                    properties.Schema.STRING,
                    _('ID or name of the output data source.'),
                    constraints=[
                        constraints.CustomConstraint('sahara.data_source')
                    ]),
                CONFIGS:
                properties.Schema(properties.Schema.MAP,
                                  _('Config parameters to add to the job.'),
                                  default={}),
                PARAMS:
                properties.Schema(properties.Schema.MAP,
                                  _('Parameters to add to the job.'),
                                  default={}),
                ARGS:
                properties.Schema(properties.Schema.LIST,
                                  _('Arguments to add to the job.'),
                                  schema=properties.Schema(
                                      properties.Schema.STRING, ),
                                  default=[]),
                IS_PUBLIC:
                properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('If True, execution will be shared across the tenants.'),
                    default=False),
                INTERFACE:
                properties.Schema(properties.Schema.MAP,
                                  _('Interface arguments to add to the job.'),
                                  default={})
            },
            update_allowed=True)
    }

    attributes_schema = {
        DEFAULT_EXECUTION_URL:
        attributes.Schema(_("A signed url to create execution specified in "
                            "default_execution_data property."),
                          type=attributes.Schema.STRING),
        EXECUTIONS:
        attributes.Schema(_("List of the job executions."),
                          type=attributes.Schema.LIST)
    }

    default_client_name = 'sahara'

    entity = 'jobs'

    def translation_rules(self, properties):
        return [
            translation.TranslationRule(properties,
                                        translation.TranslationRule.RESOLVE,
                                        [self.MAINS],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resource_by_name_or_id',
                                        entity='job_binaries'),
            translation.TranslationRule(properties,
                                        translation.TranslationRule.RESOLVE,
                                        [self.LIBS],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resource_by_name_or_id',
                                        entity='job_binaries'),
            translation.TranslationRule(
                properties,
                translation.TranslationRule.RESOLVE,
                [self.DEFAULT_EXECUTION_DATA, self.CLUSTER],
                client_plugin=self.client_plugin(),
                finder='find_resource_by_name_or_id',
                entity='clusters'),
            translation.TranslationRule(
                properties,
                translation.TranslationRule.RESOLVE,
                [self.DEFAULT_EXECUTION_DATA, self.INPUT],
                client_plugin=self.client_plugin(),
                finder='find_resource_by_name_or_id',
                entity='data_sources'),
            translation.TranslationRule(
                properties,
                translation.TranslationRule.RESOLVE,
                [self.DEFAULT_EXECUTION_DATA, self.OUTPUT],
                client_plugin=self.client_plugin(),
                finder='find_resource_by_name_or_id',
                entity='data_sources')
        ]

    def handle_create(self):
        args = {
            'name': self.properties[self.NAME]
            or self.physical_resource_name(),
            'type': self.properties[self.TYPE],
            # Note: sahara accepts only one main binary but schema demands
            # that it should be in a list.
            'mains': self.properties[self.MAINS],
            'libs': self.properties[self.LIBS],
            'description': self.properties[self.DESCRIPTION],
            'is_public': self.properties[self.IS_PUBLIC],
            'is_protected': self.properties[self.IS_PROTECTED]
        }

        job = self.client().jobs.create(**args)
        self.resource_id_set(job.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if self.NAME in prop_diff:
            name = prop_diff[self.NAME] or self.physical_resource_name()
            prop_diff[self.NAME] = name
        if self.DEFAULT_EXECUTION_DATA in prop_diff:
            del prop_diff[self.DEFAULT_EXECUTION_DATA]

        if prop_diff:
            self.client().jobs.update(self.resource_id, **prop_diff)

    def handle_signal(self, details):
        data = details or self.properties.get(self.DEFAULT_EXECUTION_DATA)
        execution_args = {
            'job_id': self.resource_id,
            'cluster_id': data.get(self.CLUSTER),
            'input_id': data.get(self.INPUT),
            'output_id': data.get(self.OUTPUT),
            'is_public': data.get(self.IS_PUBLIC),
            'interface': data.get(self.INTERFACE),
            'configs': {
                'configs': data.get(self.CONFIGS),
                'params': data.get(self.PARAMS),
                'args': data.get(self.ARGS)
            },
            'is_protected': False
        }
        try:
            self.client().job_executions.create(**execution_args)
        except Exception as ex:
            raise exception.ResourceFailure(ex, self)

    def handle_delete(self):
        if self.resource_id is None:
            return

        with self.client_plugin().ignore_not_found:
            job_exs = self.client().job_executions.find(id=self.resource_id)
            for ex in job_exs:
                self.client().job_executions.delete(ex.id)
        super(SaharaJob, self).handle_delete()

    def _resolve_attribute(self, name):
        if name == self.DEFAULT_EXECUTION_URL:
            return six.text_type(self._get_ec2_signed_url())
        elif name == self.EXECUTIONS:
            try:
                job_execs = self.client().job_executions.find(
                    id=self.resource_id)
            except Exception:
                return []
            return [execution.to_dict() for execution in job_execs]
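
handle_signal() above takes either the signal details or the default_execution_data property and builds a job-execution request from them. A rough example of the details payload it expects, with placeholder ids (the keys mirror _EXECUTION_DATA_KEYS):

details = {
    'cluster': 'cluster-id-or-name',
    'input': 'input-data-source-id',
    'output': 'output-data-source-id',
    'configs': {'mapred.reduce.tasks': '1'},
    'params': {},
    'args': ['arg1', 'arg2'],
    'is_public': False,
    'interface': {},
}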
Example #16
class RouterInterface(neutron.NeutronResource):
    """A resource for managing Neutron router interfaces.

    Router interfaces associate routers with existing subnets or ports.
    """

    required_service_extension = 'router'

    PROPERTIES = (ROUTER, ROUTER_ID, SUBNET_ID, SUBNET, PORT_ID,
                  PORT) = ('router', 'router_id', 'subnet_id', 'subnet',
                           'port_id', 'port')

    properties_schema = {
        ROUTER:
        properties.Schema(
            properties.Schema.STRING,
            _('The router.'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.router')],
        ),
        ROUTER_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of the router.'),
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_('Use property %s.') % ROUTER,
                    version='2015.1',
                    previous_status=support.SupportStatus(version='2013.1'))),
            constraints=[constraints.CustomConstraint('neutron.router')],
        ),
        SUBNET_ID:
        properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                message=_('Use property %s.') % SUBNET,
                version='5.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED, version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.subnet')]),
        SUBNET:
        properties.Schema(
            properties.Schema.STRING,
            _('The subnet. Either subnet or port should be '
              'specified.'),
            constraints=[constraints.CustomConstraint('neutron.subnet')]),
        PORT_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The port id. Either subnet or port_id should be specified.'),
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_('Use property %s.') % PORT,
                    version='2015.1',
                    previous_status=support.SupportStatus(version='2014.1'))),
            constraints=[constraints.CustomConstraint('neutron.port')]),
        PORT:
        properties.Schema(
            properties.Schema.STRING,
            _('The port. Either subnet or port should be specified.'),
            support_status=support.SupportStatus(version='2015.1'),
            constraints=[constraints.CustomConstraint('neutron.port')])
    }

    def translation_rules(self, props):
        return [
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.PORT],
                                        value_path=[self.PORT_ID]),
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.ROUTER],
                                        value_path=[self.ROUTER_ID]),
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.SUBNET],
                                        value_path=[self.SUBNET_ID]),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.PORT],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='port'),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.ROUTER],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='router'),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.SUBNET],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='subnet')
        ]

    def validate(self):
        """Validate any of the provided params."""
        super(RouterInterface, self).validate()

        prop_subnet_exists = self.properties.get(self.SUBNET) is not None

        prop_port_exists = self.properties.get(self.PORT) is not None

        if prop_subnet_exists and prop_port_exists:
            raise exception.ResourcePropertyConflict(self.SUBNET, self.PORT)

        if not prop_subnet_exists and not prop_port_exists:
            raise exception.PropertyUnspecifiedError(self.SUBNET, self.PORT)

    def handle_create(self):
        router_id = dict(self.properties).get(self.ROUTER)
        key = 'subnet_id'
        value = dict(self.properties).get(self.SUBNET)
        if not value:
            key = 'port_id'
            value = dict(self.properties).get(self.PORT)
        self.client().add_interface_router(router_id, {key: value})
        self.resource_id_set('%s:%s=%s' % (router_id, key, value))

    def handle_delete(self):
        if not self.resource_id:
            return
        tokens = self.resource_id.replace('=', ':').split(':')
        if len(tokens) == 2:  # compatible with old data
            tokens.insert(1, 'subnet_id')
        (router_id, key, value) = tokens
        with self.client_plugin().ignore_not_found:
            self.client().remove_interface_router(router_id, {key: value})
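
The resource id above packs the router id, the lookup key and the value into a single '<router_id>:<key>=<value>' string; handle_delete() reverses that, inserting 'subnet_id' for ids written by older versions that stored only '<router_id>:<value>'. A standalone sketch of the round trip with invented ids:

# encode, as in handle_create()
router_id, key, value = 'router-123', 'subnet_id', 'subnet-456'
resource_id = '%s:%s=%s' % (router_id, key, value)

# decode, as in handle_delete()
tokens = resource_id.replace('=', ':').split(':')
if len(tokens) == 2:               # old-format id without the key segment
    tokens.insert(1, 'subnet_id')
router_id, key, value = tokens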
Example #17
class Group(resource.Resource):
    """Represents a scaling group."""

    # pyrax differs drastically from the actual Auto Scale API. We'll prefer
    # the true API here, but since pyrax doesn't support the full flexibility
    # of the API, we'll have to restrict what users can provide.

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    # properties are identical to the API POST /groups.
    PROPERTIES = (
        GROUP_CONFIGURATION,
        LAUNCH_CONFIGURATION,
    ) = (
        'groupConfiguration',
        'launchConfiguration',
    )

    _GROUP_CONFIGURATION_KEYS = (
        GROUP_CONFIGURATION_MAX_ENTITIES,
        GROUP_CONFIGURATION_COOLDOWN,
        GROUP_CONFIGURATION_NAME,
        GROUP_CONFIGURATION_MIN_ENTITIES,
        GROUP_CONFIGURATION_METADATA,
    ) = (
        'maxEntities',
        'cooldown',
        'name',
        'minEntities',
        'metadata',
    )

    _LAUNCH_CONFIG_KEYS = (
        LAUNCH_CONFIG_ARGS,
        LAUNCH_CONFIG_TYPE,
    ) = (
        'args',
        'type',
    )

    _LAUNCH_CONFIG_ARGS_KEYS = (
        LAUNCH_CONFIG_ARGS_LOAD_BALANCERS,
        LAUNCH_CONFIG_ARGS_SERVER,
        LAUNCH_CONFIG_ARGS_STACK,
    ) = (
        'loadBalancers',
        'server',
        'stack',
    )

    _LAUNCH_CONFIG_ARGS_LOAD_BALANCER_KEYS = (
        LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID,
        LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT,
    ) = (
        'loadBalancerId',
        'port',
    )

    _LAUNCH_CONFIG_ARGS_SERVER_KEYS = (
        LAUNCH_CONFIG_ARGS_SERVER_NAME, LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF,
        LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF,
        LAUNCH_CONFIG_ARGS_SERVER_METADATA,
        LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY,
        LAUNCH_CONFIG_ARGS_SERVER_NETWORKS,
        LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG,
        LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME,
        LAUNCH_CONFIG_ARGS_SERVER_USER_DATA,
        LAUNCH_CONFIG_ARGS_SERVER_CDRIVE) = (
            'name',
            'flavorRef',
            'imageRef',
            'metadata',
            'personality',
            'networks',
            'diskConfig',  # technically maps to OS-DCF:diskConfig
            'key_name',
            'user_data',
            'config_drive')

    _LAUNCH_CONFIG_ARGS_SERVER_NETWORK_KEYS = (
        LAUNCH_CONFIG_ARGS_SERVER_NETWORK_UUID, ) = ('uuid', )

    _LAUNCH_CONFIG_ARGS_STACK_KEYS = (
        LAUNCH_CONFIG_ARGS_STACK_TEMPLATE,
        LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL,
        LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK,
        LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT, LAUNCH_CONFIG_ARGS_STACK_FILES,
        LAUNCH_CONFIG_ARGS_STACK_PARAMETERS,
        LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS) = ('template', 'template_url',
                                                  'disable_rollback',
                                                  'environment', 'files',
                                                  'parameters', 'timeout_mins')

    _launch_configuration_args_schema = {
        LAUNCH_CONFIG_ARGS_LOAD_BALANCERS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of load balancers to hook the '
              'server up to. If not specified, no '
              'load balancing will be configured.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID:
                    properties.Schema(properties.Schema.STRING,
                                      _('ID of the load balancer.'),
                                      required=True),
                    LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('Server port to connect the load balancer to.')),
                },
            )),
        LAUNCH_CONFIG_ARGS_SERVER:
        properties.Schema(
            properties.Schema.MAP,
            _('Server creation arguments, as accepted by the Cloud Servers '
              'server creation API.'),
            required=False,
            schema={
                LAUNCH_CONFIG_ARGS_SERVER_NAME:
                properties.Schema(properties.Schema.STRING,
                                  _('Server name.'),
                                  required=True),
                LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF:
                properties.Schema(
                    properties.Schema.STRING,
                    _('The ID or name of the flavor to boot onto.'),
                    constraints=[constraints.CustomConstraint('nova.flavor')],
                    required=True),
                LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF:
                properties.Schema(
                    properties.Schema.STRING,
                    _('The ID or name of the image to boot with.'),
                    constraints=[constraints.CustomConstraint('glance.image')],
                    required=True),
                LAUNCH_CONFIG_ARGS_SERVER_METADATA:
                properties.Schema(properties.Schema.MAP,
                                  _('Metadata key and value pairs.')),
                LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY:
                properties.Schema(properties.Schema.MAP,
                                  _('File path and contents.')),
                LAUNCH_CONFIG_ARGS_SERVER_CDRIVE:
                properties.Schema(properties.Schema.BOOLEAN,
                                  _('Enable config drive on the instance.')),
                LAUNCH_CONFIG_ARGS_SERVER_USER_DATA:
                properties.Schema(
                    properties.Schema.STRING,
                    _('User data for bootstrapping the instance.')),
                LAUNCH_CONFIG_ARGS_SERVER_NETWORKS:
                properties.Schema(
                    properties.Schema.LIST,
                    _('Networks to attach to. If unspecified, the instance '
                      'will be attached to the public Internet and private '
                      'ServiceNet networks.'),
                    schema=properties.Schema(
                        properties.Schema.MAP,
                        schema={
                            LAUNCH_CONFIG_ARGS_SERVER_NETWORK_UUID:
                            properties.Schema(
                                properties.Schema.STRING,
                                _('UUID of network to attach to.'),
                                required=True)
                        })),
                LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG:
                properties.Schema(
                    properties.Schema.STRING,
                    _('Configuration specifying the partition layout. AUTO to '
                      'create a partition utilizing the entire disk, and '
                      'MANUAL to create a partition matching the source '
                      'image.'),
                    constraints=[
                        constraints.AllowedValues(['AUTO', 'MANUAL']),
                    ]),
                LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME:
                properties.Schema(
                    properties.Schema.STRING,
                    _('Name of a previously created SSH keypair to allow '
                      'key-based authentication to the server.')),
            },
        ),
        LAUNCH_CONFIG_ARGS_STACK:
        properties.Schema(
            properties.Schema.MAP,
            _('The attributes that Auto Scale uses to create a new stack. The '
              'attributes that you specify for the stack entity apply to all '
              'new stacks in the scaling group. Note the stack arguments are '
              'directly passed to Heat when creating a stack.'),
            schema={
                LAUNCH_CONFIG_ARGS_STACK_TEMPLATE:
                properties.Schema(
                    properties.Schema.STRING,
                    _('The template that describes the stack. Either the '
                      'template or template_url property must be specified.'),
                ),
                LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL:
                properties.Schema(
                    properties.Schema.STRING,
                    _('A URI to a template. Either the template or '
                      'template_url property must be specified.')),
                LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK:
                properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('Keep the resources that have been created if the stack '
                      'fails to create. Defaults to True.'),
                    default=True),
                LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT:
                properties.Schema(
                    properties.Schema.MAP,
                    _('The environment for the stack.'),
                ),
                LAUNCH_CONFIG_ARGS_STACK_FILES:
                properties.Schema(
                    properties.Schema.MAP,
                    _('The contents of files that the template references.')),
                LAUNCH_CONFIG_ARGS_STACK_PARAMETERS:
                properties.Schema(
                    properties.Schema.MAP,
                    _('Key/value pairs of the parameters and their values to '
                      'pass to the parameters in the template.')),
                LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS:
                properties.Schema(properties.Schema.INTEGER,
                                  _('The stack creation timeout in minutes.'))
            })
    }

    properties_schema = {
        GROUP_CONFIGURATION:
        properties.Schema(
            properties.Schema.MAP,
            _('Group configuration.'),
            schema={
                GROUP_CONFIGURATION_MAX_ENTITIES:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('Maximum number of entities in this scaling group.'),
                    required=True),
                GROUP_CONFIGURATION_COOLDOWN:
                properties.Schema(
                    properties.Schema.NUMBER,
                    _('Number of seconds after capacity changes during '
                      'which further capacity changes are disabled.'),
                    required=True),
                GROUP_CONFIGURATION_NAME:
                properties.Schema(properties.Schema.STRING,
                                  _('Name of the scaling group.'),
                                  required=True),
                GROUP_CONFIGURATION_MIN_ENTITIES:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('Minimum number of entities in this scaling group.'),
                    required=True),
                GROUP_CONFIGURATION_METADATA:
                properties.Schema(
                    properties.Schema.MAP,
                    _('Arbitrary key/value metadata to associate with '
                      'this group.')),
            },
            required=True,
            update_allowed=True),
        LAUNCH_CONFIGURATION:
        properties.Schema(
            properties.Schema.MAP,
            _('Launch configuration.'),
            schema={
                LAUNCH_CONFIG_ARGS:
                properties.Schema(properties.Schema.MAP,
                                  _('Type-specific launch arguments.'),
                                  schema=_launch_configuration_args_schema,
                                  required=True),
                LAUNCH_CONFIG_TYPE:
                properties.Schema(
                    properties.Schema.STRING,
                    _('Launch configuration method. Only launch_server and '
                      'launch_stack are currently supported.'),
                    required=True,
                    constraints=[
                        constraints.AllowedValues(
                            ['launch_server', 'launch_stack']),
                    ]),
            },
            required=True,
            update_allowed=True),
        # We don't allow scaling policies to be specified here, despite the
        # fact that the API supports it. Users should use the ScalingPolicy
        # resource.
    }

    def _get_group_config_args(self, groupconf):
        """Get the groupConfiguration-related pyrax arguments."""
        return dict(
            name=groupconf[self.GROUP_CONFIGURATION_NAME],
            cooldown=groupconf[self.GROUP_CONFIGURATION_COOLDOWN],
            min_entities=groupconf[self.GROUP_CONFIGURATION_MIN_ENTITIES],
            max_entities=groupconf[self.GROUP_CONFIGURATION_MAX_ENTITIES],
            metadata=groupconf.get(self.GROUP_CONFIGURATION_METADATA, None))

    def _get_launch_config_server_args(self, launchconf):
        lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
        server_args = lcargs[self.LAUNCH_CONFIG_ARGS_SERVER]
        lb_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCERS)
        lbs = copy.deepcopy(lb_args)
        for lb in lbs:
            # if the port is not specified, the lbid must be that of a
            # RackConnectV3 lb pool.
            if not lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT]:
                del lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT]
                continue
            lbid = int(lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID])
            lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID] = lbid
        personality = server_args.get(
            self.LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY)
        if personality:
            personality = [{
                'path': k,
                'contents': v
            } for k, v in personality.items()]
        user_data = server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_USER_DATA)
        cdrive = (server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_CDRIVE)
                  or bool(user_data is not None and len(user_data.strip())))
        image_id = self.client_plugin('glance').find_image_by_name_or_id(
            server_args[self.LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF])
        flavor_id = self.client_plugin('nova').find_flavor_by_name_or_id(
            server_args[self.LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF])

        return dict(
            launch_config_type=launchconf[self.LAUNCH_CONFIG_TYPE],
            server_name=server_args[self.LAUNCH_CONFIG_ARGS_SERVER_NAME],
            image=image_id,
            flavor=flavor_id,
            disk_config=server_args.get(
                self.LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG),
            metadata=server_args.get(
                self.LAUNCH_CONFIG_ARGS_SERVER_METADATA),
            config_drive=cdrive,
            user_data=user_data,
            personality=personality,
            networks=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_NETWORKS),
            load_balancers=lbs,
            key_name=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME),
        )

    def _get_launch_config_stack_args(self, launchconf):
        lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
        stack_args = lcargs[self.LAUNCH_CONFIG_ARGS_STACK]
        return dict(
            launch_config_type=launchconf[self.LAUNCH_CONFIG_TYPE],
            template=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE],
            template_url=stack_args[
                self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL],
            disable_rollback=stack_args[
                self.LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK],
            environment=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT],
            files=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_FILES],
            parameters=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_PARAMETERS],
            timeout_mins=stack_args[
                self.LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS])

    def _get_launch_config_args(self, launchconf):
        """Get the launchConfiguration-related pyrax arguments."""
        if launchconf[self.LAUNCH_CONFIG_ARGS].get(
                self.LAUNCH_CONFIG_ARGS_SERVER):
            return self._get_launch_config_server_args(launchconf)
        else:
            return self._get_launch_config_stack_args(launchconf)

    def _get_create_args(self):
        """Get pyrax-style arguments for creating a scaling group."""
        args = self._get_group_config_args(
            self.properties[self.GROUP_CONFIGURATION])
        args['group_metadata'] = args.pop('metadata')
        args.update(
            self._get_launch_config_args(
                self.properties[self.LAUNCH_CONFIGURATION]))
        return args

    def handle_create(self):
        """Create the autoscaling group and set resource_id.

        The resource_id is set to the resulting group's ID.
        """
        asclient = self.auto_scale()
        group = asclient.create(**self._get_create_args())
        self.resource_id_set(str(group.id))

    def handle_check(self):
        self.auto_scale().get(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update the group configuration and the launch configuration."""
        asclient = self.auto_scale()
        if self.GROUP_CONFIGURATION in prop_diff:
            args = self._get_group_config_args(
                prop_diff[self.GROUP_CONFIGURATION])
            asclient.replace(self.resource_id, **args)
        if self.LAUNCH_CONFIGURATION in prop_diff:
            args = self._get_launch_config_args(
                prop_diff[self.LAUNCH_CONFIGURATION])
            asclient.replace_launch_config(self.resource_id, **args)

    def handle_delete(self):
        """Delete the scaling group.

        Since Auto Scale doesn't allow deleting a group until all its servers
        are gone, we must set the minEntities and maxEntities of the group to 0
        and then keep trying the delete until Auto Scale has deleted all the
        servers and the delete will succeed.
        """
        if self.resource_id is None:
            return
        asclient = self.auto_scale()
        args = self._get_group_config_args(
            self.properties[self.GROUP_CONFIGURATION])
        args['min_entities'] = 0
        args['max_entities'] = 0
        try:
            asclient.replace(self.resource_id, **args)
        except NotFound:
            pass

    def check_delete_complete(self, result):
        """Try the delete operation until it succeeds."""
        if self.resource_id is None:
            return True
        try:
            self.auto_scale().delete(self.resource_id)
        except Forbidden:
            return False
        except NotFound:
            return True
        else:
            return True

    def _check_rackconnect_v3_pool_exists(self, pool_id):
        pools = self.client("rackconnect").list_load_balancer_pools()
        if pool_id in (p.id for p in pools):
            return True
        return False

    def validate(self):
        super(Group, self).validate()
        launchconf = self.properties[self.LAUNCH_CONFIGURATION]
        lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]

        server_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_SERVER)
        st_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_STACK)

        # launch_server and launch_stack are required and mutually exclusive.
        if ((not server_args and not st_args) or (server_args and st_args)):
            msg = (
                _('Must provide exactly one of %(server)s or %(stack)s '
                  'in %(conf)s') %
                {
                    'server': self.LAUNCH_CONFIG_ARGS_SERVER,
                    'stack': self.LAUNCH_CONFIG_ARGS_STACK,
                    'conf': self.LAUNCH_CONFIGURATION
                })
            raise exception.StackValidationFailed(msg)

        lb_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCERS)
        lbs = copy.deepcopy(lb_args)
        for lb in lbs:
            lb_port = lb.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT)
            lb_id = lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID]
            if not lb_port:
                # check if lb id is a valid RCV3 pool id
                if not self._check_rackconnect_v3_pool_exists(lb_id):
                    msg = _('Could not find RackConnectV3 pool '
                            'with id %s') % (lb_id)
                    raise exception.StackValidationFailed(msg)

        if st_args:
            st_tmpl = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE)
            st_tmpl_url = st_args.get(
                self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL)
            st_env = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT)
            # template and template_url are required and mutually exclusive.
            if ((not st_tmpl and not st_tmpl_url)
                    or (st_tmpl and st_tmpl_url)):
                msg = _('Must provide exactly one of template or '
                        'template_url.')
                raise exception.StackValidationFailed(msg)

            if st_tmpl:
                st_files = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_FILES)
                try:
                    tmpl = template_format.simple_parse(st_tmpl)
                    templatem.Template(tmpl, files=st_files, env=st_env)
                except Exception as exc:
                    msg = (_('Encountered error while loading template: %s') %
                           six.text_type(exc))
                    raise exception.StackValidationFailed(msg)

    def auto_scale(self):
        return self.client('auto_scale')
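
# Standalone sketch (editorial addition) of the argument mapping performed by
# _get_group_config_args() and _get_create_args() above: the groupConfiguration
# property uses the Auto Scale API's camelCase keys, the resulting pyrax
# arguments use snake_case, and handle_create() renames 'metadata' to
# 'group_metadata'. The values are illustrative only.
groupconf = {
    'name': 'web-workers',
    'cooldown': 60,
    'minEntities': 1,
    'maxEntities': 5,
    'metadata': {'role': 'web'},
}
args = dict(
    name=groupconf['name'],
    cooldown=groupconf['cooldown'],
    min_entities=groupconf['minEntities'],
    max_entities=groupconf['maxEntities'],
    metadata=groupconf.get('metadata'),
)
args['group_metadata'] = args.pop('metadata')
assert args['group_metadata'] == {'role': 'web'}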
Example #18
class RouterGateway(neutron.NeutronResource):

    support_status = support.SupportStatus(
        status=support.HIDDEN,
        message=_('Use the `external_gateway_info` property in '
                  'the router resource to set up the gateway.'),
        version='5.0.0',
        previous_status=support.SupportStatus(status=support.DEPRECATED,
                                              version='2014.1'))

    PROPERTIES = (
        ROUTER_ID,
        NETWORK_ID,
        NETWORK,
    ) = ('router_id', 'network_id', 'network')

    properties_schema = {
        ROUTER_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of the router.'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.router')]),
        NETWORK_ID:
        properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                message=_('Use property %s.') % NETWORK,
                version='9.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED, version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        NETWORK:
        properties.Schema(
            properties.Schema.STRING,
            _('External network for the gateway.'),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
    }

    def translation_rules(self, props):
        return [
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.NETWORK],
                                        value_path=[self.NETWORK_ID]),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.NETWORK],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='network')
        ]

    def add_dependencies(self, deps):
        super(RouterGateway, self).add_dependencies(deps)
        for resource in six.itervalues(self.stack):
            # depend on any RouterInterface in this template with the same
            # router_id as this router_id
            if resource.has_interface('OS::Neutron::RouterInterface'):
                try:
                    dep_router_id = resource.properties[RouterInterface.ROUTER]
                    router_id = self.properties[self.ROUTER_ID]
                except (ValueError, TypeError):
                    # Properties errors will be caught later in validation,
                    # where we can report them in their proper context.
                    continue
                if dep_router_id == router_id:
                    deps += (self, resource)
            # depend on any subnet in this template with the same network_id
            # as this network_id, as the gateway implicitly creates a port
            # on that subnet
            if resource.has_interface('OS::Neutron::Subnet'):
                try:
                    dep_network = resource.properties[subnet.Subnet.NETWORK]
                    network = self.properties[self.NETWORK]
                except (ValueError, TypeError):
                    # Properties errors will be caught later in validation,
                    # where we can report them in their proper context.
                    continue
                if dep_network == network:
                    deps += (self, resource)

    def handle_create(self):
        router_id = self.properties[self.ROUTER_ID]
        network_id = dict(self.properties).get(self.NETWORK)
        self.client().add_gateway_router(router_id, {'network_id': network_id})
        self.resource_id_set('%s:%s' % (router_id, network_id))

    def handle_delete(self):
        if not self.resource_id:
            return

        (router_id, network_id) = self.resource_id.split(':')
        with self.client_plugin().ignore_not_found:
            self.client().remove_gateway_router(router_id)
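
# Standalone sketch (editorial addition) of the '<router_id>:<network_id>'
# resource_id convention used above: handle_create() joins the two IDs and
# handle_delete() splits them again before removing the gateway. The IDs are
# illustrative.
router_id, network_id = 'r-123', 'n-456'
resource_id = '%s:%s' % (router_id, network_id)
assert resource_id.split(':') == ['r-123', 'n-456']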
Example #19
class ElasticIpAssociation(resource.Resource):
    PROPERTIES = (
        INSTANCE_ID,
        EIP,
        ALLOCATION_ID,
        NETWORK_INTERFACE_ID,
    ) = (
        'InstanceId',
        'EIP',
        'AllocationId',
        'NetworkInterfaceId',
    )

    properties_schema = {
        INSTANCE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('Instance ID to associate with EIP specified by EIP property.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('nova.server')]),
        EIP:
        properties.Schema(properties.Schema.STRING,
                          _('EIP address to associate with instance.'),
                          update_allowed=True),
        ALLOCATION_ID:
        properties.Schema(properties.Schema.STRING,
                          _('Allocation ID for VPC EIP address.'),
                          update_allowed=True),
        NETWORK_INTERFACE_ID:
        properties.Schema(properties.Schema.STRING,
                          _('Network interface ID to associate with EIP.'),
                          update_allowed=True),
    }

    def FnGetRefId(self):
        return self.physical_resource_name_or_FnGetRefId()

    def validate(self):
        '''
        Validate any of the provided parameters
        '''
        super(ElasticIpAssociation, self).validate()
        eip = self.properties[self.EIP]
        allocation_id = self.properties[self.ALLOCATION_ID]
        instance_id = self.properties[self.INSTANCE_ID]
        ni_id = self.properties[self.NETWORK_INTERFACE_ID]
        # exactly one of EIP and AllocationId must be provided
        if bool(eip) == bool(allocation_id):
            msg = _("Either 'EIP' or 'AllocationId' must be provided.")
            raise exception.StackValidationFailed(message=msg)
        # if EIP is specified, InstanceId must also be specified
        if eip and not instance_id:
            msg = _("Must specify 'InstanceId' if you specify 'EIP'.")
            raise exception.StackValidationFailed(message=msg)
        # at least one of InstanceId and NetworkInterfaceId must be
        # provided
        if not instance_id and not ni_id:
            raise exception.PropertyUnspecifiedError('InstanceId',
                                                     'NetworkInterfaceId')

    def _get_port_info(self, ni_id=None, instance_id=None):
        port_id = None
        port_rsrc = None
        if ni_id:
            port_rsrc = self.neutron().list_ports(id=ni_id)['ports'][0]
            port_id = ni_id
        elif instance_id:
            ports = self.neutron().list_ports(device_id=instance_id)
            port_rsrc = ports['ports'][0]
            port_id = port_rsrc['id']

        return port_id, port_rsrc

    def _neutron_add_gateway_router(self, float_id, network_id):
        router = vpc.VPC.router_for_vpc(self.neutron(), network_id)
        if router is not None:
            floatingip = self.neutron().show_floatingip(float_id)
            floating_net_id = floatingip['floatingip']['floating_network_id']
            self.neutron().add_gateway_router(router['id'],
                                              {'network_id': floating_net_id})

    def _neutron_update_floating_ip(self,
                                    allocationId,
                                    port_id=None,
                                    ignore_not_found=False):
        try:
            self.neutron().update_floatingip(
                allocationId, {'floatingip': {
                    'port_id': port_id
                }})
        except Exception as e:
            if ignore_not_found:
                self.client_plugin('neutron').ignore_not_found(e)
            else:
                raise

    def _nova_remove_floating_ip(self,
                                 instance_id,
                                 eip,
                                 ignore_not_found=False):
        server = None
        try:
            server = self.nova().servers.get(instance_id)
            server.remove_floating_ip(eip)
        except Exception as e:
            is_not_found = self.client_plugin('nova').is_not_found(e)
            iue = self.client_plugin('nova').is_unprocessable_entity(e)
            if ((not ignore_not_found and is_not_found)
                    or (not is_not_found and not iue)):
                raise

        return server

    def _floatingIp_detach(self,
                           nova_ignore_not_found=False,
                           neutron_ignore_not_found=False):
        eip = self.properties[self.EIP]
        allocation_id = self.properties[self.ALLOCATION_ID]
        instance_id = self.properties[self.INSTANCE_ID]
        server = None
        if eip:
            # if an old EIP is set, remove it from the instance
            server = self._nova_remove_floating_ip(instance_id, eip,
                                                   nova_ignore_not_found)
        else:
            # otherwise, detach by clearing the port on the neutron floating IP
            self._neutron_update_floating_ip(allocation_id, None,
                                             neutron_ignore_not_found)

        return server

    def _handle_update_eipInfo(self, prop_diff):
        eip_update = prop_diff.get(self.EIP)
        allocation_id_update = prop_diff.get(self.ALLOCATION_ID)
        instance_id = self.properties[self.INSTANCE_ID]
        ni_id = self.properties[self.NETWORK_INTERFACE_ID]
        if eip_update:
            server = self._floatingIp_detach(neutron_ignore_not_found=True)
            if server:
                # then attach the updated EIP to the instance
                server.add_floating_ip(eip_update)
                self.resource_id_set(eip_update)
        elif allocation_id_update:
            self._floatingIp_detach(nova_ignore_not_found=True)
            port_id, port_rsrc = self._get_port_info(ni_id, instance_id)
            if not port_id or not port_rsrc:
                LOG.error(_LE('Port not specified.'))
                raise exception.NotFound(
                    _('Failed to update, cannot find port info.'))

            network_id = port_rsrc['network_id']
            self._neutron_add_gateway_router(allocation_id_update, network_id)
            self._neutron_update_floating_ip(allocation_id_update, port_id)
            self.resource_id_set(allocation_id_update)

    def _handle_update_portInfo(self, prop_diff):
        instance_id_update = prop_diff.get(self.INSTANCE_ID)
        ni_id_update = prop_diff.get(self.NETWORK_INTERFACE_ID)
        eip = self.properties[self.EIP]
        allocation_id = self.properties[self.ALLOCATION_ID]
        # when updating port info, there is no need to detach the port
        # from the old instance/floating IP.
        if eip:
            server = self.nova().servers.get(instance_id_update)
            server.add_floating_ip(eip)
        else:
            port_id, port_rsrc = self._get_port_info(ni_id_update,
                                                     instance_id_update)
            if not port_id or not port_rsrc:
                LOG.error(_LE('Port not specified.'))
                raise exception.NotFound(
                    _('Failed to update, cannot find port info.'))

            network_id = port_rsrc['network_id']
            self._neutron_add_gateway_router(allocation_id, network_id)
            self._neutron_update_floating_ip(allocation_id, port_id)

    def _validate_update_properties(self, prop_diff):
        # according to the AWS documentation, when AllocationId or EIP is
        # updated and the InstanceId or NetworkInterfaceId also changes,
        # the update must go through the replacement flow
        if self.ALLOCATION_ID in prop_diff or self.EIP in prop_diff:
            instance_id = prop_diff.get(self.INSTANCE_ID)
            ni_id = prop_diff.get(self.NETWORK_INTERFACE_ID)

            if instance_id or ni_id:
                raise resource.UpdateReplace(self.name)

        # likewise, when InstanceId or NetworkInterfaceId is updated and
        # the EIP or AllocationId also changes, the update must go through
        # the replacement flow
        if (self.INSTANCE_ID in prop_diff
                or self.NETWORK_INTERFACE_ID in prop_diff):
            eip = prop_diff.get(self.EIP)
            allocation_id = prop_diff.get(self.ALLOCATION_ID)
            if eip or allocation_id:
                raise resource.UpdateReplace(self.name)

    def handle_create(self):
        """Add a floating IP address to a server."""
        if self.properties[self.EIP]:
            server = self.nova().servers.get(self.properties[self.INSTANCE_ID])
            server.add_floating_ip(self.properties[self.EIP])
            self.resource_id_set(self.properties[self.EIP])
            LOG.debug(
                'ElasticIpAssociation '
                '%(instance)s.add_floating_ip(%(eip)s)', {
                    'instance': self.properties[self.INSTANCE_ID],
                    'eip': self.properties[self.EIP]
                })
        elif self.properties[self.ALLOCATION_ID]:
            ni_id = self.properties[self.NETWORK_INTERFACE_ID]
            instance_id = self.properties[self.INSTANCE_ID]
            port_id, port_rsrc = self._get_port_info(ni_id, instance_id)
            if not port_id or not port_rsrc:
                LOG.warn(_LW('Skipping association, resource not specified'))
                return

            float_id = self.properties[self.ALLOCATION_ID]
            network_id = port_rsrc['network_id']
            self._neutron_add_gateway_router(float_id, network_id)

            self._neutron_update_floating_ip(float_id, port_id)

            self.resource_id_set(float_id)

    def handle_delete(self):
        """Remove a floating IP address from a server or port."""
        if self.resource_id is None:
            return

        if self.properties[self.EIP]:
            instance_id = self.properties[self.INSTANCE_ID]
            eip = self.properties[self.EIP]
            self._nova_remove_floating_ip(instance_id,
                                          eip,
                                          ignore_not_found=True)
        elif self.properties[self.ALLOCATION_ID]:
            float_id = self.properties[self.ALLOCATION_ID]
            self._neutron_update_floating_ip(float_id,
                                             port_id=None,
                                             ignore_not_found=True)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            self._validate_update_properties(prop_diff)
            if self.ALLOCATION_ID in prop_diff or self.EIP in prop_diff:
                self._handle_update_eipInfo(prop_diff)
            elif (self.INSTANCE_ID in prop_diff
                  or self.NETWORK_INTERFACE_ID in prop_diff):
                self._handle_update_portInfo(prop_diff)
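
# Standalone sketch (editorial addition) of the update-vs-replace rule enforced
# by _validate_update_properties() above: changing EIP/AllocationId in the same
# update as InstanceId/NetworkInterfaceId forces replacement instead of an
# in-place update. Property names mirror the ones above; values are made up.
def _needs_replacement(prop_diff):
    if 'AllocationId' in prop_diff or 'EIP' in prop_diff:
        if prop_diff.get('InstanceId') or prop_diff.get('NetworkInterfaceId'):
            return True
    if 'InstanceId' in prop_diff or 'NetworkInterfaceId' in prop_diff:
        if prop_diff.get('EIP') or prop_diff.get('AllocationId'):
            return True
    return False

assert _needs_replacement({'EIP': '198.51.100.7', 'InstanceId': 'srv-1'})
assert not _needs_replacement({'EIP': '198.51.100.7'})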
Example #20
class ExtraRoute(neutron.NeutronResource):
    """Resource for specifying extra routes for Neutron router.

    Resource allows to specify nexthop IP and destination network for router.
    """

    required_service_extension = 'extraroute'

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('Use this resource at your own risk.'))

    PROPERTIES = (
        ROUTER_ID, DESTINATION, NEXTHOP,
    ) = (
        'router_id', 'destination', 'nexthop',
    )

    properties_schema = {
        ROUTER_ID: properties.Schema(
            properties.Schema.STRING,
            description=_('The router id.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('neutron.router')
            ]
        ),
        DESTINATION: properties.Schema(
            properties.Schema.STRING,
            description=_('Network in CIDR notation.'),
            required=True),
        NEXTHOP: properties.Schema(
            properties.Schema.STRING,
            description=_('Nexthop IP address.'),
            required=True)
    }

    def add_dependencies(self, deps):
        super(ExtraRoute, self).add_dependencies(deps)
        for resource in self.stack.values():
            # depend on any RouterInterface in this template with the same
            # router_id as this router_id
            if resource.has_interface('OS::Neutron::RouterInterface'):
                try:
                    router_id = self.properties[self.ROUTER_ID]
                    dep_router_id = resource.properties.get(
                        router.RouterInterface.ROUTER)
                except (ValueError, TypeError):
                    # Properties errors will be caught later in validation,
                    # where we can report them in their proper context.
                    continue
                if dep_router_id == router_id:
                    deps += (self, resource)
            # depend on any RouterGateway in this template with the same
            # router_id as this router_id
            elif resource.has_interface('OS::Neutron::RouterGateway'):
                try:
                    router_id = self.properties[self.ROUTER_ID]
                    dep_router_id = resource.properties.get(
                        router.RouterGateway.ROUTER_ID)
                except (ValueError, TypeError):
                    # Properties errors will be caught later in validation,
                    # where we can report them in their proper context.
                    continue
                if dep_router_id == router_id:
                    deps += (self, resource)

    def handle_create(self):
        router_id = self.properties.get(self.ROUTER_ID)
        routes = self.client().show_router(
            router_id).get('router').get('routes')
        if not routes:
            routes = []
        new_route = {'destination': self.properties[self.DESTINATION],
                     'nexthop': self.properties[self.NEXTHOP]}
        if new_route in routes:
            msg = _('Route duplicates an existing route.')
            raise exception.Error(msg)
        routes.append(new_route.copy())
        self.client().update_router(router_id,
                                    {'router': {'routes': routes}})
        new_route['router_id'] = router_id
        self.resource_id_set(
            '%(router_id)s:%(destination)s:%(nexthop)s' % new_route)

    def handle_delete(self):
        if not self.resource_id:
            return
        router_id = self.properties[self.ROUTER_ID]
        with self.client_plugin().ignore_not_found:
            routes = self.client().show_router(
                router_id).get('router').get('routes', [])
            try:
                routes.remove(
                    {'destination': self.properties[self.DESTINATION],
                     'nexthop': self.properties[self.NEXTHOP]}
                )
            except ValueError:
                return
            self.client().update_router(router_id,
                                        {'router': {'routes': routes}})
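
# Standalone sketch (editorial addition) of the route bookkeeping done by
# handle_create() and handle_delete() above: routes are plain dicts, an exact
# duplicate is rejected on create, and the resource_id packs router,
# destination and nexthop into one string. Values are illustrative.
routes = [{'destination': '10.0.0.0/24', 'nexthop': '192.168.1.1'}]
new_route = {'destination': '10.1.0.0/24', 'nexthop': '192.168.1.1'}
assert new_route not in routes  # duplicate check performed on create
routes.append(dict(new_route))
resource_id = '%(router_id)s:%(destination)s:%(nexthop)s' % dict(
    new_route, router_id='r-123')
assert resource_id == 'r-123:10.1.0.0/24:192.168.1.1'
routes.remove({'destination': '10.1.0.0/24', 'nexthop': '192.168.1.1'})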
Example #21
class Policy(resource.Resource):
    """A resource that creates a Senlin Policy.

    A policy is a set of rules that can be checked and/or enforced when
    an action is performed on a Cluster.
    """

    support_status = support.SupportStatus(version='6.0.0')

    default_client_name = 'senlin'

    PROPERTIES = (
        NAME,
        TYPE,
        POLICY_PROPS,
        BINDINGS,
    ) = ('name', 'type', 'properties', 'bindings')

    _BINDINGS = (
        BD_CLUSTER,
        BD_ENABLED,
    ) = ('cluster', 'enabled')

    _ACTION_STATUS = (
        ACTION_SUCCEEDED,
        ACTION_FAILED,
    ) = (
        'SUCCEEDED',
        'FAILED',
    )

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Name of the senlin policy. By default, physical resource name '
              'is used.'),
            update_allowed=True,
        ),
        TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('The type of senlin policy.'),
            required=True,
            constraints=[constraints.CustomConstraint('senlin.policy_type')]),
        POLICY_PROPS:
        properties.Schema(
            properties.Schema.MAP,
            _('Properties of this policy.'),
        ),
        BINDINGS:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of clusters to which this policy is attached.'),
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    BD_CLUSTER:
                    properties.Schema(
                        properties.Schema.STRING,
                        _("The name or ID of target cluster."),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('senlin.cluster')
                        ]),
                    BD_ENABLED:
                    properties.Schema(
                        properties.Schema.BOOLEAN,
                        _("Whether enable this policy on that cluster."),
                        default=True,
                    ),
                }))
    }

    def remove_bindings(self, bindings):
        for bd in bindings:
            try:
                bd['action'] = self.client().cluster_detach_policy(
                    bd[self.BD_CLUSTER], self.resource_id)['action']
                bd['finished'] = False
            except Exception as ex:
                # the policy is not attached to the cluster, skip it.
                if (self.client_plugin().is_bad_request(ex)
                        or self.client_plugin().is_not_found(ex)):
                    bd['finished'] = True
                else:
                    raise ex

    def add_bindings(self, bindings):
        for bd in bindings:
            bd['action'] = self.client().cluster_attach_policy(
                bd[self.BD_CLUSTER],
                self.resource_id,
                enabled=bd[self.BD_ENABLED])['action']
            bd['finished'] = False

    def check_action_done(self, bindings):
        ret = True
        if not bindings:
            return ret
        for bd in bindings:
            if bd.get('finished', False):
                continue
            action = self.client().get_action(bd['action'])
            if action.status == self.ACTION_SUCCEEDED:
                bd['finished'] = True
            elif action.status == self.ACTION_FAILED:
                err_msg = _('Failed to execute %(action)s for '
                            '%(cluster)s: %(reason)s') % {
                                'action': action.action,
                                'cluster': bd[self.BD_CLUSTER],
                                'reason': action.status_reason
                            }
                raise exception.ResourceInError(status_reason=err_msg,
                                                resource_status=self.FAILED)
            else:
                ret = False
        return ret

    def handle_create(self):
        params = {
            'name': (self.properties[self.NAME]
                     or self.physical_resource_name()),
            'spec':
            self.client_plugin().generate_spec(
                self.properties[self.TYPE], self.properties[self.POLICY_PROPS])
        }

        policy = self.client().create_policy(**params)
        self.resource_id_set(policy.id)
        bindings = copy.deepcopy(self.properties[self.BINDINGS])
        if bindings:
            self.add_bindings(bindings)
        return bindings

    def check_create_complete(self, bindings):
        return self.check_action_done(bindings)

    def handle_delete(self):
        return copy.deepcopy(self.properties[self.BINDINGS])

    def check_delete_complete(self, bindings):
        if not self.resource_id:
            return True
        self.remove_bindings(bindings)
        if self.check_action_done(bindings):
            with self.client_plugin().ignore_not_found:
                self.client().delete_policy(self.resource_id)
                return True
        return False

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if self.NAME in prop_diff:
            param = {'name': prop_diff[self.NAME]}
            self.client().update_policy(self.resource_id, **param)
        actions = dict()
        if self.BINDINGS in prop_diff:
            old = self.properties[self.BINDINGS] or []
            new = prop_diff[self.BINDINGS] or []
            actions['remove'] = [bd for bd in old if bd not in new]
            actions['add'] = [bd for bd in new if bd not in old]
            self.remove_bindings(actions['remove'])
        return actions

    def check_update_complete(self, actions):
        ret = True
        remove_done = self.check_action_done(actions.get('remove', []))
        # wait until detach finished, then start attach
        if remove_done and 'add' in actions:
            if not actions.get('add_started', False):
                self.add_bindings(actions['add'])
                actions['add_started'] = True
            ret = self.check_action_done(actions['add'])
        return ret

    def _show_resource(self):
        policy = self.client().get_policy(self.resource_id)
        return policy.to_dict()
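
# Standalone sketch (editorial addition) of how handle_update() above diffs the
# old and new 'bindings' lists into detach ('remove') and attach ('add') work
# items, which check_update_complete() then polls to completion. Cluster names
# are illustrative.
old = [{'cluster': 'c1', 'enabled': True}, {'cluster': 'c2', 'enabled': True}]
new = [{'cluster': 'c2', 'enabled': True}, {'cluster': 'c3', 'enabled': False}]
actions = {
    'remove': [bd for bd in old if bd not in new],
    'add': [bd for bd in new if bd not in old],
}
assert actions['remove'] == [{'cluster': 'c1', 'enabled': True}]
assert actions['add'] == [{'cluster': 'c3', 'enabled': False}]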
Example #22
class KeystoneProject(resource.Resource):
    """Heat Template Resource for Keystone Project.

    Projects represent the base unit of ownership in OpenStack, in that all
    resources in OpenStack should be owned by a specific project. A project
    itself must be owned by a specific domain, so project names are not
    globally unique, only unique within their domain. If the domain for a
    project is not specified, it is added to the default domain.
    """

    support_status = support.SupportStatus(
        version='2015.1', message=_('Supported versions: keystone v3'))

    default_client_name = 'keystone'

    entity = 'projects'

    PROPERTIES = (
        NAME,
        DOMAIN,
        DESCRIPTION,
        ENABLED,
        PARENT,
    ) = (
        'name',
        'domain',
        'description',
        'enabled',
        'parent',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of keystone project.'),
                          update_allowed=True),
        DOMAIN:
        properties.Schema(
            properties.Schema.STRING,
            _('Name or id of keystone domain.'),
            default='default',
            update_allowed=True,
            constraints=[constraints.CustomConstraint('keystone.domain')]),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description of keystone project.'),
                          default='',
                          update_allowed=True),
        ENABLED:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('Whether this project is enabled or disabled.'),
                          default=True,
                          update_allowed=True),
        PARENT:
        properties.Schema(
            properties.Schema.STRING,
            _('The name or ID of parent of this keystone project '
              'in hierarchy.'),
            support_status=support.SupportStatus(version='6.0.0'),
            constraints=[constraints.CustomConstraint('keystone.project')]),
    }

    def client(self):
        return super(KeystoneProject, self).client().client

    def handle_create(self):
        project_name = (self.properties[self.NAME]
                        or self.physical_resource_name())
        description = self.properties[self.DESCRIPTION]
        domain = self.client_plugin().get_domain_id(
            self.properties[self.DOMAIN])
        enabled = self.properties[self.ENABLED]
        pp = self.properties[self.PARENT]
        parent = self.client_plugin().get_project_id(pp)

        project = self.client().projects.create(name=project_name,
                                                domain=domain,
                                                description=description,
                                                enabled=enabled,
                                                parent=parent)

        self.resource_id_set(project.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            name = None
            # Don't update the name if no change
            if self.NAME in prop_diff:
                name = prop_diff[self.NAME] or self.physical_resource_name()

            description = prop_diff.get(self.DESCRIPTION)
            enabled = prop_diff.get(self.ENABLED)
            domain = (prop_diff.get(self.DOMAIN)
                      or self._stored_properties_data.get(self.DOMAIN))
            domain_id = self.client_plugin().get_domain_id(domain)

            self.client().projects.update(project=self.resource_id,
                                          name=name,
                                          description=description,
                                          enabled=enabled,
                                          domain=domain_id)
Example #23
class Port(neutron.NeutronResource):
    """A resource for managing Neutron ports.

    A port represents a virtual switch port on a logical network switch.
    Virtual instances attach their interfaces into ports. The logical port also
    defines the MAC address and the IP address(es) to be assigned to the
    interfaces plugged into them. When IP addresses are associated with a
    port, this also implies the port is associated with a subnet, as the IP
    address was taken from the allocation pool for a specific subnet.
    """

    PROPERTIES = (
        NAME,
        NETWORK_ID,
        NETWORK,
        FIXED_IPS,
        SECURITY_GROUPS,
        REPLACEMENT_POLICY,
        DEVICE_ID,
        DEVICE_OWNER,
        DNS_NAME,
    ) = (
        'name',
        'network_id',
        'network',
        'fixed_ips',
        'security_groups',
        'replacement_policy',
        'device_id',
        'device_owner',
        'dns_name',
    )

    EXTRA_PROPERTIES = (
        VALUE_SPECS,
        ADMIN_STATE_UP,
        MAC_ADDRESS,
        ALLOWED_ADDRESS_PAIRS,
        VNIC_TYPE,
        QOS_POLICY,
        PORT_SECURITY_ENABLED,
    ) = (
        'value_specs',
        'admin_state_up',
        'mac_address',
        'allowed_address_pairs',
        'binding:vnic_type',
        'qos_policy',
        'port_security_enabled',
    )

    _FIXED_IP_KEYS = (
        FIXED_IP_SUBNET_ID,
        FIXED_IP_SUBNET,
        FIXED_IP_IP_ADDRESS,
    ) = (
        'subnet_id',
        'subnet',
        'ip_address',
    )

    _ALLOWED_ADDRESS_PAIR_KEYS = (
        ALLOWED_ADDRESS_PAIR_MAC_ADDRESS,
        ALLOWED_ADDRESS_PAIR_IP_ADDRESS,
    ) = (
        'mac_address',
        'ip_address',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR,
        DEVICE_ID_ATTR,
        DEVICE_OWNER_ATTR,
        FIXED_IPS_ATTR,
        MAC_ADDRESS_ATTR,
        NAME_ATTR,
        NETWORK_ID_ATTR,
        SECURITY_GROUPS_ATTR,
        STATUS,
        TENANT_ID,
        ALLOWED_ADDRESS_PAIRS_ATTR,
        SUBNETS_ATTR,
        PORT_SECURITY_ENABLED_ATTR,
        QOS_POLICY_ATTR,
        DNS_ASSIGNMENT,
    ) = (
        'admin_state_up',
        'device_id',
        'device_owner',
        'fixed_ips',
        'mac_address',
        'name',
        'network_id',
        'security_groups',
        'status',
        'tenant_id',
        'allowed_address_pairs',
        'subnets',
        'port_security_enabled',
        'qos_policy_id',
        'dns_assignment',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('A symbolic name for this port.'),
                          update_allowed=True),
        NETWORK_ID:
        properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                message=_('Use property %s.') % NETWORK,
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED, version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        NETWORK:
        properties.Schema(
            properties.Schema.STRING,
            _('Network this port belongs to. If you plan to use the current '
              'port to assign a floating IP, you should specify %(fixed_ips)s '
              'with %(subnet)s. Note that if this is changed to a different '
              'network on update, the port will be replaced.') % {
                  'fixed_ips': FIXED_IPS,
                  'subnet': FIXED_IP_SUBNET
              },
            support_status=support.SupportStatus(version='2014.2'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        DEVICE_ID:
        properties.Schema(properties.Schema.STRING,
                          _('Device ID of this port.'),
                          update_allowed=True),
        DEVICE_OWNER:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the network owning the port. '
                            'The value is typically network:floatingip '
                            'or network:router_interface or network:dhcp.'),
                          update_allowed=True),
        FIXED_IPS:
        properties.Schema(
            properties.Schema.LIST,
            _('Desired IPs for this port.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    FIXED_IP_SUBNET_ID:
                    properties.Schema(
                        properties.Schema.STRING,
                        support_status=support.SupportStatus(
                            status=support.HIDDEN,
                            version='5.0.0',
                            message=_('Use property %s.') % FIXED_IP_SUBNET,
                            previous_status=support.SupportStatus(
                                status=support.DEPRECATED, version='2014.2')),
                        constraints=[
                            constraints.CustomConstraint('neutron.subnet')
                        ]),
                    FIXED_IP_SUBNET:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Subnet in which to allocate the IP address for '
                          'this port.'),
                        support_status=support.SupportStatus(version='2014.2'),
                        constraints=[
                            constraints.CustomConstraint('neutron.subnet')
                        ]),
                    FIXED_IP_IP_ADDRESS:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('IP address desired in the subnet for this port.'),
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                },
            ),
            update_allowed=True),
        SECURITY_GROUPS:
        properties.Schema(properties.Schema.LIST,
                          _('Security group IDs to associate with this port.'),
                          update_allowed=True),
        REPLACEMENT_POLICY:
        properties.Schema(
            properties.Schema.STRING,
            _('Policy on how to respond to a stack-update for this resource. '
              'REPLACE_ALWAYS will replace the port regardless of any '
              'property changes. AUTO will update the existing port for any '
              'changed update-allowed property.'),
            default='AUTO',
            constraints=[
                constraints.AllowedValues(['REPLACE_ALWAYS', 'AUTO']),
            ],
            update_allowed=True,
            support_status=support.SupportStatus(
                status=support.DEPRECATED,
                version='6.0.0',
                message=_('Replacement policy used to work around flawed '
                          'nova/neutron port interaction which has been '
                          'fixed since Liberty.'),
                previous_status=support.SupportStatus(version='2014.2'))),
        DNS_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('DNS name associated with the port.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('dns_name')],
            support_status=support.SupportStatus(version='7.0.0'),
        ),
    }

    # NOTE(prazumovsky): properties_schema has been separated because some
    # of these properties are used by the server resource when creating
    # internal ports.
    extra_properties_schema = {
        VALUE_SPECS:
        properties.Schema(properties.Schema.MAP,
                          _('Extra parameters to include in the request.'),
                          default={},
                          update_allowed=True),
        ADMIN_STATE_UP:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('The administrative state of this port.'),
                          default=True,
                          update_allowed=True),
        MAC_ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('MAC address to give to this port. The default update policy '
              'of this property in Neutron allows updates by the admin role '
              'only.'),
            constraints=[constraints.CustomConstraint('mac_addr')],
            update_allowed=True,
        ),
        ALLOWED_ADDRESS_PAIRS:
        properties.Schema(
            properties.Schema.LIST,
            _('Additional MAC/IP address pairs allowed to pass through the '
              'port.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ALLOWED_ADDRESS_PAIR_MAC_ADDRESS:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('MAC address to allow through this port.'),
                        constraints=[constraints.CustomConstraint('mac_addr')
                                     ]),
                    ALLOWED_ADDRESS_PAIR_IP_ADDRESS:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('IP address to allow through this port.'),
                        required=True,
                        constraints=[constraints.CustomConstraint('net_cidr')
                                     ]),
                },
            ),
            update_allowed=True,
        ),
        VNIC_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('The vnic type to be bound on the neutron port. '
              'To support SR-IOV PCI passthrough networking, you can request '
              'that the neutron port be realized as normal (virtual nic), '
              'direct (pci passthrough), or macvtap '
              '(virtual interface with a tap-like software interface). Note '
              'that this only works for Neutron deployments that support '
              'the bindings extension.'),
            constraints=[
                constraints.AllowedValues([
                    'normal', 'direct', 'macvtap', 'direct-physical',
                    'baremetal'
                ]),
            ],
            support_status=support.SupportStatus(version='2015.1'),
            update_allowed=True),
        PORT_SECURITY_ENABLED:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Flag to enable/disable port security on the port. '
              'When this feature is disabled (set to False), there is no '
              'packet filtering, such as security-group and address-pairs.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='5.0.0')),
        QOS_POLICY:
        properties.Schema(
            properties.Schema.STRING,
            _('The name or ID of QoS policy to attach to this port.'),
            constraints=[constraints.CustomConstraint('neutron.qos_policy')],
            update_allowed=True,
            support_status=support.SupportStatus(version='6.0.0')),
    }

    # properties_schema needs to be updated with the extra properties before
    # initialisation, because the resource should contain all properties
    # before it is created. This also lets the documentation resolve the
    # resource properties schema correctly.
    properties_schema.update(extra_properties_schema)

    attributes_schema = {
        ADMIN_STATE_UP_ATTR:
        attributes.Schema(_("The administrative state of this port."),
                          type=attributes.Schema.STRING),
        DEVICE_ID_ATTR:
        attributes.Schema(_("Unique identifier for the device."),
                          type=attributes.Schema.STRING),
        DEVICE_OWNER:
        attributes.Schema(_("Name of the network owning the port."),
                          type=attributes.Schema.STRING),
        FIXED_IPS_ATTR:
        attributes.Schema(_("Fixed IP addresses."),
                          type=attributes.Schema.LIST),
        MAC_ADDRESS_ATTR:
        attributes.Schema(_("MAC address of the port."),
                          type=attributes.Schema.STRING),
        NAME_ATTR:
        attributes.Schema(_("Friendly name of the port."),
                          type=attributes.Schema.STRING),
        NETWORK_ID_ATTR:
        attributes.Schema(
            _("Unique identifier for the network owning the port."),
            type=attributes.Schema.STRING),
        SECURITY_GROUPS_ATTR:
        attributes.Schema(_("A list of security groups for the port."),
                          type=attributes.Schema.LIST),
        STATUS:
        attributes.Schema(_("The status of the port."),
                          type=attributes.Schema.STRING),
        TENANT_ID:
        attributes.Schema(_("Tenant owning the port."),
                          type=attributes.Schema.STRING),
        ALLOWED_ADDRESS_PAIRS_ATTR:
        attributes.Schema(_(
            "Additional MAC/IP address pairs allowed to pass through "
            "a port."),
                          type=attributes.Schema.LIST),
        SUBNETS_ATTR:
        attributes.Schema(_("A list of all subnet attributes for the port."),
                          type=attributes.Schema.LIST),
        PORT_SECURITY_ENABLED_ATTR:
        attributes.Schema(
            _("Port security enabled of the port."),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.BOOLEAN),
        QOS_POLICY_ATTR:
        attributes.Schema(
            _("The QoS policy ID attached to this port."),
            type=attributes.Schema.STRING,
            support_status=support.SupportStatus(version='6.0.0'),
        ),
        DNS_ASSIGNMENT:
        attributes.Schema(
            _("The DNS assigned to this port."),
            type=attributes.Schema.MAP,
            support_status=support.SupportStatus(version='7.0.0'),
        ),
    }

    def translation_rules(self, props):
        return [
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.NETWORK],
                                        value_path=[self.NETWORK_ID]),
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.FIXED_IPS, self.FIXED_IP_SUBNET],
                                        value_name=self.FIXED_IP_SUBNET_ID),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.NETWORK],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='network'),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.FIXED_IPS, self.FIXED_IP_SUBNET],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='subnet')
        ]

    def add_dependencies(self, deps):
        super(Port, self).add_dependencies(deps)
        # Depend on any Subnet in this template on the same network
        # as this port. It is not known which subnet a port might be
        # assigned to, so all subnets in a network should be created
        # before the ports in that network.
        for res in six.itervalues(self.stack):
            if res.has_interface('OS::Neutron::Subnet'):
                dep_network = res.properties.get(subnet.Subnet.NETWORK)
                network = self.properties[self.NETWORK]
                if dep_network == network:
                    deps += (self, res)

    def handle_create(self):
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        props['network_id'] = props.pop(self.NETWORK)
        self._prepare_port_properties(props)
        qos_policy = props.pop(self.QOS_POLICY, None)
        if qos_policy:
            props['qos_policy_id'] = self.client_plugin().get_qos_policy_id(
                qos_policy)

        port = self.client().create_port({'port': props})['port']
        self.resource_id_set(port['id'])

    def _prepare_port_properties(self, props, prepare_for_update=False):
        if self.FIXED_IPS in props:
            fixed_ips = props[self.FIXED_IPS]
            if fixed_ips:
                for fixed_ip in fixed_ips:
                    for key, value in list(fixed_ip.items()):
                        if value is None:
                            fixed_ip.pop(key)
                    if self.FIXED_IP_SUBNET in fixed_ip:
                        fixed_ip['subnet_id'] = fixed_ip.pop(
                            self.FIXED_IP_SUBNET)
            else:
                # Passing empty list would have created a port without
                # fixed_ips during CREATE and released the existing
                # fixed_ips during UPDATE (default neutron behaviour).
                # However, for backward compatibility we will let neutron
                # assign ip for CREATE and leave the assigned ips during
                # UPDATE by not passing it. ref bug #1538473.
                del props[self.FIXED_IPS]
        # Delete empty MAC addresses so that the Neutron validation code
        # does not fail, since it does not accept None values.
        if self.ALLOWED_ADDRESS_PAIRS in props:
            address_pairs = props[self.ALLOWED_ADDRESS_PAIRS]
            if address_pairs:
                for pair in address_pairs:
                    if (self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS in pair
                            and pair[self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS] is
                            None):
                        del pair[self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS]
            else:
                props[self.ALLOWED_ADDRESS_PAIRS] = []
        # If 'security_groups' is absent, don't set the property when
        # creating; Neutron will then create the port with the 'default'
        # security group. If 'security_groups' is present with a value
        # of [], the port is created without any security group.
        if self.SECURITY_GROUPS in props:
            if props.get(self.SECURITY_GROUPS) is not None:
                props[self.SECURITY_GROUPS] = self.client_plugin(
                ).get_secgroup_uuids(props.get(self.SECURITY_GROUPS))
            else:
                # The update should have the same behaviour.
                if prepare_for_update:
                    props[self.SECURITY_GROUPS] = self.client_plugin(
                    ).get_secgroup_uuids(['default'])

        if self.REPLACEMENT_POLICY in props:
            del props[self.REPLACEMENT_POLICY]

    def _show_resource(self):
        return self.client().show_port(self.resource_id)['port']

    def check_create_complete(self, *args):
        attributes = self._show_resource()
        return self.is_built(attributes)

    def handle_delete(self):
        try:
            self.client().delete_port(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def _resolve_attribute(self, name):
        if self.resource_id is None:
            return
        if name == self.SUBNETS_ATTR:
            subnets = []
            try:
                fixed_ips = self._show_resource().get('fixed_ips', [])
                for fixed_ip in fixed_ips:
                    subnet_id = fixed_ip.get('subnet_id')
                    if subnet_id:
                        subnets.append(
                            self.client().show_subnet(subnet_id)['subnet'])
            except Exception as ex:
                LOG.warning(_LW("Failed to fetch resource attributes: %s"), ex)
                return
            return subnets
        return super(Port, self)._resolve_attribute(name)

    def needs_replace(self, after_props):
        """Mandatory replace based on props."""
        return after_props.get(self.REPLACEMENT_POLICY) == 'REPLACE_ALWAYS'

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            if self.QOS_POLICY in prop_diff:
                qos_policy = prop_diff.pop(self.QOS_POLICY)
                prop_diff['qos_policy_id'] = self.client_plugin(
                ).get_qos_policy_id(qos_policy) if qos_policy else None
            self._prepare_port_properties(prop_diff, prepare_for_update=True)
            LOG.debug('updating port with %s', prop_diff)
            self.client().update_port(self.resource_id, {'port': prop_diff})

    def check_update_complete(self, *args):
        attributes = self._show_resource()
        return self.is_built(attributes)

    def prepare_for_replace(self):
        # if the port has not been created yet, return directly
        if self.resource_id is None:
            return
        # store port fixed_ips for restoring after failed update
        fixed_ips = self._show_resource().get('fixed_ips', [])
        self.data_set('port_fip', jsonutils.dumps(fixed_ips))
        # reset fixed_ips for this port by setting fixed_ips to []
        props = {'fixed_ips': []}
        self.client().update_port(self.resource_id, {'port': props})

    def restore_prev_rsrc(self, convergence=False):
        # In case of convergence, during rollback, the previous rsrc is
        # already selected and is being acted upon.
        backup_stack = self.stack._backup_stack()
        backup_res = backup_stack.resources.get(self.name)
        prev_port = self if convergence else backup_res
        fixed_ips = prev_port.data().get('port_fip', [])

        props = {'fixed_ips': []}
        if convergence:
            existing_port, rsrc_owning_stack, stack = resource.Resource.load(
                prev_port.context, prev_port.replaced_by, True,
                prev_port.stack.cache_data)
            existing_port_id = existing_port.resource_id
        else:
            existing_port_id = self.resource_id
        if existing_port_id:
            # reset fixed_ips to [] for new resource
            self.client().update_port(existing_port_id, {'port': props})
        if fixed_ips and prev_port.resource_id:
            # restore ip for old port
            prev_port_props = {'fixed_ips': jsonutils.loads(fixed_ips)}
            self.client().update_port(prev_port.resource_id,
                                      {'port': prev_port_props})
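
The REPLACE and RESOLVE translation rules above move a value from a deprecated key to its replacement and then resolve a name to an ID through the Neutron client plugin. Below is a minimal standalone sketch of that idea; translate_port_props and the find_network_id callable are illustrative stand-ins, not Heat's TranslationRule machinery.

def translate_port_props(props, find_network_id):
    props = dict(props)
    # REPLACE: fold the deprecated 'network_id' key into 'network'
    if props.get('network') is None and 'network_id' in props:
        props['network'] = props.pop('network_id')
    # RESOLVE: turn a network name into an ID via the supplied finder
    if props.get('network') is not None:
        props['network'] = find_network_id(props['network'])
    return props

# Example usage with a stubbed finder:
print(translate_port_props({'network_id': 'private'},
                           lambda name: 'uuid-for-%s' % name))
# -> {'network': 'uuid-for-private'}
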
Example #24
class FloatingIP(neutron.NeutronResource):
    """A resource for managing Neutron floating ips.

    Floating IP addresses can change their association between routers by
    action of the user. One of the most common use cases for floating IPs is
    to provide public IP addresses to a private cloud, where there are a
    limited number of IP addresses available. Another is for a public cloud
    user to have a "static" IP address that can be reassigned when an instance
    is upgraded or moved.
    """

    entity = 'floatingip'

    PROPERTIES = (
        FLOATING_NETWORK_ID,
        FLOATING_NETWORK,
        FLOATING_SUBNET,
        VALUE_SPECS,
        PORT_ID,
        FIXED_IP_ADDRESS,
        FLOATING_IP_ADDRESS,
        DNS_NAME,
        DNS_DOMAIN,
    ) = (
        'floating_network_id',
        'floating_network',
        'floating_subnet',
        'value_specs',
        'port_id',
        'fixed_ip_address',
        'floating_ip_address',
        'dns_name',
        'dns_domain',
    )

    ATTRIBUTES = (
        ROUTER_ID,
        TENANT_ID,
        FLOATING_NETWORK_ID_ATTR,
        FIXED_IP_ADDRESS_ATTR,
        FLOATING_IP_ADDRESS_ATTR,
        PORT_ID_ATTR,
    ) = (
        'router_id',
        'tenant_id',
        'floating_network_id',
        'fixed_ip_address',
        'floating_ip_address',
        'port_id',
    )

    properties_schema = {
        FLOATING_NETWORK_ID:
        properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                message=_('Use property %s.') % FLOATING_NETWORK,
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED, version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        FLOATING_NETWORK:
        properties.Schema(
            properties.Schema.STRING,
            _('Network to allocate floating IP from.'),
            support_status=support.SupportStatus(version='2014.2'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        FLOATING_SUBNET:
        properties.Schema(
            properties.Schema.STRING,
            _('Subnet to allocate floating IP from.'),
            support_status=support.SupportStatus(version='9.0.0'),
            constraints=[constraints.CustomConstraint('neutron.subnet')],
        ),
        VALUE_SPECS:
        properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the "floatingip" object in the '
              'creation request. Parameters are often specific to installed '
              'hardware or extensions.'),
            default={}),
        PORT_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of an existing port with at least one IP address to '
              'associate with this floating IP.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('neutron.port')]),
        FIXED_IP_ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('IP address to use if the port has multiple addresses.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('ip_addr')]),
        FLOATING_IP_ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('IP address of the floating IP. NOTE: The default policy '
              'setting in Neutron restricts usage of this property to '
              'administrative users only.'),
            constraints=[constraints.CustomConstraint('ip_addr')],
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        DNS_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('DNS name associated with floating ip.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('rel_dns_name')],
            support_status=support.SupportStatus(version='7.0.0'),
        ),
        DNS_DOMAIN:
        properties.Schema(
            properties.Schema.STRING,
            _('DNS domain associated with floating ip.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('dns_domain')],
            support_status=support.SupportStatus(version='7.0.0'),
        ),
    }

    attributes_schema = {
        ROUTER_ID:
        attributes.Schema(_(
            'ID of the router used as gateway, set when associated with a '
            'port.'),
                          type=attributes.Schema.STRING),
        TENANT_ID:
        attributes.Schema(_('The tenant owning this floating IP.'),
                          type=attributes.Schema.STRING),
        FLOATING_NETWORK_ID_ATTR:
        attributes.Schema(
            _('ID of the network in which this IP is allocated.'),
            type=attributes.Schema.STRING),
        FIXED_IP_ADDRESS_ATTR:
        attributes.Schema(
            _('IP address of the associated port, if specified.'),
            type=attributes.Schema.STRING,
            cache_mode=attributes.Schema.CACHE_NONE),
        FLOATING_IP_ADDRESS_ATTR:
        attributes.Schema(_('The allocated address of this IP.'),
                          type=attributes.Schema.STRING),
        PORT_ID_ATTR:
        attributes.Schema(_('ID of the port associated with this IP.'),
                          type=attributes.Schema.STRING,
                          cache_mode=attributes.Schema.CACHE_NONE),
    }

    def translation_rules(self, props):
        return [
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.FLOATING_NETWORK],
                                        value_path=[self.FLOATING_NETWORK_ID]),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.FLOATING_NETWORK],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='network'),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.FLOATING_SUBNET],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='subnet',
            )
        ]

    def _add_router_interface_dependencies(self, deps, resource):
        def port_on_subnet(resource, subnet):
            if not resource.has_interface('OS::Neutron::Port'):
                return False

            fixed_ips = resource.properties.get(port.Port.FIXED_IPS)
            if not fixed_ips:
                # During create we only have unresolved function values,
                # so a None value cannot be used to build correct
                # dependencies. Depend on all RouterInterfaces when the
                # port has no fixed IP specified, since we can't safely
                # assume that any are in different networks.
                if subnet is None:
                    return True

                p_net = (resource.properties.get(port.Port.NETWORK)
                         or resource.properties.get(port.Port.NETWORK_ID))
                if p_net:
                    network = self.client().show_network(p_net)['network']
                    return subnet in network['subnets']
            else:
                for fixed_ip in resource.properties.get(port.Port.FIXED_IPS):

                    port_subnet = (fixed_ip.get(port.Port.FIXED_IP_SUBNET) or
                                   fixed_ip.get(port.Port.FIXED_IP_SUBNET_ID))
                    if subnet == port_subnet:
                        return True
            return False

        interface_subnet = (
            resource.properties.get(router.RouterInterface.SUBNET)
            or resource.properties.get(router.RouterInterface.SUBNET_ID))
        for d in deps.graph()[self]:
            if port_on_subnet(d, interface_subnet):
                deps += (self, resource)
                break

    def add_dependencies(self, deps):
        super(FloatingIP, self).add_dependencies(deps)

        for resource in six.itervalues(self.stack):
            # depend on any RouterGateway in this template with the same
            # network_id as this floating_network_id
            if resource.has_interface('OS::Neutron::RouterGateway'):
                gateway_network = resource.properties.get(
                    router.RouterGateway.NETWORK) or resource.properties.get(
                        router.RouterGateway.NETWORK_ID)
                floating_network = self.properties[self.FLOATING_NETWORK]
                if gateway_network == floating_network:
                    deps += (self, resource)

            # depend on any RouterInterface in this template which interfaces
            # with the same subnet that this floating IP's port is assigned
            # to
            elif resource.has_interface('OS::Neutron::RouterInterface'):
                self._add_router_interface_dependencies(deps, resource)
            # depend on any Router in this template whose
            # EXTERNAL_GATEWAY_NETWORK property matches this
            # floating_network_id
            elif resource.has_interface('OS::Neutron::Router'):
                gateway = resource.properties.get(
                    router.Router.EXTERNAL_GATEWAY)
                if gateway:
                    gateway_network = gateway.get(
                        router.Router.EXTERNAL_GATEWAY_NETWORK)
                    floating_network = self.properties[self.FLOATING_NETWORK]
                    if gateway_network == floating_network:
                        deps += (self, resource)

    def validate(self):
        super(FloatingIP, self).validate()
        # fixed_ip_address cannot be specified without a port_id
        if self.properties[self.PORT_ID] is None and self.properties[
                self.FIXED_IP_ADDRESS] is not None:
            raise exception.ResourcePropertyDependency(
                prop1=self.FIXED_IP_ADDRESS, prop2=self.PORT_ID)

    def handle_create(self):
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        props['floating_network_id'] = props.pop(self.FLOATING_NETWORK)
        if self.FLOATING_SUBNET in props:
            props['subnet_id'] = props.pop(self.FLOATING_SUBNET)
        fip = self.client().create_floatingip({'floatingip':
                                               props})['floatingip']
        self.resource_id_set(fip['id'])

    def handle_delete(self):
        with self.client_plugin().ignore_not_found:
            self.client().delete_floatingip(self.resource_id)
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            port_id = prop_diff.get(self.PORT_ID,
                                    self.properties[self.PORT_ID])

            fixed_ip_address = prop_diff.get(
                self.FIXED_IP_ADDRESS, self.properties[self.FIXED_IP_ADDRESS])

            request_body = {
                'floatingip': {
                    'port_id': port_id,
                    'fixed_ip_address': fixed_ip_address
                }
            }

            self.client().update_floatingip(self.resource_id, request_body)
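
handle_update() above falls back to the currently stored properties for anything missing from the property diff and wraps the result under a 'floatingip' key, as the Neutron API expects. A small standalone sketch of that body construction follows; the helper name and plain dicts are assumptions for illustration, not Heat code.

def build_fip_update_body(prop_diff, current_props):
    # Values absent from the diff fall back to the stored properties
    port_id = prop_diff.get('port_id', current_props.get('port_id'))
    fixed_ip = prop_diff.get('fixed_ip_address',
                             current_props.get('fixed_ip_address'))
    return {'floatingip': {'port_id': port_id,
                           'fixed_ip_address': fixed_ip}}

print(build_fip_update_body({'port_id': 'new-port-id'},
                            {'port_id': 'old-port-id',
                             'fixed_ip_address': '10.0.0.5'}))
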
Example #25
class PoolMember(neutron.NeutronResource):
    """A resource to handle loadbalancer members."""

    required_service_extension = 'lbaas'

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        POOL_ID, ADDRESS, PROTOCOL_PORT, WEIGHT, ADMIN_STATE_UP,
    ) = (
        'pool_id', 'address', 'protocol_port', 'weight', 'admin_state_up',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, TENANT_ID, WEIGHT_ATTR, ADDRESS_ATTR,
        POOL_ID_ATTR, PROTOCOL_PORT_ATTR,
    ) = (
        'admin_state_up', 'tenant_id', 'weight', 'address',
        'pool_id', 'protocol_port',
    )

    properties_schema = {
        POOL_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the load balancing pool.'),
            required=True,
            update_allowed=True
        ),
        ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address of the pool member on the pool network.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('ip_addr')
            ]
        ),
        PROTOCOL_PORT: properties.Schema(
            properties.Schema.INTEGER,
            _('TCP port on which the pool member listens for requests or '
              'connections.'),
            required=True,
            constraints=[
                constraints.Range(0, 65535),
            ]
        ),
        WEIGHT: properties.Schema(
            properties.Schema.INTEGER,
            _('Weight of the pool member in the pool (defaults to 1).'),
            constraints=[
                constraints.Range(0, 256),
            ],
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the pool member.'),
            default=True
        ),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _('The administrative state of this pool member.'),
            type=attributes.Schema.STRING
        ),
        TENANT_ID: attributes.Schema(
            _('Tenant owning the pool member.'),
            type=attributes.Schema.STRING
        ),
        WEIGHT_ATTR: attributes.Schema(
            _('Weight of the pool member in the pool.'),
            type=attributes.Schema.STRING
        ),
        ADDRESS_ATTR: attributes.Schema(
            _('IP address of the pool member.'),
            type=attributes.Schema.STRING
        ),
        POOL_ID_ATTR: attributes.Schema(
            _('The ID of the load balancing pool.'),
            type=attributes.Schema.STRING
        ),
        PROTOCOL_PORT_ATTR: attributes.Schema(
            _('TCP port on which the pool member listens for requests or '
              'connections.'),
            type=attributes.Schema.STRING
        ),
    }

    def handle_create(self):
        pool = self.properties[self.POOL_ID]
        protocol_port = self.properties[self.PROTOCOL_PORT]
        address = self.properties[self.ADDRESS]
        admin_state_up = self.properties[self.ADMIN_STATE_UP]
        weight = self.properties[self.WEIGHT]

        params = {
            'pool_id': pool,
            'address': address,
            'protocol_port': protocol_port,
            'admin_state_up': admin_state_up
        }

        if weight is not None:
            params['weight'] = weight

        member = self.client().create_member({'member': params})['member']
        self.resource_id_set(member['id'])

    def _show_resource(self):
        return self.client().show_member(self.resource_id)['member']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            self.client().update_member(
                self.resource_id, {'member': prop_diff})

    def handle_delete(self):
        try:
            self.client().delete_member(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
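
handle_create() above only includes 'weight' in the request when the optional property was explicitly set. A standalone sketch of the member body built there (the helper is illustrative, not part of Heat):

def build_member_body(pool_id, address, protocol_port,
                      admin_state_up=True, weight=None):
    params = {'pool_id': pool_id,
              'address': address,
              'protocol_port': protocol_port,
              'admin_state_up': admin_state_up}
    # 'weight' is optional and only sent when explicitly set
    if weight is not None:
        params['weight'] = weight
    return {'member': params}

print(build_member_body('pool-1', '10.0.0.10', 8080, weight=5))
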
Example #26
class FloatingIPAssociation(neutron.NeutronResource):
    """A resource for associating floating ips and ports.

    This resource associates a floating IP with a port that has at least
    one IP address.
    """
    PROPERTIES = (
        FLOATINGIP_ID,
        PORT_ID,
        FIXED_IP_ADDRESS,
    ) = (
        'floatingip_id',
        'port_id',
        'fixed_ip_address',
    )

    properties_schema = {
        FLOATINGIP_ID:
        properties.Schema(properties.Schema.STRING,
                          _('ID of the floating IP to associate.'),
                          required=True,
                          update_allowed=True),
        PORT_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of an existing port with at least one IP address to '
              'associate with this floating IP.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.CustomConstraint('neutron.port')]),
        FIXED_IP_ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('IP address to use if the port has multiple addresses.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('ip_addr')]),
    }

    def add_dependencies(self, deps):
        super(FloatingIPAssociation, self).add_dependencies(deps)

        for resource in six.itervalues(self.stack):
            if resource.has_interface('OS::Neutron::RouterInterface'):

                def port_on_subnet(resource, subnet):
                    if not resource.has_interface('OS::Neutron::Port'):
                        return False
                    fixed_ips = resource.properties.get(
                        port.Port.FIXED_IPS) or []
                    for fixed_ip in fixed_ips:
                        port_subnet = (fixed_ip.get(port.Port.FIXED_IP_SUBNET)
                                       or fixed_ip.get(
                                           port.Port.FIXED_IP_SUBNET_ID))
                        if subnet == port_subnet:
                            return True
                    return False

                interface_subnet = (resource.properties.get(
                    router.RouterInterface.SUBNET) or resource.properties.get(
                        router.RouterInterface.SUBNET_ID))
                for d in deps.graph()[self]:
                    if port_on_subnet(d, interface_subnet):
                        deps += (self, resource)
                        break

    def handle_create(self):
        props = self.prepare_properties(self.properties, self.name)
        floatingip_id = props.pop(self.FLOATINGIP_ID)
        self.client().update_floatingip(floatingip_id, {'floatingip': props})
        self.resource_id_set(self.id)

    def handle_delete(self):
        if not self.resource_id:
            return

        with self.client_plugin().ignore_not_found:
            self.client().update_floatingip(
                self.properties[self.FLOATINGIP_ID],
                {'floatingip': {
                    'port_id': None
                }})

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            floatingip_id = self.properties[self.FLOATINGIP_ID]
            port_id = self.properties[self.PORT_ID]
            # if the floatingip_id has changed, disassociate the port that
            # was associated with the old floatingip_id
            if self.FLOATINGIP_ID in prop_diff:
                with self.client_plugin().ignore_not_found:
                    self.client().update_floatingip(
                        floatingip_id, {'floatingip': {
                            'port_id': None
                        }})

            # associate the floatingip with the new port
            floatingip_id = (prop_diff.get(self.FLOATINGIP_ID)
                             or floatingip_id)
            port_id = prop_diff.get(self.PORT_ID) or port_id

            fixed_ip_address = (prop_diff.get(self.FIXED_IP_ADDRESS)
                                or self.properties[self.FIXED_IP_ADDRESS])

            request_body = {
                'floatingip': {
                    'port_id': port_id,
                    'fixed_ip_address': fixed_ip_address
                }
            }

            self.client().update_floatingip(floatingip_id, request_body)
            self.resource_id_set(self.id)
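
The update path above first detaches the port from the old floating IP when floatingip_id changes, then associates the (possibly new) port with the new floating IP. A conceptual sketch of that sequence follows; the helper and the fake client are assumptions for illustration, and only the update_floatingip call and body shape come from the code above.

class _PrintingClient(object):
    """Stand-in for the Neutron client; just records the calls."""
    def update_floatingip(self, fip_id, body):
        print(fip_id, body)


def reassociate_floatingip(client, old_fip_id, new_fip_id, port_id,
                           fixed_ip_address=None):
    # When the floating IP itself changes, detach the old one first
    if new_fip_id != old_fip_id:
        client.update_floatingip(old_fip_id,
                                 {'floatingip': {'port_id': None}})
    # Then attach the port (and optional fixed IP) to the new floating IP
    client.update_floatingip(new_fip_id,
                             {'floatingip': {
                                 'port_id': port_id,
                                 'fixed_ip_address': fixed_ip_address}})


reassociate_floatingip(_PrintingClient(), 'fip-old', 'fip-new', 'port-1')
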
Example #27
class TapFlow(neutron.NeutronResource):
    """A resource for neutron tap-as-a-service tap-flow.

    This plug-in requires neutron-taas. So to enable this
    plug-in, install this library and restart the heat-engine.

    A Tap-Flow represents the port from which the traffic needs
    to be mirrored.
    """

    required_service_extension = 'taas'

    entity = 'tap_flow'

    support_status = support.SupportStatus(version='12.0.0')

    PROPERTIES = (NAME, DESCRIPTION, PORT, TAP_SERVICE, DIRECTION,
                  VLAN_FILTER) = ('name', 'description', 'port', 'tap_service',
                                  'direction', 'vlan_filter')

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name for the Tap-Flow.'),
                          default="",
                          update_allowed=True),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description for the Tap-Flow.'),
                          default="",
                          update_allowed=True),
        PORT:
        properties.Schema(
            properties.Schema.STRING,
            _('ID or name of the tap-flow neutron port.'),
            constraints=[constraints.CustomConstraint('neutron.port')],
            required=True,
        ),
        TAP_SERVICE:
        properties.Schema(
            properties.Schema.STRING,
            _('ID or name of the neutron tap-service.'),
            constraints=[
                constraints.CustomConstraint('neutron.taas.tap_service')
            ],
            required=True,
        ),
        DIRECTION:
        properties.Schema(properties.Schema.STRING,
                          _('The Direction to capture the traffic on.'),
                          default='BOTH',
                          constraints=[
                              constraints.AllowedValues(['IN', 'OUT', 'BOTH']),
                          ]),
        VLAN_FILTER:
        properties.Schema(
            properties.Schema.STRING,
            _('Comma separated list of VLANs whose traffic needs to be '
              'captured on the probe VM.'),
            constraints=[
                constraints.AllowedPattern(COMMA_SEPARATED_LIST_REGEX),
            ],
        ),
    }

    def translation_rules(self, props):
        return [
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.PORT],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='port'),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.TAP_SERVICE],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='tap_service')
        ]

    def _show_resource(self):
        return self.client_plugin().show_ext_resource('tap_flow',
                                                      self.resource_id)

    def handle_create(self):
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        props['source_port'] = props.pop(self.PORT)
        props['tap_service_id'] = props.pop(self.TAP_SERVICE)
        tap_flow = self.client_plugin().create_ext_resource('tap_flow', props)
        self.resource_id_set(tap_flow['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client_plugin().update_ext_resource('tap_flow', prop_diff,
                                                     self.resource_id)

    def handle_delete(self):
        if self.resource_id is None:
            return
        with self.client_plugin().ignore_not_found:
            self.client_plugin().delete_ext_resource('tap_flow',
                                                     self.resource_id)

    def check_create_complete(self, data):
        return self.client_plugin().check_ext_resource_status(
            'tap_flow', self.resource_id)

    def check_update_complete(self, prop_diff):
        if prop_diff:
            return self.client_plugin().check_ext_resource_status(
                'tap_flow', self.resource_id)
        return True

    def check_delete_complete(self, data):
        if self.resource_id is None:
            return True

        with self.client_plugin().ignore_not_found:
            try:
                if self.client_plugin().check_ext_resource_status(
                        'tap_flow', self.resource_id):
                    self.client_plugin().delete_ext_resource(
                        'tap_flow', self.resource_id)
            except exception.ResourceInError:
                # Still try to delete tap resource in error state
                self.client_plugin().delete_ext_resource(
                    'tap_flow', self.resource_id)
            return False

        return True
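
handle_create() above renames the Heat-facing properties 'port' and 'tap_service' to the 'source_port' and 'tap_service_id' fields used in the create request. A minimal sketch of that remapping; the helper name and example values are made up for illustration.

def to_taas_create_body(props):
    body = dict(props)
    # Rename Heat property keys to the field names the taas API expects
    body['source_port'] = body.pop('port')
    body['tap_service_id'] = body.pop('tap_service')
    return body

print(to_taas_create_body({'name': 'flow1',
                           'port': 'port-uuid',
                           'tap_service': 'tap-service-uuid',
                           'direction': 'BOTH'}))
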
Example #28
File: user.py  Project: cryptickp/heat
class KeystoneUser(resource.Resource,
                   role_assignments.KeystoneRoleAssignmentMixin):
    """Heat Template Resource for Keystone User."""

    support_status = support.SupportStatus(
        version='2015.1', message=_('Supported versions: keystone v3'))

    default_client_name = 'keystone'

    PROPERTIES = (NAME, DOMAIN, DESCRIPTION, ENABLED, EMAIL, PASSWORD,
                  DEFAULT_PROJECT, GROUPS) = ('name', 'domain', 'description',
                                              'enabled', 'email', 'password',
                                              'default_project', 'groups')

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of keystone user.'),
                          update_allowed=True),
        DOMAIN:
        properties.Schema(
            properties.Schema.STRING,
            _('Name of keystone domain.'),
            default='default',
            update_allowed=True,
            constraints=[constraints.CustomConstraint('keystone.domain')]),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description of keystone user.'),
                          default='',
                          update_allowed=True),
        ENABLED:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('Whether the keystone user is enabled or '
                            'disabled.'),
                          default=True,
                          update_allowed=True),
        EMAIL:
        properties.Schema(properties.Schema.STRING,
                          _('Email address of keystone user.'),
                          update_allowed=True),
        PASSWORD:
        properties.Schema(properties.Schema.STRING,
                          _('Password of keystone user.'),
                          update_allowed=True),
        DEFAULT_PROJECT:
        properties.Schema(
            properties.Schema.STRING,
            _('Default project of keystone user.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('keystone.project')]),
        GROUPS:
        properties.Schema(
            properties.Schema.LIST,
            _('Keystone user groups.'),
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.STRING,
                _('Keystone user group.'),
                constraints=[constraints.CustomConstraint('keystone.group')]))
    }

    properties_schema.update(
        role_assignments.KeystoneRoleAssignmentMixin.mixin_properties_schema)

    def validate(self):
        super(KeystoneUser, self).validate()
        self.validate_assignment_properties()

    def _create_user(self,
                     user_name,
                     description,
                     domain,
                     default_project,
                     enabled=None,
                     email=None,
                     password=None):
        domain = (self.client_plugin().get_domain_id(domain))
        if default_project:
            default_project = (
                self.client_plugin().get_project_id(default_project))

        return self.client().client.users.create(
            name=user_name,
            domain=domain,
            description=description,
            enabled=enabled,
            email=email,
            password=password,
            default_project=default_project)

    def _delete_user(self, user_id):
        return self.client().client.users.delete(user_id)

    def _update_user(self,
                     user_id,
                     domain,
                     new_name=None,
                     new_description=None,
                     new_email=None,
                     new_password=None,
                     new_default_project=None,
                     enabled=None):
        values = dict()

        if new_name is not None:
            values['name'] = new_name
        if new_description is not None:
            values['description'] = new_description
        if new_email is not None:
            values['email'] = new_email
        if new_password is not None:
            values['password'] = new_password
        if new_default_project is not None:
            values['default_project'] = new_default_project
        if enabled is not None:
            values['enabled'] = enabled

        values['user'] = user_id
        domain = (self.client_plugin().get_domain_id(domain))

        values['domain'] = domain

        return self.client().client.users.update(**values)

    def _add_user_to_groups(self, user_id, groups):
        if groups is not None:
            group_ids = [
                self.client_plugin().get_group_id(group) for group in groups
            ]

            for group_id in group_ids:
                self.client().client.users.add_to_group(user_id, group_id)

    def _remove_user_from_groups(self, user_id, groups):
        if groups is not None:
            group_ids = [
                self.client_plugin().get_group_id(group) for group in groups
            ]

            for group_id in group_ids:
                self.client().client.users.remove_from_group(user_id, group_id)

    def _find_diff(self, updated_prps, stored_prps):
        new_group_ids = [
            self.client_plugin().get_group_id(group)
            for group in (set(updated_prps or []) - set(stored_prps or []))
        ]

        removed_group_ids = [
            self.client_plugin().get_group_id(group)
            for group in (set(stored_prps or []) - set(updated_prps or []))
        ]

        return new_group_ids, removed_group_ids

    def handle_create(self):
        user_name = (self.properties.get(self.NAME)
                     or self.physical_resource_name())
        description = self.properties.get(self.DESCRIPTION)
        domain = self.properties.get(self.DOMAIN)
        enabled = self.properties.get(self.ENABLED)
        email = self.properties.get(self.EMAIL)
        password = self.properties.get(self.PASSWORD)
        default_project = self.properties.get(self.DEFAULT_PROJECT)
        groups = self.properties.get(self.GROUPS)

        user = self._create_user(user_name=user_name,
                                 description=description,
                                 domain=domain,
                                 enabled=enabled,
                                 email=email,
                                 password=password,
                                 default_project=default_project)

        self.resource_id_set(user.id)

        self._add_user_to_groups(user.id, groups)

        self.create_assignment(user_id=user.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        name = prop_diff.get(self.NAME) or self.physical_resource_name()
        description = prop_diff.get(self.DESCRIPTION)
        enabled = prop_diff.get(self.ENABLED)
        email = prop_diff.get(self.EMAIL)
        password = prop_diff.get(self.PASSWORD)
        domain = (prop_diff.get(self.DOMAIN)
                  or self._stored_properties_data.get(self.DOMAIN))
        default_project = prop_diff.get(self.DEFAULT_PROJECT)

        (new_group_ids, removed_group_ids) = self._find_diff(
            prop_diff.get(self.GROUPS),
            self._stored_properties_data.get(self.GROUPS))

        self._update_user(user_id=self.resource_id,
                          domain=domain,
                          new_name=name,
                          new_description=description,
                          enabled=enabled,
                          new_default_project=default_project,
                          new_email=email,
                          new_password=password)

        if len(new_group_ids) > 0:
            self._add_user_to_groups(self.resource_id, new_group_ids)

        if len(removed_group_ids) > 0:
            self._remove_user_from_groups(self.resource_id, removed_group_ids)

        self.update_assignment(prop_diff=prop_diff, user_id=self.resource_id)

    def handle_delete(self):
        if self.resource_id is not None:
            try:
                self.delete_assignment(user_id=self.resource_id)

                if self._stored_properties_data.get(self.GROUPS) is not None:
                    self._remove_user_from_groups(self.resource_id, [
                        self.client_plugin().get_group_id(group) for group in
                        self._stored_properties_data.get(self.GROUPS)
                    ])

                self._delete_user(user_id=self.resource_id)
            except Exception as ex:
                self.client_plugin().ignore_not_found(ex)
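
_find_diff() above computes group membership changes as two set differences: groups only in the updated list are added, groups only in the stored list are removed. Below is a standalone sketch of that arithmetic, with a pass-through stand-in for the group-ID lookup (the real code resolves IDs via the client plugin).

def find_group_diff(updated, stored, get_group_id=lambda g: g):
    added = [get_group_id(g)
             for g in set(updated or []) - set(stored or [])]
    removed = [get_group_id(g)
               for g in set(stored or []) - set(updated or [])]
    return added, removed

print(find_group_diff(['devs', 'ops'], ['ops', 'qa']))
# -> (['devs'], ['qa'])
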
Example #29
class KeystoneGroup(resource.Resource,
                    role_assignments.KeystoneRoleAssignmentMixin):
    """Heat Template Resource for Keystone Group."""

    support_status = support.SupportStatus(
        version='2015.1', message=_('Supported versions: keystone v3'))

    default_client_name = 'keystone'

    entity = 'groups'

    PROPERTIES = (NAME, DOMAIN, DESCRIPTION) = ('name', 'domain',
                                                'description')

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of keystone group.'),
                          update_allowed=True),
        DOMAIN:
        properties.Schema(
            properties.Schema.STRING,
            _('Name or id of keystone domain.'),
            default='default',
            update_allowed=True,
            constraints=[constraints.CustomConstraint('keystone.domain')]),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description of keystone group.'),
                          default='',
                          update_allowed=True)
    }

    properties_schema.update(
        role_assignments.KeystoneRoleAssignmentMixin.mixin_properties_schema)

    def validate(self):
        super(KeystoneGroup, self).validate()
        self.validate_assignment_properties()

    def client(self):
        return super(KeystoneGroup, self).client().client

    def handle_create(self):
        group_name = (self.properties[self.NAME]
                      or self.physical_resource_name())
        description = self.properties[self.DESCRIPTION]
        domain = self.client_plugin().get_domain_id(
            self.properties[self.DOMAIN])

        group = self.client().groups.create(name=group_name,
                                            domain=domain,
                                            description=description)

        self.resource_id_set(group.id)

        self.create_assignment(group_id=group.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if prop_diff:
            name = prop_diff.get(self.NAME) or self.physical_resource_name()
            description = prop_diff.get(self.DESCRIPTION)
            domain = (prop_diff.get(self.DOMAIN)
                      or self._stored_properties_data.get(self.DOMAIN))
            domain_id = self.client_plugin().get_domain_id(domain)

            self.client().groups.update(group=self.resource_id,
                                        name=name,
                                        description=description,
                                        domain_id=domain_id)

            self.update_assignment(prop_diff=prop_diff,
                                   group_id=self.resource_id)

    def handle_delete(self):
        if self.resource_id is not None:
            with self.client_plugin().ignore_not_found:
                self.client().groups.delete(self.resource_id)
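
handle_update() above reads each value from the property diff and falls back to the previously stored properties when it is absent (as for the domain). A tiny illustration of that fallback pattern with plain dicts; the names are assumptions, not Heat code.

def effective_value(key, prop_diff, stored_props):
    # Values missing from the diff fall back to what is already stored
    return prop_diff.get(key) or stored_props.get(key)

stored = {'domain': 'default', 'description': 'team group'}
diff = {'description': 'renamed group'}
print(effective_value('domain', diff, stored))       # -> 'default'
print(effective_value('description', diff, stored))  # -> 'renamed group'
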
Example #30
class Port(neutron.NeutronResource):

    PROPERTIES = (
        NETWORK_ID,
        NETWORK,
        NAME,
        VALUE_SPECS,
        ADMIN_STATE_UP,
        FIXED_IPS,
        MAC_ADDRESS,
        DEVICE_ID,
        SECURITY_GROUPS,
        ALLOWED_ADDRESS_PAIRS,
        DEVICE_OWNER,
        REPLACEMENT_POLICY,
        VNIC_TYPE,
    ) = (
        'network_id',
        'network',
        'name',
        'value_specs',
        'admin_state_up',
        'fixed_ips',
        'mac_address',
        'device_id',
        'security_groups',
        'allowed_address_pairs',
        'device_owner',
        'replacement_policy',
        'binding:vnic_type',
    )

    _FIXED_IP_KEYS = (
        FIXED_IP_SUBNET_ID,
        FIXED_IP_SUBNET,
        FIXED_IP_IP_ADDRESS,
    ) = (
        'subnet_id',
        'subnet',
        'ip_address',
    )

    _ALLOWED_ADDRESS_PAIR_KEYS = (
        ALLOWED_ADDRESS_PAIR_MAC_ADDRESS,
        ALLOWED_ADDRESS_PAIR_IP_ADDRESS,
    ) = (
        'mac_address',
        'ip_address',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR,
        DEVICE_ID_ATTR,
        DEVICE_OWNER_ATTR,
        FIXED_IPS_ATTR,
        MAC_ADDRESS_ATTR,
        NAME_ATTR,
        NETWORK_ID_ATTR,
        SECURITY_GROUPS_ATTR,
        STATUS,
        TENANT_ID,
        ALLOWED_ADDRESS_PAIRS_ATTR,
        SHOW,
        SUBNETS_ATTR,
    ) = (
        'admin_state_up',
        'device_id',
        'device_owner',
        'fixed_ips',
        'mac_address',
        'name',
        'network_id',
        'security_groups',
        'status',
        'tenant_id',
        'allowed_address_pairs',
        'show',
        'subnets',
    )

    properties_schema = {
        NETWORK_ID:
        properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                support.DEPRECATED,
                _('Use property %s.') % NETWORK),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        NETWORK:
        properties.Schema(
            properties.Schema.STRING,
            _('Network this port belongs to.'),
            support_status=support.SupportStatus(version='2014.2'),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('A symbolic name for this port.'),
                          update_allowed=True),
        VALUE_SPECS:
        properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the "port" object in the '
              'creation request.'),
            default={}),
        ADMIN_STATE_UP:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('The administrative state of this port.'),
                          default=True,
                          update_allowed=True),
        FIXED_IPS:
        properties.Schema(
            properties.Schema.LIST,
            _('Desired IPs for this port.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    FIXED_IP_SUBNET_ID:
                    properties.Schema(properties.Schema.STRING,
                                      support_status=support.SupportStatus(
                                          support.DEPRECATED,
                                          _('Use property %s.') %
                                          FIXED_IP_SUBNET)),
                    FIXED_IP_SUBNET:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Subnet in which to allocate the IP address for '
                          'this port.'),
                        support_status=support.SupportStatus(
                            version='2014.2')),
                    FIXED_IP_IP_ADDRESS:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('IP address desired in the subnet for this port.')),
                },
            ),
            update_allowed=True),
        MAC_ADDRESS:
        properties.Schema(properties.Schema.STRING,
                          _('MAC address to give to this port.')),
        DEVICE_ID:
        properties.Schema(properties.Schema.STRING,
                          _('Device ID of this port.'),
                          update_allowed=True),
        SECURITY_GROUPS:
        properties.Schema(properties.Schema.LIST,
                          _('Security group IDs to associate with this port.'),
                          update_allowed=True),
        ALLOWED_ADDRESS_PAIRS:
        properties.Schema(
            properties.Schema.LIST,
            _('Additional MAC/IP address pairs allowed to pass through the '
              'port.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ALLOWED_ADDRESS_PAIR_MAC_ADDRESS:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('MAC address to allow through this port.')),
                    ALLOWED_ADDRESS_PAIR_IP_ADDRESS:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('IP address to allow through this port.'),
                        required=True),
                },
            )),
        DEVICE_OWNER:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the network owning the port. '
                            'The value is typically network:floatingip, '
                            'network:router_interface or network:dhcp.'),
                          update_allowed=True),
        REPLACEMENT_POLICY:
        properties.Schema(
            properties.Schema.STRING,
            _('Policy on how to respond to a stack-update for this resource. '
              'REPLACE_ALWAYS will replace the port regardless of any '
              'property changes. AUTO will update the existing port for any '
              'changed update-allowed property.'),
            default='REPLACE_ALWAYS',
            constraints=[
                constraints.AllowedValues(['REPLACE_ALWAYS', 'AUTO']),
            ],
            update_allowed=True),
        VNIC_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('The vnic type to be bound on the neutron port. '
              'To support SR-IOV PCI passthrough networking, you can request '
              'that the neutron port be realized as normal (virtual nic), '
              'direct (pci passthrough), or macvtap '
              '(virtual interface with a tap-like software interface). '
              'Note that this only works for Neutron deployments that '
              'support the bindings extension.'),
            constraints=[
                constraints.AllowedValues(['normal', 'direct', 'macvtap']),
            ],
            update_allowed=True),
    }
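
    # Illustrative only: a minimal HOT snippet exercising these properties;
    # 'my_net' and 'my_subnet' are placeholder resource names.
    #
    #   my_port:
    #     type: OS::Neutron::Port
    #     properties:
    #       network: my_net
    #       fixed_ips:
    #         - subnet: my_subnet
    #       security_groups: [default]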

    attributes_schema = {
        ADMIN_STATE_UP_ATTR:
        attributes.Schema(_("The administrative state of this port.")),
        DEVICE_ID_ATTR:
        attributes.Schema(_("Unique identifier for the device.")),
        DEVICE_OWNER:
        attributes.Schema(_("Name of the network owning the port.")),
        FIXED_IPS_ATTR:
        attributes.Schema(_("Fixed IP addresses.")),
        MAC_ADDRESS_ATTR:
        attributes.Schema(_("MAC address of the port.")),
        NAME_ATTR:
        attributes.Schema(_("Friendly name of the port.")),
        NETWORK_ID_ATTR:
        attributes.Schema(
            _("Unique identifier for the network owning the port.")),
        SECURITY_GROUPS_ATTR:
        attributes.Schema(_("A list of security groups for the port.")),
        STATUS:
        attributes.Schema(_("The status of the port.")),
        TENANT_ID:
        attributes.Schema(_("Tenant owning the port.")),
        ALLOWED_ADDRESS_PAIRS_ATTR:
        attributes.Schema(
            _("Additional MAC/IP address pairs allowed to pass through "
              "a port.")),
        SHOW:
        attributes.Schema(_("All attributes.")),
        SUBNETS_ATTR:
        attributes.Schema(_("A list of all subnet attributes for the port.")),
    }
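
    # Attributes can be read from a template with get_attr, e.g.
    # { get_attr: [my_port, fixed_ips] } where 'my_port' is the resource
    # name used above (illustrative example).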

    def validate(self):
        super(Port, self).validate()
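        # A network must be given via NETWORK (preferred) or the deprecated
        # NETWORK_ID; the helper below validates that requirement.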
        self._validate_depr_property_required(self.properties, self.NETWORK,
                                              self.NETWORK_ID)

    def add_dependencies(self, deps):
        super(Port, self).add_dependencies(deps)
        # Depend on any Subnet in this template that is on the same
        # network as this port. It is not known which subnet a port
        # will be assigned to, so all subnets in a network should be
        # created before the ports in that network.
        for res in self.stack.itervalues():
            if res.has_interface('OS::Neutron::Subnet'):
                dep_network = res.properties.get(
                    subnet.Subnet.NETWORK) or res.properties.get(
                        subnet.Subnet.NETWORK_ID)
                network = self.properties.get(
                    self.NETWORK) or self.properties.get(self.NETWORK_ID)
                if dep_network == network:
                    deps += (self, res)

    def handle_create(self):
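        # Resolve the network reference to a UUID, normalize the remaining
        # properties for the Neutron API, then create the port and record
        # its ID as the physical resource id.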
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        self.client_plugin().resolve_network(props, self.NETWORK, 'network_id')
        self._prepare_port_properties(props)

        port = self.neutron().create_port({'port': props})['port']
        self.resource_id_set(port['id'])

    def _prepare_port_properties(self, props, prepare_for_update=False):
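        # Massage the property dict into the form the Neutron port API
        # accepts: drop unset values, resolve names to UUIDs and strip
        # Heat-only keys such as the replacement policy.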
        for fixed_ip in props.get(self.FIXED_IPS, []):
            # Iterate over a copy of the items, since keys with a None
            # value are removed from the dict while looping.
            for key, value in list(fixed_ip.items()):
                if value is None:
                    fixed_ip.pop(key)
            if fixed_ip.get(self.FIXED_IP_SUBNET):
                self.client_plugin().resolve_subnet(fixed_ip,
                                                    self.FIXED_IP_SUBNET,
                                                    'subnet_id')
        # Remove MAC addresses that are None so that Neutron validation
        # does not fail; the API does not accept None values.
        for pair in props.get(self.ALLOWED_ADDRESS_PAIRS, []):
            if (self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS in pair
                    and pair[self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS] is None):
                del pair[self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS]

        # If 'security_groups' is not specified, do not set the property
        # on create; Neutron will then attach the 'default' security group.
        # If 'security_groups' is given as an empty list, the port is
        # created without any security group.
        if props.get(self.SECURITY_GROUPS) is not None:
            props[self.SECURITY_GROUPS] = self.client_plugin(
            ).get_secgroup_uuids(props.get(self.SECURITY_GROUPS))
        else:
            # Updates should have the same behavior.
            if prepare_for_update:
                props[self.SECURITY_GROUPS] = self.client_plugin(
                ).get_secgroup_uuids(['default'])

        if not props[self.FIXED_IPS]:
            del props[self.FIXED_IPS]

        del props[self.REPLACEMENT_POLICY]

    def _show_resource(self):
        return self.neutron().show_port(self.resource_id)['port']

    def check_create_complete(self, *args):
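        # Poll the port status until is_built() reports completion.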
        attributes = self._show_resource()
        return self.is_built(attributes)

    def handle_delete(self):
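        # Delete the port; a port that no longer exists is treated as
        # already deleted.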
        client = self.neutron()
        try:
            client.delete_port(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def _resolve_attribute(self, name):
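        # The 'subnets' attribute is built by looking up the subnet of each
        # fixed IP on the port; other attributes use the generic lookup.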
        if name == self.SUBNETS_ATTR:
            subnets = []
            try:
                fixed_ips = self._show_resource().get('fixed_ips', [])
                for fixed_ip in fixed_ips:
                    subnet_id = fixed_ip.get('subnet_id')
                    if subnet_id:
                        subnets.append(
                            self.neutron().show_subnet(subnet_id)['subnet'])
            except Exception as ex:
                LOG.warn(_LW("Failed to fetch resource attributes: %s"), ex)
                return
            return subnets
        return super(Port, self)._resolve_attribute(name)

    def _needs_update(self, after, before, after_props, before_props,
                      prev_resource):

        # With the default REPLACE_ALWAYS policy every update replaces the
        # port; AUTO allows in-place updates of update_allowed properties.
        if after_props.get(self.REPLACEMENT_POLICY) == 'REPLACE_ALWAYS':
            raise resource.UpdateReplace(self.name)

        return super(Port, self)._needs_update(after, before, after_props,
                                               before_props, prev_resource)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
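        # Only update-allowed properties are sent to Neutron; security
        # groups are re-resolved so that omitting them falls back to the
        # 'default' group (see _prepare_port_properties).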
        props = self.prepare_update_properties(json_snippet)

        self._prepare_port_properties(props, prepare_for_update=True)
        LOG.debug('updating port with %s', props)
        self.neutron().update_port(self.resource_id, {'port': props})

    def check_update_complete(self, *args):
        attributes = self._show_resource()
        return self.is_built(attributes)