コード例 #1
0
ファイル: properties.py プロジェクト: Hybrid-Cloud/conveyor
        def constraints():
            """Yield constraint objects declared in schema_dict."""
            def numeric(name):
                # Convert the raw schema entry to a number; None if absent.
                raw = schema_dict.get(name)
                return Schema.str_to_num(raw) if raw is not None else None

            def declared(*keys):
                # True when any of the given keys appears in the schema.
                return any(k in schema_dict for k in keys)

            if declared(MIN_VALUE, MAX_VALUE):
                yield constr.Range(numeric(MIN_VALUE), numeric(MAX_VALUE))
            if declared(MIN_LENGTH, MAX_LENGTH):
                yield constr.Length(numeric(MIN_LENGTH), numeric(MAX_LENGTH))
            if declared(ALLOWED_VALUES):
                yield constr.AllowedValues(schema_dict[ALLOWED_VALUES])
            if declared(ALLOWED_PATTERN):
                yield constr.AllowedPattern(schema_dict[ALLOWED_PATTERN])
コード例 #2
0
ファイル: parameters.py プロジェクト: Hybrid-Cloud/conveyor
        def constraints():
            """Yield parameter constraints declared in schema_dict."""
            desc = schema_dict.get(CONSTRAINT_DESCRIPTION)

            def bound(key):
                # Numeric bound pulled from the schema; None when absent.
                return Schema.get_num(key, schema_dict)

            if MIN_VALUE in schema_dict or MAX_VALUE in schema_dict:
                yield constr.Range(bound(MIN_VALUE), bound(MAX_VALUE), desc)
            if MIN_LENGTH in schema_dict or MAX_LENGTH in schema_dict:
                yield constr.Length(bound(MIN_LENGTH), bound(MAX_LENGTH),
                                    desc)
            if ALLOWED_VALUES in schema_dict:
                yield constr.AllowedValues(schema_dict[ALLOWED_VALUES], desc)
            if ALLOWED_PATTERN in schema_dict:
                yield constr.AllowedPattern(schema_dict[ALLOWED_PATTERN], desc)
コード例 #3
0
        def constraints():
            """Yield constraint objects built from the parameter schema.

            Raises InvalidSchemaError when the constraints entry is not
            a list or when an individual constraint has no recognised
            kind.
            """
            constraints = schema_dict.get(cls.CONSTRAINTS)
            if constraints is None:
                return

            if not isinstance(constraints, list):
                raise exception.InvalidSchemaError(
                    message=_("Invalid parameter constraints for parameter "
                              "%s, expected a list") % param_name)

            for item in constraints:
                cls._check_dict(item, PARAM_CONSTRAINTS,
                                'parameter constraints')
                desc = item.get(DESCRIPTION)
                if RANGE in item:
                    spec = item.get(RANGE)
                    cls._check_dict(spec, RANGE_KEYS, 'range constraint')
                    yield constr.Range(parameters.Schema.get_num(MIN, spec),
                                       parameters.Schema.get_num(MAX, spec),
                                       desc)
                elif LENGTH in item:
                    spec = item.get(LENGTH)
                    # Length bounds reuse the same MIN/MAX keys as Range.
                    cls._check_dict(spec, RANGE_KEYS, 'length constraint')
                    yield constr.Length(parameters.Schema.get_num(MIN, spec),
                                        parameters.Schema.get_num(MAX, spec),
                                        desc)
                elif ALLOWED_VALUES in item:
                    yield constr.AllowedValues(item.get(ALLOWED_VALUES), desc)
                elif ALLOWED_PATTERN in item:
                    yield constr.AllowedPattern(item.get(ALLOWED_PATTERN),
                                                desc)
                elif CUSTOM_CONSTRAINT in item:
                    yield constr.CustomConstraint(item.get(CUSTOM_CONSTRAINT),
                                                  desc)
                else:
                    raise exception.InvalidSchemaError(
                        message=_("No constraint expressed"))
コード例 #4
0
ファイル: parameters.py プロジェクト: Hybrid-Cloud/conveyor
    def _pseudo_parameters(self, stack_identifier):
        """Yield the built-in pseudo parameters for a stack.

        Always yields the stack-ID parameter; when the identifier also
        carries a stack name, yields the stack-name and region
        parameters as well.
        """
        if stack_identifier is not None:
            stack_id = stack_identifier.arn()
        else:
            stack_id = 'None'
        stack_name = stack_identifier and stack_identifier.stack_name

        yield Parameter(
            self.PARAM_STACK_ID,
            Schema(Schema.STRING, _('Stack ID'), default=str(stack_id)))
        if not stack_name:
            return
        yield Parameter(
            self.PARAM_STACK_NAME,
            Schema(Schema.STRING, _('Stack Name'), default=stack_name))
        region_choices = [
            'us-east-1', 'us-west-1', 'us-west-2',
            'sa-east-1', 'eu-west-1', 'ap-southeast-1',
            'ap-northeast-1',
        ]
        yield Parameter(
            self.PARAM_REGION,
            Schema(Schema.STRING,
                   default='ap-southeast-1',
                   constraints=[constr.AllowedValues(region_choices)]))
コード例 #5
0
ファイル: loadbalancer.py プロジェクト: Hybrid-Cloud/conveyor
class LoadBalancer(stack_resource.StackResource):
    """Implements a HAProxy-bearing instance as a nested stack.

    The template for the nested stack can be redefined with
    ``loadbalancer_template`` option in ``heat.conf``.

    Generally the image used for the instance must have the following
    packages installed or available for installation at runtime::

        - heat-cfntools and its dependencies like python-psutil
        - cronie
        - socat
        - haproxy

    Current default builtin template uses Fedora 21 x86_64 base cloud image
    (https://getfedora.org/cloud/download/)
    and apart from installing packages goes through some hoops
    around SELinux due to pecularities of heat-cfntools.
    """

    # Python-level property identifiers (left tuple) paired with the CFN
    # template keys (right tuple) via parallel tuple unpacking.
    PROPERTIES = (
        AVAILABILITY_ZONES,
        HEALTH_CHECK,
        INSTANCES,
        LISTENERS,
        APP_COOKIE_STICKINESS_POLICY,
        LBCOOKIE_STICKINESS_POLICY,
        SECURITY_GROUPS,
        SUBNETS,
    ) = (
        'AvailabilityZones',
        'HealthCheck',
        'Instances',
        'Listeners',
        'AppCookieStickinessPolicy',
        'LBCookieStickinessPolicy',
        'SecurityGroups',
        'Subnets',
    )

    # Keys of the nested HealthCheck map property.
    _HEALTH_CHECK_KEYS = (
        HEALTH_CHECK_HEALTHY_THRESHOLD,
        HEALTH_CHECK_INTERVAL,
        HEALTH_CHECK_TARGET,
        HEALTH_CHECK_TIMEOUT,
        HEALTH_CHECK_UNHEALTHY_THRESHOLD,
    ) = (
        'HealthyThreshold',
        'Interval',
        'Target',
        'Timeout',
        'UnhealthyThreshold',
    )

    # Keys of each entry in the Listeners list property.
    _LISTENER_KEYS = (
        LISTENER_INSTANCE_PORT,
        LISTENER_LOAD_BALANCER_PORT,
        LISTENER_PROTOCOL,
        LISTENER_SSLCERTIFICATE_ID,
        LISTENER_POLICY_NAMES,
    ) = (
        'InstancePort',
        'LoadBalancerPort',
        'Protocol',
        'SSLCertificateId',
        'PolicyNames',
    )

    ATTRIBUTES = (
        CANONICAL_HOSTED_ZONE_NAME,
        CANONICAL_HOSTED_ZONE_NAME_ID,
        DNS_NAME,
        SOURCE_SECURITY_GROUP_GROUP_NAME,
        SOURCE_SECURITY_GROUP_OWNER_ALIAS,
    ) = (
        'CanonicalHostedZoneName',
        'CanonicalHostedZoneNameID',
        'DNSName',
        'SourceSecurityGroup.GroupName',
        'SourceSecurityGroup.OwnerAlias',
    )

    properties_schema = {
        AVAILABILITY_ZONES:
        properties.Schema(
            properties.Schema.LIST,
            _('The Availability Zones in which to create the load balancer.'),
            required=True),
        HEALTH_CHECK:
        properties.Schema(
            properties.Schema.MAP,
            _('An application health check for the instances.'),
            schema={
                HEALTH_CHECK_HEALTHY_THRESHOLD:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('The number of consecutive health probe successes '
                      'required before moving the instance to the '
                      'healthy state.'),
                    required=True),
                HEALTH_CHECK_INTERVAL:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('The approximate interval, in seconds, between '
                      'health checks of an individual instance.'),
                    required=True),
                HEALTH_CHECK_TARGET:
                properties.Schema(properties.Schema.STRING,
                                  _('The port being checked.'),
                                  required=True),
                HEALTH_CHECK_TIMEOUT:
                properties.Schema(properties.Schema.INTEGER,
                                  _('Health probe timeout, in seconds.'),
                                  required=True),
                HEALTH_CHECK_UNHEALTHY_THRESHOLD:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('The number of consecutive health probe failures '
                      'required before moving the instance to the '
                      'unhealthy state'),
                    required=True),
            }),
        INSTANCES:
        properties.Schema(properties.Schema.LIST,
                          _('The list of instance IDs load balanced.'),
                          update_allowed=True),
        LISTENERS:
        properties.Schema(
            properties.Schema.LIST,
            _('One or more listeners for this load balancer.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    LISTENER_INSTANCE_PORT:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('TCP port on which the instance server is '
                          'listening.'),
                        required=True),
                    LISTENER_LOAD_BALANCER_PORT:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('The external load balancer port number.'),
                        required=True),
                    LISTENER_PROTOCOL:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The load balancer transport protocol to use.'),
                        required=True,
                        constraints=[
                            constraints.AllowedValues(['TCP', 'HTTP']),
                        ]),
                    LISTENER_SSLCERTIFICATE_ID:
                    properties.Schema(properties.Schema.STRING,
                                      _('Not Implemented.'),
                                      implemented=False),
                    LISTENER_POLICY_NAMES:
                    properties.Schema(properties.Schema.LIST,
                                      _('Not Implemented.'),
                                      implemented=False),
                },
            ),
            required=True),
        APP_COOKIE_STICKINESS_POLICY:
        properties.Schema(properties.Schema.STRING,
                          _('Not Implemented.'),
                          implemented=False),
        LBCOOKIE_STICKINESS_POLICY:
        properties.Schema(properties.Schema.STRING,
                          _('Not Implemented.'),
                          implemented=False),
        SECURITY_GROUPS:
        properties.Schema(properties.Schema.LIST,
                          _('List of Security Groups assigned on current LB.'),
                          update_allowed=True),
        SUBNETS:
        properties.Schema(properties.Schema.LIST,
                          _('Not Implemented.'),
                          implemented=False),
    }

    attributes_schema = {
        CANONICAL_HOSTED_ZONE_NAME:
        attributes.Schema(_(
            "The name of the hosted zone that is associated with the "
            "LoadBalancer."),
                          type=attributes.Schema.STRING),
        CANONICAL_HOSTED_ZONE_NAME_ID:
        attributes.Schema(_(
            "The ID of the hosted zone name that is associated with the "
            "LoadBalancer."),
                          type=attributes.Schema.STRING),
        DNS_NAME:
        attributes.Schema(_("The DNS name for the LoadBalancer."),
                          type=attributes.Schema.STRING),
        SOURCE_SECURITY_GROUP_GROUP_NAME:
        attributes.Schema(_(
            "The security group that you can use as part of your inbound "
            "rules for your LoadBalancer's back-end instances."),
                          type=attributes.Schema.STRING),
        SOURCE_SECURITY_GROUP_OWNER_ALIAS:
        attributes.Schema(_("Owner of the source security group."),
                          type=attributes.Schema.STRING),
    }

    def _haproxy_config_global(self):
        """Return the fixed global/defaults section of the haproxy config."""
        return '''
global
    daemon
    maxconn 256
    stats socket /tmp/.haproxy-stats

defaults
    mode http
    timeout connect 5000ms
    timeout client 50000ms
    timeout server 50000ms
'''

    def _haproxy_config_frontend(self):
        """Return the frontend section bound to the listener port.

        Only the first entry of the Listeners property is used (see the
        simplification note in _haproxy_config).
        """
        listener = self.properties[self.LISTENERS][0]
        lb_port = listener[self.LISTENER_LOAD_BALANCER_PORT]
        return '''
frontend http
    bind *:%s
    default_backend servers
''' % (lb_port)

    def _haproxy_config_backend(self):
        """Return the backend section, with a check timeout if configured."""
        health_chk = self.properties[self.HEALTH_CHECK]
        if health_chk:
            timeout = int(health_chk[self.HEALTH_CHECK_TIMEOUT])
            timeout_check = 'timeout check %ds' % timeout
            spaces = '    '
        else:
            timeout_check = ''
            spaces = ''

        return '''
backend servers
    balance roundrobin
    option http-server-close
    option forwardfor
    option httpchk
%s%s
''' % (spaces, timeout_check)

    def _haproxy_config_servers(self, instances):
        """Return one haproxy ``server`` line per instance.

        Each instance ID is resolved to an IP address via the nova
        client plugin; unresolvable instances fall back to 0.0.0.0.
        """
        listener = self.properties[self.LISTENERS][0]
        inst_port = listener[self.LISTENER_INSTANCE_PORT]
        spaces = '    '
        check = ''
        health_chk = self.properties[self.HEALTH_CHECK]
        if health_chk:
            check = ' check inter %ss fall %s rise %s' % (
                health_chk[self.HEALTH_CHECK_INTERVAL],
                health_chk[self.HEALTH_CHECK_UNHEALTHY_THRESHOLD],
                health_chk[self.HEALTH_CHECK_HEALTHY_THRESHOLD])

        servers = []
        n = 1
        nova_cp = self.client_plugin('nova')
        for i in instances or []:
            ip = nova_cp.server_to_ipaddress(i) or '0.0.0.0'
            LOG.debug('haproxy server:%s' % ip)
            servers.append('%sserver server%d %s:%s%s' %
                           (spaces, n, ip, inst_port, check))
            n = n + 1
        return '\n'.join(servers)

    def _haproxy_config(self, instances):
        """Assemble the full haproxy.cfg contents for the given instances."""
        # initial simplifications:
        # - only one Listener
        # - only http (no tcp or ssl)
        #
        # option httpchk HEAD /check.txt HTTP/1.0
        return '%s%s%s%s\n' % (self._haproxy_config_global(),
                               self._haproxy_config_frontend(),
                               self._haproxy_config_backend(),
                               self._haproxy_config_servers(instances))

    def get_parsed_template(self):
        """Return the nested-stack template as a parsed dict.

        Uses the operator-supplied ``loadbalancer_template`` file when
        configured, otherwise the built-in default template.
        """
        if cfg.CONF.loadbalancer_template:
            with open(cfg.CONF.loadbalancer_template) as templ_fd:
                LOG.info(_LI('Using custom loadbalancer template %s'),
                         cfg.CONF.loadbalancer_template)
                contents = templ_fd.read()
        else:
            contents = lb_template_default
        return template_format.parse(contents)

    def child_params(self):
        """Return the parameters passed to the nested stack."""
        params = {}

        params['SecurityGroups'] = self.properties[self.SECURITY_GROUPS]
        # If the owning stack defines KeyName, we use that key for the nested
        # template, otherwise use no key
        for magic_param in ('KeyName', 'LbFlavor', 'LBTimeout', 'LbImageId'):
            if magic_param in self.stack.parameters:
                params[magic_param] = self.stack.parameters[magic_param]

        return params

    def child_template(self):
        """Return the nested-stack template, pruned of unused KeyName."""
        templ = self.get_parsed_template()

        # If the owning stack defines KeyName, we use that key for the nested
        # template, otherwise use no key
        if 'KeyName' not in self.stack.parameters:
            del templ['Resources']['LB_instance']['Properties']['KeyName']
            del templ['Parameters']['KeyName']

        return templ

    def handle_create(self):
        """Create the nested stack.

        When the Instances property is set, the generated haproxy
        configuration is injected into the nested template's cfn-init
        file metadata before creation.
        """
        templ = self.child_template()
        params = self.child_params()

        if self.properties[self.INSTANCES]:
            md = templ['Resources']['LB_instance']['Metadata']
            files = md['AWS::CloudFormation::Init']['config']['files']
            # NOTE(review): this local shadows the module-level ``cfg``
            # (oslo config). Harmless here because cfg.CONF is not used
            # below, but renaming it would be safer.
            cfg = self._haproxy_config(self.properties[self.INSTANCES])
            files['/etc/haproxy/haproxy.cfg']['content'] = cfg

        return self.create_with_template(templ, params)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-generate the Metadata.

        Save it to the db.
        Rely on the cfn-hup to reconfigure HAProxy.
        """

        new_props = json_snippet.properties(self.properties_schema,
                                            self.context)

        # Valid use cases are:
        # - Membership controlled by members property in template
        # - Empty members property in template; membership controlled by
        #   "updates" triggered from autoscaling group.
        # Mixing the two will lead to undefined behaviour.
        if (self.INSTANCES in prop_diff
                and (self.properties[self.INSTANCES] is not None
                     or new_props[self.INSTANCES] is not None)):
            # NOTE(review): ``cfg`` shadows the module-level oslo config
            # object within this method; cfg.CONF is not needed here.
            cfg = self._haproxy_config(prop_diff[self.INSTANCES])

            md = self.nested()['LB_instance'].metadata_get()
            files = md['AWS::CloudFormation::Init']['config']['files']
            files['/etc/haproxy/haproxy.cfg']['content'] = cfg

            self.nested()['LB_instance'].metadata_set(md)

        if self.SECURITY_GROUPS in prop_diff:
            templ = self.child_template()
            params = self.child_params()
            params['SecurityGroups'] = new_props[self.SECURITY_GROUPS]
            self.update_with_template(templ, params)

    def check_update_complete(self, updater):
        """Because we are not calling update_with_template, return True."""
        return True

    def validate(self):
        """Validate any of the provided params."""
        res = super(LoadBalancer, self).validate()
        if res:
            return res

        # The custom template file must exist and be readable.
        if (cfg.CONF.loadbalancer_template
                and not os.access(cfg.CONF.loadbalancer_template, os.R_OK)):
            msg = _('Custom LoadBalancer template can not be found')
            raise exception.StackValidationFailed(message=msg)

        health_chk = self.properties[self.HEALTH_CHECK]
        if health_chk:
            interval = float(health_chk[self.HEALTH_CHECK_INTERVAL])
            timeout = float(health_chk[self.HEALTH_CHECK_TIMEOUT])
            if interval < timeout:
                return {'Error': 'Interval must be larger than Timeout'}

    def get_reference_id(self):
        """Return the resource name as the reference ID."""
        return six.text_type(self.name)

    def _resolve_attribute(self, name):
        """We don't really support any of these yet."""
        if name == self.DNS_NAME:
            return self.get_output('PublicIp')
        elif name in self.attributes_schema:
            # Not sure if we should return anything for the other attribs
            # since they aren't really supported in any meaningful way
            return ''
コード例 #6
0
ファイル: loadbalancer.py プロジェクト: Hybrid-Cloud/conveyor
class LoadBalancer(neutron.NeutronResource):
    """A resource for creating LBaaS v2 Load Balancers.

    This resource creates and manages Neutron LBaaS v2 Load Balancers,
    which allows traffic to be directed between servers.
    """

    support_status = support.SupportStatus(version='6.0.0')

    required_service_extension = 'lbaasv2'

    PROPERTIES = (
        DESCRIPTION, NAME, PROVIDER, VIP_ADDRESS, VIP_SUBNET,
        ADMIN_STATE_UP, TENANT_ID
    ) = (
        'description', 'name', 'provider', 'vip_address', 'vip_subnet',
        'admin_state_up', 'tenant_id'
    )

    ATTRIBUTES = (
        VIP_ADDRESS_ATTR, VIP_PORT_ATTR, VIP_SUBNET_ATTR
    ) = (
        'vip_address', 'vip_port_id', 'vip_subnet_id'
    )

    properties_schema = {
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of this Load Balancer.'),
            update_allowed=True,
            default=''
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of this Load Balancer.'),
            update_allowed=True
        ),
        PROVIDER: properties.Schema(
            properties.Schema.STRING,
            _('Provider for this Load Balancer.'),
            constraints=[constraints.AllowedValues(['vlb'])]
        ),
        VIP_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address for the VIP.'),
            constraints=[
                constraints.CustomConstraint('ip_addr')
            ],
        ),
        VIP_SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('The name or ID of the subnet on which to allocate the VIP '
              'address.'),
            constraints=[
                constraints.CustomConstraint('neutron.subnet')
            ],
            required=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of this Load Balancer.'),
            default=True,
            update_allowed=True,
            constraints=[constraints.AllowedValues(['True'])]
        ),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the tenant who owns the Load Balancer. Only '
              'administrative users can specify a tenant ID other than '
              'their own.'),
            constraints=[
                constraints.CustomConstraint('keystone.project')
            ],
        )
    }

    attributes_schema = {
        VIP_ADDRESS_ATTR: attributes.Schema(
            _('The VIP address of the LoadBalancer.'),
            type=attributes.Schema.STRING
        ),
        VIP_PORT_ATTR: attributes.Schema(
            _('The VIP port of the LoadBalancer.'),
            type=attributes.Schema.STRING
        ),
        VIP_SUBNET_ATTR: attributes.Schema(
            _('The VIP subnet of the LoadBalancer.'),
            type=attributes.Schema.STRING
        )
    }

    def handle_create(self):
        """Create the load balancer via the Neutron LBaaS v2 API.

        The vip_subnet property is resolved to a subnet ID before the
        create call, and the returned load balancer ID is stored as the
        resource ID.
        """
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name()
        )

        self.client_plugin().resolve_subnet(
            properties, self.VIP_SUBNET, 'vip_subnet_id')

        lb = self.client().create_loadbalancer(
            {'loadbalancer': properties})['loadbalancer']
        self.resource_id_set(lb['id'])

    def check_create_complete(self, data):
        """Poll the load balancer status until creation is finished."""
        return self.client_plugin().check_lb_status(self.resource_id)

    def _show_resource(self):
        """Return the current load balancer representation from Neutron."""
        return self.client().show_loadbalancer(
            self.resource_id)['loadbalancer']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed properties to Neutron; returns the diff for polling."""
        if prop_diff:
            self.client().update_loadbalancer(
                self.resource_id,
                {'loadbalancer': prop_diff})
        return prop_diff

    def check_update_complete(self, prop_diff):
        """Poll status only when an update was actually issued."""
        if prop_diff:
            return self.client_plugin().check_lb_status(self.resource_id)
        return True

    def handle_delete(self):
        # Deletion is driven entirely from check_delete_complete, which
        # waits for the load balancer to leave a transitional state first.
        pass

    def check_delete_complete(self, data):
        """Delete the load balancer and poll until it is gone.

        Returns True once Neutron reports the resource as not found;
        otherwise issues the delete (also for balancers stuck in an
        error state) and keeps polling.
        """
        if self.resource_id is None:
            return True

        try:
            try:
                if self.client_plugin().check_lb_status(self.resource_id):
                    self.client().delete_loadbalancer(self.resource_id)
            except exception.ResourceInError:
                # Still try to delete loadbalancer in error state
                self.client().delete_loadbalancer(self.resource_id)
        except exceptions.NotFound:
            # Resource is gone
            return True

        return False
コード例 #7
0
ファイル: eip.py プロジェクト: Hybrid-Cloud/conveyor
class ElasticIp(resource.Resource):
    """An elastic-IP resource backed by Neutron or Nova floating IPs.

    When the ``Domain`` property is set the floating IP is allocated
    through Neutron; otherwise the Nova floating-ip API is used. The IP
    can optionally be associated with an instance via ``InstanceId``.
    """

    PROPERTIES = (
        DOMAIN,
        INSTANCE_ID,
    ) = (
        'Domain',
        'InstanceId',
    )

    ATTRIBUTES = (ALLOCATION_ID, ) = ('AllocationId', )

    properties_schema = {
        DOMAIN:
        properties.Schema(
            properties.Schema.STRING,
            _('Set to "vpc" to have IP address allocation associated to your '
              'VPC.'),
            constraints=[
                constraints.AllowedValues(['vpc']),
            ]),
        INSTANCE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('Instance ID to associate with EIP.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('nova.server')]),
    }

    attributes_schema = {
        ALLOCATION_ID:
        attributes.Schema(_(
            'ID that AWS assigns to represent the allocation of the address '
            'for use with Amazon VPC. Returned only for VPC elastic IP '
            'addresses.'),
                          type=attributes.Schema.STRING),
    }

    default_client_name = 'nova'

    def __init__(self, name, json_snippet, stack):
        # Cache for the resolved floating IP address; filled lazily by
        # _ipaddress().
        super(ElasticIp, self).__init__(name, json_snippet, stack)
        self.ipaddress = None

    def _ipaddress(self):
        """Return the floating IP address for this resource, or ''.

        Lazily resolves and caches the address from Neutron (Domain set)
        or Nova (otherwise); not-found errors leave the cache empty.
        """
        if self.ipaddress is None and self.resource_id is not None:
            if self.properties[self.DOMAIN]:
                try:
                    ips = self.neutron().show_floatingip(self.resource_id)
                except Exception as ex:
                    self.client_plugin('neutron').ignore_not_found(ex)
                else:
                    self.ipaddress = ips['floatingip']['floating_ip_address']
            else:
                try:
                    ips = self.client().floating_ips.get(self.resource_id)
                except Exception as e:
                    self.client_plugin('nova').ignore_not_found(e)
                else:
                    self.ipaddress = ips.ip
        return self.ipaddress or ''

    def handle_create(self):
        """Allocate a floating IP for the current tenant."""
        ips = None
        if self.properties[self.DOMAIN]:
            # VPC-style allocation via Neutron on the external network.
            ext_net = internet_gateway.InternetGateway.get_external_network_id(
                self.neutron())
            props = {'floating_network_id': ext_net}
            ips = self.neutron().create_floatingip({'floatingip':
                                                    props})['floatingip']
            self.ipaddress = ips['floating_ip_address']
            self.resource_id_set(ips['id'])
            LOG.info(_LI('ElasticIp create %s'), str(ips))
        else:
            try:
                ips = self.client().floating_ips.create()
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    if self.client_plugin('nova').is_not_found(e):
                        LOG.error(
                            _LE("No default floating IP pool configured."
                                " Set 'default_floating_pool' in "
                                "nova.conf."))

            if ips:
                self.ipaddress = ips.ip
                self.resource_id_set(ips.id)
                LOG.info(_LI('ElasticIp create %s'), str(ips))

        # Optionally associate the new IP with the configured instance.
        instance_id = self.properties[self.INSTANCE_ID]
        if instance_id:
            server = self.client().servers.get(instance_id)
            server.add_floating_ip(self._ipaddress())

    def handle_delete(self):
        """Disassociate (best effort) and deallocate the floating IP."""
        if self.resource_id is None:
            return
        # may be just create an eip when creation, or create the association
        # failed when creation, there will no association, if we attempt to
        # disassociate, an exception will raised, we need
        # to catch and ignore it, and then to deallocate the eip
        instance_id = self.properties[self.INSTANCE_ID]
        if instance_id:
            try:
                server = self.client().servers.get(instance_id)
                if server:
                    server.remove_floating_ip(self._ipaddress())
            except Exception as e:
                is_not_found = self.client_plugin('nova').is_not_found(e)
                is_unprocessable_entity = self.client_plugin(
                    'nova').is_unprocessable_entity(e)

                if (not is_not_found and not is_unprocessable_entity):
                    raise

        # deallocate the eip
        if self.properties[self.DOMAIN]:
            with self.client_plugin('neutron').ignore_not_found:
                self.neutron().delete_floatingip(self.resource_id)
        else:
            with self.client_plugin('nova').ignore_not_found:
                self.client().floating_ips.delete(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-associate the IP when the InstanceId property changes."""
        if prop_diff:
            if self.INSTANCE_ID in prop_diff:
                instance_id = prop_diff.get(self.INSTANCE_ID)
                if instance_id:
                    # no need to remove the floating ip from the old instance,
                    # nova does this automatically when calling
                    # add_floating_ip().
                    server = self.client().servers.get(instance_id)
                    server.add_floating_ip(self._ipaddress())
                else:
                    # to remove the floating_ip from the old instance
                    instance_id_old = self.properties[self.INSTANCE_ID]
                    if instance_id_old:
                        server = self.client().servers.get(instance_id_old)
                        server.remove_floating_ip(self._ipaddress())

    def get_reference_id(self):
        """Return the IP address if known, otherwise the resource name."""
        eip = self._ipaddress()
        if eip:
            return six.text_type(eip)
        else:
            return six.text_type(self.name)

    def _resolve_attribute(self, name):
        # AllocationId is simply the backing floating-IP resource ID.
        if name == self.ALLOCATION_ID:
            return six.text_type(self.resource_id)
コード例 #8
0
class S3Bucket(resource.Resource):
    """An AWS-compatible S3 bucket implemented on top of a Swift container.

    The bucket's ACL, static-website configuration and tags are mapped
    onto Swift container headers at creation time.
    """

    PROPERTIES = (
        ACCESS_CONTROL,
        WEBSITE_CONFIGURATION,
        TAGS,
    ) = (
        'AccessControl',
        'WebsiteConfiguration',
        'Tags',
    )

    _WEBSITE_CONFIGURATION_KEYS = (
        WEBSITE_CONFIGURATION_INDEX_DOCUMENT,
        WEBSITE_CONFIGURATION_ERROR_DOCUMENT,
    ) = (
        'IndexDocument',
        'ErrorDocument',
    )

    _TAG_KEYS = (
        TAG_KEY,
        TAG_VALUE,
    ) = (
        'Key',
        'Value',
    )

    ATTRIBUTES = (
        DOMAIN_NAME,
        WEBSITE_URL,
    ) = (
        'DomainName',
        'WebsiteURL',
    )

    properties_schema = {
        ACCESS_CONTROL:
        properties.Schema(
            properties.Schema.STRING,
            _('A predefined access control list (ACL) that grants '
              'permissions on the bucket.'),
            constraints=[
                constraints.AllowedValues([
                    'Private', 'PublicRead', 'PublicReadWrite',
                    'AuthenticatedRead', 'BucketOwnerRead',
                    'BucketOwnerFullControl'
                ]),
            ]),
        WEBSITE_CONFIGURATION:
        properties.Schema(
            properties.Schema.MAP,
            _('Information used to configure the bucket as a static website.'),
            schema={
                WEBSITE_CONFIGURATION_INDEX_DOCUMENT:
                properties.Schema(properties.Schema.STRING,
                                  _('The name of the index document.')),
                WEBSITE_CONFIGURATION_ERROR_DOCUMENT:
                properties.Schema(properties.Schema.STRING,
                                  _('The name of the error document.')),
            }),
        TAGS:
        properties.Schema(properties.Schema.LIST,
                          _('Tags to attach to the bucket.'),
                          schema=properties.Schema(
                              properties.Schema.MAP,
                              schema={
                                  TAG_KEY:
                                  properties.Schema(properties.Schema.STRING,
                                                    _('The tag key name.'),
                                                    required=True),
                                  TAG_VALUE:
                                  properties.Schema(properties.Schema.STRING,
                                                    _('The tag value.'),
                                                    required=True),
                              },
                          )),
    }

    attributes_schema = {
        DOMAIN_NAME:
        attributes.Schema(_('The DNS name of the specified bucket.'),
                          type=attributes.Schema.STRING),
        WEBSITE_URL:
        attributes.Schema(_('The website endpoint for the specified bucket.'),
                          type=attributes.Schema.STRING),
    }

    default_client_name = 'swift'

    def tags_to_headers(self):
        """Convert the TAGS property into Swift container metadata headers."""
        if self.properties[self.TAGS] is None:
            return {}
        return dict(
            ('X-Container-Meta-S3-Tag-' + tm[self.TAG_KEY], tm[self.TAG_VALUE])
            for tm in self.properties[self.TAGS])

    def handle_create(self):
        """Create a bucket."""
        container = self.physical_resource_name()
        headers = self.tags_to_headers()
        if self.properties[self.WEBSITE_CONFIGURATION] is not None:
            sc = self.properties[self.WEBSITE_CONFIGURATION]
            index_doc = sc[self.WEBSITE_CONFIGURATION_INDEX_DOCUMENT]
            error_doc = sc[self.WEBSITE_CONFIGURATION_ERROR_DOCUMENT]
            # we will assume that swift is configured for the staticweb
            # wsgi middleware
            headers['X-Container-Meta-Web-Index'] = index_doc
            headers['X-Container-Meta-Web-Error'] = error_doc

        con = self.context
        ac = self.properties[self.ACCESS_CONTROL]
        # BUG FIX: the format string had been scrubbed to '******', which
        # raises TypeError when applied to a tuple with %. Restore the
        # '<tenant>:<username>' form used by Swift container ACLs.
        tenant_username = '%s:%s' % (con.tenant, con.username)
        if ac in ('PublicRead', 'PublicReadWrite'):
            # '.r:*' grants anonymous read access in Swift ACL syntax.
            headers['X-Container-Read'] = '.r:*'
        elif ac == 'AuthenticatedRead':
            headers['X-Container-Read'] = con.tenant
        else:
            headers['X-Container-Read'] = tenant_username

        if ac == 'PublicReadWrite':
            headers['X-Container-Write'] = '.r:*'
        else:
            headers['X-Container-Write'] = tenant_username

        self.client().put_container(container, headers)
        self.resource_id_set(container)

    def handle_delete(self):
        """Perform specified delete policy.

        Deletion is refused (ResourceActionNotSupported) when the
        container still holds objects; a missing container is ignored.
        """
        if self.resource_id is None:
            return
        try:
            self.client().delete_container(self.resource_id)
        except Exception as ex:
            if self.client_plugin().is_conflict(ex):
                container, objects = self.client().get_container(
                    self.resource_id)
                if objects:
                    msg = _("The bucket you tried to delete is not empty (%s)."
                            ) % self.resource_id
                    raise exception.ResourceActionNotSupported(action=msg)
            self.client_plugin().ignore_not_found(ex)

    def get_reference_id(self):
        """Return the container name used as the physical resource id."""
        return six.text_type(self.resource_id)

    def _resolve_attribute(self, name):
        """Derive DomainName / WebsiteURL from the Swift auth endpoint."""
        url = self.client().get_auth()[0]
        parsed = list(urlparse.urlparse(url))
        if name == self.DOMAIN_NAME:
            return parsed[1].split(':')[0]
        elif name == self.WEBSITE_URL:
            return '%s://%s%s/%s' % (parsed[0], parsed[1], parsed[2],
                                     self.resource_id)
コード例 #9
0
class AWSScalingPolicy(heat_sp.AutoScalingPolicy):
    """CloudFormation-compatible scaling policy for an AutoScaling group."""

    PROPERTIES = (
        AUTO_SCALING_GROUP_NAME, SCALING_ADJUSTMENT, ADJUSTMENT_TYPE,
        COOLDOWN, MIN_ADJUSTMENT_STEP,
    ) = (
        'AutoScalingGroupName', 'ScalingAdjustment', 'AdjustmentType',
        'Cooldown', 'MinAdjustmentStep',
    )

    ATTRIBUTES = (
        ALARM_URL,
    ) = (
        'AlarmUrl',
    )

    properties_schema = {
        AUTO_SCALING_GROUP_NAME: properties.Schema(
            properties.Schema.STRING,
            _('AutoScaling group name to apply policy to.'),
            required=True),
        SCALING_ADJUSTMENT: properties.Schema(
            properties.Schema.INTEGER,
            _('Size of adjustment.'),
            required=True,
            update_allowed=True),
        ADJUSTMENT_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Type of adjustment (absolute or percentage).'),
            required=True,
            constraints=[
                constraints.AllowedValues(
                    [sc_util.CFN_CHANGE_IN_CAPACITY,
                     sc_util.CFN_EXACT_CAPACITY,
                     sc_util.CFN_PERCENT_CHANGE_IN_CAPACITY]),
            ],
            update_allowed=True),
        COOLDOWN: properties.Schema(
            properties.Schema.INTEGER,
            _('Cooldown period, in seconds.'),
            update_allowed=True),
        MIN_ADJUSTMENT_STEP: properties.Schema(
            properties.Schema.INTEGER,
            _('Minimum number of resources that are added or removed '
              'when the AutoScaling group scales up or down. This can '
              'be used only when specifying PercentChangeInCapacity '
              'for the AdjustmentType property.'),
            constraints=[
                constraints.Range(
                    min=0,
                ),
            ],
            update_allowed=True),
    }

    attributes_schema = {
        ALARM_URL: attributes.Schema(
            _("A signed url to handle the alarm. (Heat extension)."),
            type=attributes.Schema.STRING),
    }

    def _validate_min_adjustment_step(self):
        """Reject MinAdjustmentStep unless used with PercentChangeInCapacity."""
        step = self.properties.get(self.MIN_ADJUSTMENT_STEP)
        if step is None:
            return
        adj_type = self.properties.get(self.ADJUSTMENT_TYPE)
        if adj_type != sc_util.CFN_PERCENT_CHANGE_IN_CAPACITY:
            raise exception.ResourcePropertyValueDependency(
                prop1=self.MIN_ADJUSTMENT_STEP,
                prop2=self.ADJUSTMENT_TYPE,
                value=sc_util.CFN_PERCENT_CHANGE_IN_CAPACITY)

    def get_reference_id(self):
        """Return the signed alarm URL once created, else the logical name."""
        if self.resource_id is None:
            return six.text_type(self.name)
        return six.text_type(self._get_ec2_signed_url())
コード例 #10
0
class SoftwareDeployment(signal_responder.SignalResponder):
    """This resource associates a server with some configuration.

    The configuration is to be deployed to that server.

    A deployment allows input values to be specified which map to the inputs
    schema defined in the config resource. These input values are interpreted
    by the configuration tool in a tool-specific manner.

    Whenever this resource goes to an IN_PROGRESS state, it creates an
    ephemeral config that includes the inputs values plus a number of extra
    inputs which have names prefixed with deploy_. The extra inputs relate
    to the current state of the stack, along with the information and
    credentials required to signal back the deployment results.

    Unless signal_transport=NO_SIGNAL, this resource will remain in an
    IN_PROGRESS state until the server signals it with the output values
    for that deployment. Those output values are then available as resource
    attributes, along with the default attributes deploy_stdout,
    deploy_stderr and deploy_status_code.

    Specifying actions other than the default CREATE and UPDATE will result
    in the deployment being triggered in those actions. For example this would
    allow cleanup configuration to be performed during actions SUSPEND and
    DELETE. A config could be designed to only work with some specific
    actions, or a config can read the value of the deploy_action input to
    allow conditional logic to perform different configuration for different
    actions.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (CONFIG, SERVER, INPUT_VALUES, DEPLOY_ACTIONS, NAME,
                  SIGNAL_TRANSPORT) = ('config', 'server', 'input_values',
                                       'actions', 'name', 'signal_transport')

    # Lifecycle actions that may legitimately trigger a deployment.
    ALLOWED_DEPLOY_ACTIONS = (
        resource.Resource.CREATE,
        resource.Resource.UPDATE,
        resource.Resource.DELETE,
        resource.Resource.SUSPEND,
        resource.Resource.RESUME,
    )

    ATTRIBUTES = (STDOUT, STDERR,
                  STATUS_CODE) = ('deploy_stdout', 'deploy_stderr',
                                  'deploy_status_code')

    # Names of the extra "deploy_"-prefixed inputs injected into each
    # derived config (see _build_derived_inputs).
    DERIVED_CONFIG_INPUTS = (
        DEPLOY_SERVER_ID, DEPLOY_ACTION, DEPLOY_SIGNAL_ID, DEPLOY_STACK_ID,
        DEPLOY_RESOURCE_NAME, DEPLOY_AUTH_URL, DEPLOY_USERNAME,
        DEPLOY_PASSWORD, DEPLOY_PROJECT_ID, DEPLOY_USER_ID, DEPLOY_SIGNAL_VERB,
        DEPLOY_SIGNAL_TRANSPORT,
        DEPLOY_QUEUE_ID) = ('deploy_server_id', 'deploy_action',
                            'deploy_signal_id', 'deploy_stack_id',
                            'deploy_resource_name', 'deploy_auth_url',
                            'deploy_username', 'deploy_password',
                            'deploy_project_id', 'deploy_user_id',
                            'deploy_signal_verb', 'deploy_signal_transport',
                            'deploy_queue_id')

    SIGNAL_TRANSPORTS = (CFN_SIGNAL, TEMP_URL_SIGNAL, HEAT_SIGNAL, NO_SIGNAL,
                         ZAQAR_SIGNAL) = ('CFN_SIGNAL', 'TEMP_URL_SIGNAL',
                                          'HEAT_SIGNAL', 'NO_SIGNAL',
                                          'ZAQAR_SIGNAL')

    properties_schema = {
        CONFIG:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of software configuration resource to execute when '
              'applying to the server.'),
            update_allowed=True),
        SERVER:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of resource to apply configuration to. '
              'Normally this should be a Nova server ID.'),
            required=True,
        ),
        INPUT_VALUES:
        properties.Schema(
            properties.Schema.MAP,
            _('Input values to apply to the software configuration on this '
              'server.'),
            update_allowed=True),
        DEPLOY_ACTIONS:
        properties.Schema(
            properties.Schema.LIST,
            _('Which lifecycle actions of the deployment resource will result '
              'in this deployment being triggered.'),
            update_allowed=True,
            default=[resource.Resource.CREATE, resource.Resource.UPDATE],
            constraints=[constraints.AllowedValues(ALLOWED_DEPLOY_ACTIONS)]),
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Name of the derived config associated with this deployment. '
              'This is used to apply a sort order to the list of '
              'configurations currently deployed to a server.'),
            update_allowed=True),
        SIGNAL_TRANSPORT:
        properties.Schema(
            properties.Schema.STRING,
            _('How the server should signal to heat with the deployment '
              'output values. CFN_SIGNAL will allow an HTTP POST to a CFN '
              'keypair signed URL. TEMP_URL_SIGNAL will create a '
              'Swift TempURL to be signaled via HTTP PUT. HEAT_SIGNAL '
              'will allow calls to the Heat API resource-signal using the '
              'provided keystone credentials. ZAQAR_SIGNAL will create a '
              'dedicated zaqar queue to be signaled using the provided '
              'keystone credentials. NO_SIGNAL will result in the resource '
              'going to the COMPLETE state without waiting for any signal.'),
            default=cfg.CONF.default_deployment_signal_transport,
            constraints=[
                constraints.AllowedValues(SIGNAL_TRANSPORTS),
            ]),
    }

    attributes_schema = {
        STDOUT:
        attributes.Schema(
            _("Captured stdout from the configuration execution."),
            type=attributes.Schema.STRING),
        STDERR:
        attributes.Schema(
            _("Captured stderr from the configuration execution."),
            type=attributes.Schema.STRING),
        STATUS_CODE:
        attributes.Schema(
            _("Returned status code from the configuration execution."),
            type=attributes.Schema.STRING),
    }

    default_client_name = 'heat'

    no_signal_actions = ()

    # No need to make metadata_update() calls since deployments have a
    # dedicated API for changing state on signals
    signal_needs_metadata_updates = False

    # Predicates for the configured signal transport; each simply compares
    # the SIGNAL_TRANSPORT property against one of SIGNAL_TRANSPORTS.
    def _signal_transport_cfn(self):
        return self.properties[self.SIGNAL_TRANSPORT] == self.CFN_SIGNAL

    def _signal_transport_heat(self):
        return self.properties[self.SIGNAL_TRANSPORT] == self.HEAT_SIGNAL

    def _signal_transport_none(self):
        return self.properties[self.SIGNAL_TRANSPORT] == self.NO_SIGNAL

    def _signal_transport_temp_url(self):
        return self.properties[self.SIGNAL_TRANSPORT] == self.TEMP_URL_SIGNAL

    def _signal_transport_zaqar(self):
        return self.properties.get(self.SIGNAL_TRANSPORT) == self.ZAQAR_SIGNAL

    def _build_properties(self, config_id, action):
        """Build the deployment properties dict for the RPC API.

        With NO_SIGNAL the deployment is marked COMPLETE immediately;
        otherwise it starts IN_PROGRESS until the server signals back.
        """
        props = {
            'config_id': config_id,
            'action': action,
            'input_values': self.properties.get(self.INPUT_VALUES)
        }

        if self._signal_transport_none():
            props['status'] = SoftwareDeployment.COMPLETE
            props['status_reason'] = _('Not waiting for outputs signal')
        else:
            props['status'] = SoftwareDeployment.IN_PROGRESS
            props['status_reason'] = _('Deploy data available')
        return props

    def _delete_derived_config(self, derived_config_id):
        """Delete an ephemeral derived config, ignoring NotFound errors."""
        try:
            self.rpc_client().delete_software_config(self.context,
                                                     derived_config_id)
        except Exception as ex:
            self.rpc_client().ignore_error_named(ex, 'NotFound')

    def _get_derived_config(self, action, source_config):
        # Create a new ephemeral config derived from the source config for
        # this action and return its ID.
        derived_params = self._build_derived_config_params(
            action, source_config)
        derived_config = self.rpc_client().create_software_config(
            self.context, **derived_params)
        return derived_config[rpc_api.SOFTWARE_CONFIG_ID]

    def _handle_action(self, action):
        """Trigger (or skip) a deployment for the given lifecycle action.

        Returns the deployment dict when a signal is expected, or None when
        the action does not apply or signal_transport is NO_SIGNAL.
        """
        if self.properties.get(self.CONFIG):
            config = self.rpc_client().show_software_config(
                self.context, self.properties.get(self.CONFIG))
        else:
            config = {}

        # 'component' group configs declare their own applicable actions;
        # otherwise the resource's DEPLOY_ACTIONS property decides.
        if config.get(rpc_api.SOFTWARE_CONFIG_GROUP) == 'component':
            valid_actions = set()
            for conf in config['config']['configs']:
                valid_actions.update(conf['actions'])
            if action not in valid_actions:
                return
        elif action not in self.properties[self.DEPLOY_ACTIONS]:
            return

        props = self._build_properties(
            self._get_derived_config(action, config), action)

        if self.resource_id is None:
            # First deployment: generate an ID up front so it can double as
            # the deployment ID in the RPC call.
            resource_id = str(uuid.uuid4())
            self.resource_id_set(resource_id)
            sd = self.rpc_client().create_software_deployment(
                self.context,
                deployment_id=resource_id,
                server_id=self.properties[SoftwareDeployment.SERVER],
                stack_user_project_id=self.stack.stack_user_project_id,
                **props)
        else:
            # Subsequent action: update in place, then clean up the previous
            # derived config which is no longer referenced.
            sd = self.rpc_client().show_software_deployment(
                self.context, self.resource_id)
            prev_derived_config = sd[rpc_api.SOFTWARE_DEPLOYMENT_CONFIG_ID]
            sd = self.rpc_client().update_software_deployment(
                self.context, deployment_id=self.resource_id, **props)
            if prev_derived_config:
                self._delete_derived_config(prev_derived_config)
        if not self._signal_transport_none():
            # NOTE(pshchelo): sd is a simple dict, easy to serialize,
            # does not need fixing re LP bug #1393268
            return sd

    def _check_complete(self):
        """Poll deployment status; True when COMPLETE, raise on FAILED."""
        sd = self.rpc_client().check_software_deployment(
            self.context, self.resource_id, self.stack.time_remaining())
        status = sd[rpc_api.SOFTWARE_DEPLOYMENT_STATUS]
        if status == SoftwareDeployment.COMPLETE:
            return True
        elif status == SoftwareDeployment.FAILED:
            status_reason = sd[rpc_api.SOFTWARE_DEPLOYMENT_STATUS_REASON]
            message = _("Deployment to server failed: %s") % status_reason
            LOG.info(message)
            raise exception.Error(message)

    def empty_config(self):
        """Fallback config body used when the source provides none."""
        return ''

    def _build_derived_config_params(self, action, source):
        """Assemble the create_software_config kwargs for a derived config."""
        scl = sc.SoftwareConfig
        derived_inputs = self._build_derived_inputs(action, source)
        derived_options = self._build_derived_options(action, source)
        derived_config = self._build_derived_config(action, source,
                                                    derived_inputs,
                                                    derived_options)
        derived_name = self.properties.get(self.NAME) or source.get(scl.NAME)
        return {
            scl.GROUP: source.get(scl.GROUP) or 'Heat::Ungrouped',
            scl.CONFIG: derived_config or self.empty_config(),
            scl.OPTIONS: derived_options,
            scl.INPUTS: derived_inputs,
            scl.OUTPUTS: source.get(scl.OUTPUTS),
            scl.NAME: derived_name or self.physical_resource_name()
        }

    def _build_derived_config(self, action, source, derived_inputs,
                              derived_options):
        # Hook point for subclasses; base class passes the config through.
        return source.get(sc.SoftwareConfig.CONFIG)

    def _build_derived_options(self, action, source):
        # Hook point for subclasses; base class passes the options through.
        return source.get(sc.SoftwareConfig.OPTIONS)

    def _build_derived_inputs(self, action, source):
        """Combine declared inputs, user input values and deploy_* inputs.

        Declared inputs get their value from INPUT_VALUES (or their
        default); leftover input values become ad-hoc String inputs; then
        the deploy_* metadata and signal-transport credentials are appended.
        """
        scl = sc.SoftwareConfig
        inputs = copy.deepcopy(source.get(scl.INPUTS)) or []
        input_values = dict(self.properties.get(self.INPUT_VALUES) or {})

        for inp in inputs:
            input_key = inp[scl.NAME]
            inp['value'] = input_values.pop(input_key, inp[scl.DEFAULT])

        # for any input values that do not have a declared input, add
        # a derived declared input so that they can be used as config
        # inputs
        for inpk, inpv in input_values.items():
            inputs.append({scl.NAME: inpk, scl.TYPE: 'String', 'value': inpv})

        inputs.extend([{
            scl.NAME: self.DEPLOY_SERVER_ID,
            scl.DESCRIPTION: _('ID of the server being deployed to'),
            scl.TYPE: 'String',
            'value': self.properties[self.SERVER]
        }, {
            scl.NAME:
            self.DEPLOY_ACTION,
            scl.DESCRIPTION:
            _('Name of the current action being deployed'),
            scl.TYPE:
            'String',
            'value':
            action
        }, {
            scl.NAME:
            self.DEPLOY_STACK_ID,
            scl.DESCRIPTION:
            _('ID of the stack this deployment belongs to'),
            scl.TYPE:
            'String',
            'value':
            self.stack.identifier().stack_path()
        }, {
            scl.NAME:
            self.DEPLOY_RESOURCE_NAME,
            scl.DESCRIPTION:
            _('Name of this deployment resource in the '
              'stack'),
            scl.TYPE:
            'String',
            'value':
            self.name
        }, {
            scl.NAME:
            self.DEPLOY_SIGNAL_TRANSPORT,
            scl.DESCRIPTION:
            _('How the server should signal to heat with '
              'the deployment output values.'),
            scl.TYPE:
            'String',
            'value':
            self.properties[self.SIGNAL_TRANSPORT]
        }])
        # Transport-specific inputs: signal URL/verb for CFN and TempURL,
        # keystone credentials for HEAT/ZAQAR, plus the queue ID for ZAQAR.
        if self._signal_transport_cfn():
            inputs.append({
                scl.NAME:
                self.DEPLOY_SIGNAL_ID,
                scl.DESCRIPTION:
                _('ID of signal to use for signaling '
                  'output values'),
                scl.TYPE:
                'String',
                'value':
                self._get_ec2_signed_url()
            })
            inputs.append({
                scl.NAME:
                self.DEPLOY_SIGNAL_VERB,
                scl.DESCRIPTION:
                _('HTTP verb to use for signaling '
                  'output values'),
                scl.TYPE:
                'String',
                'value':
                'POST'
            })
        elif self._signal_transport_temp_url():
            inputs.append({
                scl.NAME:
                self.DEPLOY_SIGNAL_ID,
                scl.DESCRIPTION:
                _('ID of signal to use for signaling '
                  'output values'),
                scl.TYPE:
                'String',
                'value':
                self._get_swift_signal_url()
            })
            inputs.append({
                scl.NAME:
                self.DEPLOY_SIGNAL_VERB,
                scl.DESCRIPTION:
                _('HTTP verb to use for signaling '
                  'output values'),
                scl.TYPE:
                'String',
                'value':
                'PUT'
            })
        elif self._signal_transport_heat() or self._signal_transport_zaqar():
            creds = self._get_heat_signal_credentials()
            inputs.extend([{
                scl.NAME: self.DEPLOY_AUTH_URL,
                scl.DESCRIPTION: _('URL for API authentication'),
                scl.TYPE: 'String',
                'value': creds['auth_url']
            }, {
                scl.NAME:
                self.DEPLOY_USERNAME,
                scl.DESCRIPTION:
                _('Username for API authentication'),
                scl.TYPE:
                'String',
                'value':
                creds['username']
            }, {
                scl.NAME: self.DEPLOY_USER_ID,
                scl.DESCRIPTION: _('User ID for API authentication'),
                scl.TYPE: 'String',
                'value': creds['user_id']
            }, {
                scl.NAME:
                self.DEPLOY_PASSWORD,
                scl.DESCRIPTION:
                _('Password for API authentication'),
                scl.TYPE:
                'String',
                'value':
                creds['password']
            }, {
                scl.NAME:
                self.DEPLOY_PROJECT_ID,
                scl.DESCRIPTION:
                _('ID of project for API authentication'),
                scl.TYPE:
                'String',
                'value':
                creds['project_id']
            }])
        if self._signal_transport_zaqar():
            inputs.append({
                scl.NAME:
                self.DEPLOY_QUEUE_ID,
                scl.DESCRIPTION:
                _('ID of queue to use for signaling '
                  'output values'),
                scl.TYPE:
                'String',
                'value':
                self._get_zaqar_signal_queue_id()
            })

        return inputs

    def handle_create(self):
        """Trigger a deployment for the CREATE action."""
        return self._handle_action(self.CREATE)

    def check_create_complete(self, sd):
        # A falsy sd means no signal is expected; nothing to wait for.
        if not sd:
            return True
        return self._check_complete()

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Refresh properties and trigger a deployment for UPDATE."""
        if prop_diff:
            self.properties = json_snippet.properties(self.properties_schema,
                                                      self.context)

        return self._handle_action(self.UPDATE)

    def check_update_complete(self, sd):
        if not sd:
            return True
        return self._check_complete()

    def handle_delete(self):
        """Trigger a deployment for DELETE, tolerating missing deployments."""
        try:
            return self._handle_action(self.DELETE)
        except Exception as ex:
            self.rpc_client().ignore_error_named(ex, 'NotFound')

    def check_delete_complete(self, sd=None):
        # Clean up backing resources once any DELETE deployment finishes
        # (or immediately when none was triggered).
        if not sd or self._check_complete():
            self._delete_resource()
            return True

    def _delete_resource(self):
        """Remove signals, stack user, deployment and its derived config."""
        self._delete_signals()
        self._delete_user()

        derived_config_id = None
        if self.resource_id is not None:
            try:
                sd = self.rpc_client().show_software_deployment(
                    self.context, self.resource_id)
                derived_config_id = sd[rpc_api.SOFTWARE_DEPLOYMENT_CONFIG_ID]
                self.rpc_client().delete_software_deployment(
                    self.context, self.resource_id)
            except Exception as ex:
                self.rpc_client().ignore_error_named(ex, 'NotFound')

        if derived_config_id:
            self._delete_derived_config(derived_config_id)

    def handle_suspend(self):
        """Trigger a deployment for the SUSPEND action."""
        return self._handle_action(self.SUSPEND)

    def check_suspend_complete(self, sd):
        if not sd:
            return True
        return self._check_complete()

    def handle_resume(self):
        """Trigger a deployment for the RESUME action."""
        return self._handle_action(self.RESUME)

    def check_resume_complete(self, sd):
        if not sd:
            return True
        return self._check_complete()

    def handle_signal(self, details):
        """Forward a server signal (output values) to the deployment API."""
        return self.rpc_client().signal_software_deployment(
            self.context, self.resource_id, details,
            timeutils.utcnow().isoformat())

    def get_attribute(self, key, *path):
        """Resource attributes map to deployment outputs values."""
        sd = self.rpc_client().show_software_deployment(
            self.context, self.resource_id)
        ov = sd[rpc_api.SOFTWARE_DEPLOYMENT_OUTPUT_VALUES] or {}
        if key in ov:
            attribute = ov.get(key)
            return attributes.select_from_attribute(attribute, path)

        # Since there is no value for this key yet, check the output schemas
        # to find out if the key is valid
        # NOTE(review): local name 'sc' shadows the module-level 'sc' used
        # elsewhere in this class — harmless here, but worth renaming.
        sc = self.rpc_client().show_software_config(
            self.context, self.properties[self.CONFIG])
        outputs = sc[rpc_api.SOFTWARE_CONFIG_OUTPUTS] or []
        output_keys = [output['name'] for output in outputs]
        if key not in output_keys and key not in self.ATTRIBUTES:
            raise exception.InvalidTemplateAttribute(resource=self.name,
                                                     key=key)
        return None

    def validate(self):
        """Validate any of the provided params.

        :raises StackValidationFailed: if any property failed validation.
        """
        super(SoftwareDeployment, self).validate()
        server = self.properties[self.SERVER]
        if server:
            res = self.stack.resource_by_refid(server)
            if res:
                if not (res.properties.get('user_data_format')
                        == 'SOFTWARE_CONFIG'):
                    raise exception.StackValidationFailed(message=_(
                        "Resource %s's property user_data_format should be "
                        "set to SOFTWARE_CONFIG since there are software "
                        "deployments on it.") % server)
コード例 #11
0
ファイル: subnet.py プロジェクト: Hybrid-Cloud/conveyor
class Subnet(neutron.NeutronResource):
    """A resource for managing Neutron subnets.

    A subnet represents an IP address block that can be used for assigning IP
    addresses to virtual instances. Each subnet must have a CIDR and must be
    associated with a network. IPs can be either selected from the whole subnet
    CIDR, or from "allocation pools" that can be specified by the user.
    """

    PROPERTIES = (
        NETWORK_ID,
        NETWORK,
        SUBNETPOOL,
        PREFIXLEN,
        CIDR,
        VALUE_SPECS,
        NAME,
        IP_VERSION,
        DNS_NAMESERVERS,
        GATEWAY_IP,
        ENABLE_DHCP,
        ALLOCATION_POOLS,
        TENANT_ID,
        HOST_ROUTES,
        IPV6_RA_MODE,
        IPV6_ADDRESS_MODE,
    ) = (
        'network_id',
        'network',
        'subnetpool',
        'prefixlen',
        'cidr',
        'value_specs',
        'name',
        'ip_version',
        'dns_nameservers',
        'gateway_ip',
        'enable_dhcp',
        'allocation_pools',
        'tenant_id',
        'host_routes',
        'ipv6_ra_mode',
        'ipv6_address_mode',
    )

    _ALLOCATION_POOL_KEYS = (
        ALLOCATION_POOL_START,
        ALLOCATION_POOL_END,
    ) = (
        'start',
        'end',
    )

    _HOST_ROUTES_KEYS = (
        ROUTE_DESTINATION,
        ROUTE_NEXTHOP,
    ) = (
        'destination',
        'nexthop',
    )

    _IPV6_DHCP_MODES = (
        DHCPV6_STATEFUL,
        DHCPV6_STATELESS,
        SLAAC,
    ) = (
        'dhcpv6-stateful',
        'dhcpv6-stateless',
        'slaac',
    )

    ATTRIBUTES = (
        NAME_ATTR,
        NETWORK_ID_ATTR,
        TENANT_ID_ATTR,
        ALLOCATION_POOLS_ATTR,
        GATEWAY_IP_ATTR,
        HOST_ROUTES_ATTR,
        IP_VERSION_ATTR,
        CIDR_ATTR,
        DNS_NAMESERVERS_ATTR,
        ENABLE_DHCP_ATTR,
    ) = (
        'name',
        'network_id',
        'tenant_id',
        'allocation_pools',
        'gateway_ip',
        'host_routes',
        'ip_version',
        'cidr',
        'dns_nameservers',
        'enable_dhcp',
    )

    properties_schema = {
        NETWORK_ID:
        properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                message=_('Use property %s.') % NETWORK,
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED, version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        NETWORK:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID of the attached network.'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.network')],
            support_status=support.SupportStatus(version='2014.2')),
        SUBNETPOOL:
        properties.Schema(
            properties.Schema.STRING,
            _('The name or ID of the subnet pool.'),
            constraints=[constraints.CustomConstraint('neutron.subnetpool')],
            support_status=support.SupportStatus(version='6.0.0'),
        ),
        PREFIXLEN:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Prefix length for subnet allocation from subnet pool.'),
            constraints=[constraints.Range(min=0)],
            support_status=support.SupportStatus(version='6.0.0'),
        ),
        CIDR:
        properties.Schema(
            properties.Schema.STRING,
            _('The CIDR.'),
            constraints=[constraints.CustomConstraint('net_cidr')]),
        VALUE_SPECS:
        properties.Schema(properties.Schema.MAP,
                          _('Extra parameters to include in the request.'),
                          default={},
                          update_allowed=True),
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('The name of the subnet.'),
                          update_allowed=True),
        IP_VERSION:
        properties.Schema(properties.Schema.INTEGER,
                          _('The IP version, which is 4 or 6.'),
                          default=4,
                          constraints=[
                              constraints.AllowedValues([4, 6]),
                          ]),
        DNS_NAMESERVERS:
        properties.Schema(properties.Schema.LIST,
                          _('A specified set of DNS name servers to be used.'),
                          default=[],
                          update_allowed=True),
        GATEWAY_IP:
        properties.Schema(
            properties.Schema.STRING,
            _('The gateway IP address. Set to any of [ null | ~ | "" ] '
              'to create/update a subnet without a gateway. '
              'If omitted when creation, neutron will assign the first '
              'free IP address within the subnet to the gateway '
              'automatically. If remove this from template when update, '
              'the old gateway IP address will be detached.'),
            update_allowed=True),
        ENABLE_DHCP:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Set to true if DHCP is enabled and false if DHCP is disabled.'),
            default=True,
            update_allowed=True),
        ALLOCATION_POOLS:
        properties.Schema(
            properties.Schema.LIST,
            _('The start and end addresses for the allocation pools.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ALLOCATION_POOL_START:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Start address for the allocation pool.'),
                        required=True,
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                    ALLOCATION_POOL_END:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('End address for the allocation pool.'),
                        required=True,
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                },
            ),
            update_allowed=True),
        TENANT_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID of the tenant who owns the network. Only administrative '
              'users can specify a tenant ID other than their own.')),
        HOST_ROUTES:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of host route dictionaries for the subnet.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ROUTE_DESTINATION:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The destination for static route.'),
                        required=True,
                        constraints=[constraints.CustomConstraint('net_cidr')
                                     ]),
                    ROUTE_NEXTHOP:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The next hop for the destination.'),
                        required=True,
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                },
            ),
            update_allowed=True),
        IPV6_RA_MODE:
        properties.Schema(
            properties.Schema.STRING,
            _('IPv6 RA (Router Advertisement) mode.'),
            constraints=[
                constraints.AllowedValues(
                    [DHCPV6_STATEFUL, DHCPV6_STATELESS, SLAAC]),
            ],
            support_status=support.SupportStatus(version='2015.1')),
        IPV6_ADDRESS_MODE:
        properties.Schema(
            properties.Schema.STRING,
            _('IPv6 address mode.'),
            constraints=[
                constraints.AllowedValues(
                    [DHCPV6_STATEFUL, DHCPV6_STATELESS, SLAAC]),
            ],
            support_status=support.SupportStatus(version='2015.1')),
    }

    attributes_schema = {
        NAME_ATTR:
        attributes.Schema(_("Friendly name of the subnet."),
                          type=attributes.Schema.STRING),
        NETWORK_ID_ATTR:
        attributes.Schema(_("Parent network of the subnet."),
                          type=attributes.Schema.STRING),
        TENANT_ID_ATTR:
        attributes.Schema(_("Tenant owning the subnet."),
                          type=attributes.Schema.STRING),
        ALLOCATION_POOLS_ATTR:
        attributes.Schema(_("Ip allocation pools and their ranges."),
                          type=attributes.Schema.LIST),
        GATEWAY_IP_ATTR:
        attributes.Schema(_("Ip of the subnet's gateway."),
                          type=attributes.Schema.STRING),
        HOST_ROUTES_ATTR:
        attributes.Schema(_("Additional routes for this subnet."),
                          type=attributes.Schema.LIST),
        IP_VERSION_ATTR:
        attributes.Schema(_("Ip version for the subnet."),
                          type=attributes.Schema.STRING),
        CIDR_ATTR:
        attributes.Schema(_("CIDR block notation for this subnet."),
                          type=attributes.Schema.STRING),
        DNS_NAMESERVERS_ATTR:
        attributes.Schema(_("List of dns nameservers."),
                          type=attributes.Schema.LIST),
        ENABLE_DHCP_ATTR:
        attributes.Schema(
            _("'true' if DHCP is enabled for this subnet; 'false' otherwise."),
            type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        """Map the deprecated network_id property and resolve names to IDs."""
        return [
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.NETWORK],
                                        value_path=[self.NETWORK_ID]),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.NETWORK],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='network'),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.SUBNETPOOL],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='subnetpool')
        ]

    @classmethod
    def _null_gateway_ip(cls, props):
        """Convert an empty-string gateway_ip back to None in place."""
        if cls.GATEWAY_IP not in props:
            return
        # Specifying null in the gateway_ip will result in
        # a property containing an empty string.
        # A null gateway_ip has special meaning in the API
        # so this needs to be set back to None.
        # See bug https://bugs.launchpad.net/heat/+bug/1226666
        if props.get(cls.GATEWAY_IP) == '':
            props[cls.GATEWAY_IP] = None

    def validate(self):
        """Check property combinations that the schema alone cannot express.

        :raises ResourcePropertyConflict: for subnetpool/cidr and
            prefixlen/cidr conflicts.
        :raises PropertyUnspecifiedError: when neither subnetpool nor cidr
            is given.
        :raises StackValidationFailed: for invalid IPv6 mode combinations
            or a malformed gateway IP.
        """
        super(Subnet, self).validate()
        subnetpool = self.properties[self.SUBNETPOOL]
        prefixlen = self.properties[self.PREFIXLEN]
        cidr = self.properties[self.CIDR]
        if subnetpool and cidr:
            raise exception.ResourcePropertyConflict(self.SUBNETPOOL,
                                                     self.CIDR)
        if not subnetpool and not cidr:
            raise exception.PropertyUnspecifiedError(self.SUBNETPOOL,
                                                     self.CIDR)
        if prefixlen and cidr:
            raise exception.ResourcePropertyConflict(self.PREFIXLEN, self.CIDR)
        ra_mode = self.properties[self.IPV6_RA_MODE]
        address_mode = self.properties[self.IPV6_ADDRESS_MODE]

        if (self.properties[self.IP_VERSION] == 4) and (ra_mode
                                                        or address_mode):
            msg = _('ipv6_ra_mode and ipv6_address_mode are not supported '
                    'for ipv4.')
            raise exception.StackValidationFailed(message=msg)
        if ra_mode and address_mode and (ra_mode != address_mode):
            msg = _('When both ipv6_ra_mode and ipv6_address_mode are set, '
                    'they must be equal.')
            raise exception.StackValidationFailed(message=msg)

        gateway_ip = self.properties.get(self.GATEWAY_IP)
        if (gateway_ip and gateway_ip not in ['~', '']
                and not netutils.is_valid_ip(gateway_ip)):
            # Interpolate the gateway into the message; previously this
            # built a (format_string, value) tuple, so the raised message
            # was a tuple repr with '%(gateway)s' left unformatted.
            msg = _('Gateway IP address "%(gateway)s" is in '
                    'invalid format.') % {'gateway': gateway_ip}
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the subnet in Neutron from the resolved properties."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        # The API expects network_id/subnetpool_id keys, not the
        # template-facing property names.
        props['network_id'] = props.pop(self.NETWORK)
        if self.SUBNETPOOL in props and props[self.SUBNETPOOL]:
            props['subnetpool_id'] = props.pop('subnetpool')
        self._null_gateway_ip(props)
        subnet = self.client().create_subnet({'subnet': props})['subnet']
        self.resource_id_set(subnet['id'])

    def handle_delete(self):
        """Delete the subnet; a missing subnet is not an error."""
        if not self.resource_id:
            return

        try:
            self.client().delete_subnet(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def _show_resource(self):
        """Fetch the current subnet representation from Neutron."""
        return self.client().show_subnet(self.resource_id)['subnet']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed properties to Neutron."""
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            # Explicitly clearing allocation pools requires sending [].
            if (self.ALLOCATION_POOLS in prop_diff
                    and prop_diff[self.ALLOCATION_POOLS] is None):
                prop_diff[self.ALLOCATION_POOLS] = []

            # If the new value is '', set to None
            self._null_gateway_ip(prop_diff)

            self.client().update_subnet(self.resource_id,
                                        {'subnet': prop_diff})

    def is_allow_replace(self):
        return True
コード例 #12
0
ファイル: image.py プロジェクト: Hybrid-Cloud/conveyor
class GlanceImage(resource.Resource):
    """A resource managing images in Glance.

    A resource provides managing images that are meant to be used with other
    services.
    """

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        NAME, IMAGE_ID, IS_PUBLIC, MIN_DISK, MIN_RAM, PROTECTED,
        DISK_FORMAT, CONTAINER_FORMAT, LOCATION
    ) = (
        'name', 'id', 'is_public', 'min_disk', 'min_ram', 'protected',
        'disk_format', 'container_format', 'location'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the image. The name of an image is not '
              'unique to a Image Service node.')
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _('The image ID. Glance will generate a UUID if not specified.')
        ),
        IS_PUBLIC: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Scope of image accessibility. Public or private. '
              'Default value is False means private.'),
            default=False,
        ),
        MIN_DISK: properties.Schema(
            properties.Schema.INTEGER,
            _('Amount of disk space (in GB) required to boot image. '
              'Default value is 0 if not specified '
              'and means no limit on the disk size.'),
            constraints=[
                constraints.Range(min=0),
            ],
            default=0
        ),
        MIN_RAM: properties.Schema(
            properties.Schema.INTEGER,
            _('Amount of ram (in MB) required to boot image. Default value '
              'is 0 if not specified and means no limit on the ram size.'),
            constraints=[
                constraints.Range(min=0),
            ],
            default=0
        ),
        PROTECTED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether the image can be deleted. If the value is True, '
              'the image is protected and cannot be deleted.'),
            default=False
        ),
        DISK_FORMAT: properties.Schema(
            properties.Schema.STRING,
            _('Disk format of image.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ami', 'ari', 'aki',
                                           'vhd', 'vmdk', 'raw',
                                           'qcow2', 'vdi', 'iso'])
            ]
        ),
        CONTAINER_FORMAT: properties.Schema(
            properties.Schema.STRING,
            _('Container format of image.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ami', 'ari', 'aki',
                                           'bare', 'ova', 'ovf'])
            ]
        ),
        LOCATION: properties.Schema(
            properties.Schema.STRING,
            _('URL where the data for this image already resides. For '
              'example, if the image data is stored in swift, you could '
              'specify "swift://example.com/container/obj".'),
            required=True,
        ),
    }

    default_client_name = 'glance'

    entity = 'images'

    def handle_create(self):
        """Create the image, passing only the properties that were set."""
        args = dict((k, v) for k, v in self.properties.items()
                    if v is not None)
        image_id = self.client().images.create(**args).id
        self.resource_id_set(image_id)
        return image_id

    def check_create_complete(self, image_id):
        """Creation is complete once the image reaches 'active' status."""
        image = self.client().images.get(image_id)
        return image.status == 'active'

    def _show_resource(self):
        """Fetch the image; glance v2 objects must be coerced to a dict."""
        # Use the standard client() accessor (default_client_name is
        # 'glance'); the rest of this class never calls self.glance().
        if self.client().version == 1.0:
            return super(GlanceImage, self)._show_resource()
        else:
            image = self.client().images.get(self.resource_id)
            return dict(image)

    def validate(self):
        """Enforce glance's rule that ami/ari/aki formats must match.

        :raises StackValidationFailed: on a disk/container format mismatch.
        """
        super(GlanceImage, self).validate()
        container_format = self.properties[self.CONTAINER_FORMAT]
        if (container_format in ['ami', 'ari', 'aki']
                and self.properties[self.DISK_FORMAT] != container_format):
            msg = _("Invalid mix of disk and container formats. When "
                    "setting a disk or container format to one of 'aki', "
                    "'ari', or 'ami', the container and disk formats must "
                    "match.")
            raise exception.StackValidationFailed(message=msg)

    def get_live_resource_data(self):
        """Return live data, treating deleted/killed images as gone.

        :raises EntityNotFound: if the image was deleted outside of heat.
        """
        image_data = super(GlanceImage, self).get_live_resource_data()
        if image_data.get('status') in ('deleted', 'killed'):
            raise exception.EntityNotFound(entity='Resource',
                                           name=self.name)
        return image_data

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Build a properties dict reflecting the image's observed state."""
        image_reality = {}

        # NOTE(prazumovsky): At first, there's no way to get location from
        # glance; at second, location property is doubtful, because glance
        # client v2 doesn't use location, it uses locations. So, we should
        # get location property from resource properties.
        if self.client().version == 1.0:
            image_reality.update(
                {self.LOCATION: resource_properties[self.LOCATION]})

        for key in self.PROPERTIES:
            if key == self.LOCATION:
                continue
            if key == self.IMAGE_ID:
                if (resource_properties.get(self.IMAGE_ID) is not None or
                        resource_data.get(self.IMAGE_ID) != self.resource_id):
                    image_reality.update({self.IMAGE_ID: resource_data.get(
                        self.IMAGE_ID)})
                else:
                    image_reality.update({self.IMAGE_ID: None})
            else:
                image_reality.update({key: resource_data.get(key)})

        return image_reality
コード例 #13
0
class AddressScope(neutron.NeutronResource):
    """A resource for Neutron address scope.

    This resource can be associated with multiple subnet pools
    in a one-to-many relationship. The subnet pools under an
    address scope must not overlap.
    """

    required_service_extension = 'address-scope'

    support_status = support.SupportStatus(version='6.0.0')

    PROPERTIES = (
        NAME, SHARED, TENANT_ID, IP_VERSION,
    ) = (
        'name', 'shared', 'tenant_id', 'ip_version',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name for the address scope.'),
            update_allowed=True
        ),
        SHARED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether the address scope should be shared to other '
              'tenants. Note that the default policy setting '
              'restricts usage of this attribute to administrative '
              'users only, and restricts changing of shared address scope '
              'to unshared with update.'),
            default=False,
            update_allowed=True
        ),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('The owner tenant ID of the address scope. Only '
              'administrative users can specify a tenant ID '
              'other than their own.'),
            constraints=[constraints.CustomConstraint('keystone.project')]
        ),
        IP_VERSION: properties.Schema(
            properties.Schema.INTEGER,
            _('Address family of the address scope, which is 4 or 6.'),
            default=4,
            constraints=[constraints.AllowedValues([4, 6])]
        ),
    }

    def handle_create(self):
        """Create the address scope and record its ID."""
        scope_props = self.prepare_properties(self.properties,
                                              self.physical_resource_name())
        created = self.client().create_address_scope(
            {'address_scope': scope_props})['address_scope']
        self.resource_id_set(created['id'])

    def handle_delete(self):
        """Delete the address scope, tolerating one that is already gone."""
        if self.resource_id is None:
            return

        with self.client_plugin().ignore_not_found:
            self.client().delete_address_scope(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push any changed properties to Neutron."""
        if not prop_diff:
            return
        self.prepare_update_properties(prop_diff)
        self.client().update_address_scope(self.resource_id,
                                           {'address_scope': prop_diff})

    def _show_resource(self):
        """Fetch the current address scope representation from Neutron."""
        return self.client().show_address_scope(
            self.resource_id)['address_scope']
コード例 #14
0
ファイル: vpnservice.py プロジェクト: Hybrid-Cloud/conveyor
class IPsecPolicy(neutron.NeutronResource):
    """A resource for IPsec policy in Neutron.

    The IP security policy specifying the authentication and encryption
    algorithm, and encapsulation mode used for the established VPN connection.
    """

    required_service_extension = 'vpnaas'

    PROPERTIES = (
        NAME, DESCRIPTION, TRANSFORM_PROTOCOL, ENCAPSULATION_MODE,
        AUTH_ALGORITHM, ENCRYPTION_ALGORITHM, LIFETIME, PFS,
    ) = (
        'name', 'description', 'transform_protocol', 'encapsulation_mode',
        'auth_algorithm', 'encryption_algorithm', 'lifetime', 'pfs',
    )

    _LIFETIME_KEYS = (
        LIFETIME_UNITS, LIFETIME_VALUE,
    ) = (
        'units', 'value',
    )

    ATTRIBUTES = (
        AUTH_ALGORITHM_ATTR, DESCRIPTION_ATTR, ENCAPSULATION_MODE_ATTR,
        ENCRYPTION_ALGORITHM_ATTR, LIFETIME_ATTR, NAME_ATTR, PFS_ATTR,
        TENANT_ID, TRANSFORM_PROTOCOL_ATTR,
    ) = (
        'auth_algorithm', 'description', 'encapsulation_mode',
        'encryption_algorithm', 'lifetime', 'name', 'pfs',
        'tenant_id', 'transform_protocol',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the ipsec policy.'),
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the ipsec policy.'),
            update_allowed=True
        ),
        TRANSFORM_PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            _('Transform protocol for the ipsec policy.'),
            default='esp',
            constraints=[
                constraints.AllowedValues(['esp', 'ah', 'ah-esp']),
            ]
        ),
        ENCAPSULATION_MODE: properties.Schema(
            properties.Schema.STRING,
            _('Encapsulation mode for the ipsec policy.'),
            default='tunnel',
            constraints=[
                constraints.AllowedValues(['tunnel', 'transport']),
            ]
        ),
        AUTH_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('Authentication hash algorithm for the ipsec policy.'),
            default='sha1',
            constraints=[
                constraints.AllowedValues(['sha1']),
            ]
        ),
        ENCRYPTION_ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('Encryption algorithm for the ipsec policy.'),
            default='aes-128',
            constraints=[
                constraints.AllowedValues(
                    ['3des', 'aes-128', 'aes-192', 'aes-256']),
            ]
        ),
        LIFETIME: properties.Schema(
            properties.Schema.MAP,
            _('Safety assessment lifetime configuration for the ipsec '
              'policy.'),
            schema={
                LIFETIME_UNITS: properties.Schema(
                    properties.Schema.STRING,
                    _('Safety assessment lifetime units.'),
                    default='seconds',
                    constraints=[
                        constraints.AllowedValues(['seconds', 'kilobytes']),
                    ]
                ),
                LIFETIME_VALUE: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Safety assessment lifetime value in specified '
                      'units.'),
                    default=3600
                ),
            }
        ),
        PFS: properties.Schema(
            properties.Schema.STRING,
            _('Perfect forward secrecy for the ipsec policy.'),
            default='group5',
            constraints=[
                constraints.AllowedValues(['group2', 'group5', 'group14']),
            ]
        ),
    }

    attributes_schema = {
        AUTH_ALGORITHM_ATTR: attributes.Schema(
            _('The authentication hash algorithm of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        DESCRIPTION_ATTR: attributes.Schema(
            _('The description of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        ENCAPSULATION_MODE_ATTR: attributes.Schema(
            _('The encapsulation mode of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        ENCRYPTION_ALGORITHM_ATTR: attributes.Schema(
            _('The encryption algorithm of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        LIFETIME_ATTR: attributes.Schema(
            _('The safety assessment lifetime configuration of the ipsec '
              'policy.'),
            type=attributes.Schema.MAP
        ),
        NAME_ATTR: attributes.Schema(
            _('The name of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        PFS_ATTR: attributes.Schema(
            _('The perfect forward secrecy of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        TENANT_ID: attributes.Schema(
            _('The unique identifier of the tenant owning the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
        TRANSFORM_PROTOCOL_ATTR: attributes.Schema(
            _('The transform protocol of the ipsec policy.'),
            type=attributes.Schema.STRING
        ),
    }

    def _show_resource(self):
        """Fetch the current ipsec policy representation from Neutron."""
        return self.client().show_ipsecpolicy(self.resource_id)['ipsecpolicy']

    def handle_create(self):
        """Create the ipsec policy and record its ID."""
        policy_props = self.prepare_properties(self.properties,
                                               self.physical_resource_name())
        created = self.client().create_ipsecpolicy(
            {'ipsecpolicy': policy_props})['ipsecpolicy']
        self.resource_id_set(created['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push any changed properties to Neutron."""
        if not prop_diff:
            return
        self.client().update_ipsecpolicy(self.resource_id,
                                         {'ipsecpolicy': prop_diff})

    def handle_delete(self):
        """Delete the ipsec policy; a missing policy is not an error."""
        if not self.resource_id:
            return

        try:
            self.client().delete_ipsecpolicy(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
コード例 #15
0
class MeteringRule(neutron.NeutronResource):
    """A resource to create rule for some label.

    Resource for allowing specified label to measure the traffic for a specific
    set of ip range.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        METERING_LABEL_ID, REMOTE_IP_PREFIX, DIRECTION, EXCLUDED,
    ) = (
        'metering_label_id', 'remote_ip_prefix', 'direction', 'excluded',
    )

    ATTRIBUTES = (
        DIRECTION_ATTR, EXCLUDED_ATTR, METERING_LABEL_ID_ATTR,
        REMOTE_IP_PREFIX_ATTR,
    ) = (
        'direction', 'excluded', 'metering_label_id',
        'remote_ip_prefix',
    )

    properties_schema = {
        METERING_LABEL_ID: properties.Schema(
            properties.Schema.STRING,
            _('The metering label ID to associate with this metering rule.'),
            required=True
        ),
        REMOTE_IP_PREFIX: properties.Schema(
            properties.Schema.STRING,
            _('Indicates remote IP prefix to be associated with this '
              'metering rule.'),
            required=True,
        ),
        DIRECTION: properties.Schema(
            properties.Schema.STRING,
            _('The direction in which metering rule is applied, '
              'either ingress or egress.'),
            default='ingress',
            constraints=[constraints.AllowedValues((
                'ingress', 'egress'))]
        ),
        EXCLUDED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Specify whether the remote_ip_prefix will be excluded or '
              'not from traffic counters of the metering label. For example '
              'to not count the traffic of a specific IP address of a range.'),
            # Fix: use a real boolean default for a BOOLEAN property.  The
            # previous string default 'False' relied on implicit coercion and
            # was inconsistent with the boolean defaults used elsewhere in
            # this file (e.g. admin_state_up default=True).
            default=False
        )
    }

    attributes_schema = {
        DIRECTION_ATTR: attributes.Schema(
            _('The direction in which metering rule is applied.'),
            type=attributes.Schema.STRING
        ),
        EXCLUDED_ATTR: attributes.Schema(
            _('Exclude state for cidr.'),
            type=attributes.Schema.STRING
        ),
        METERING_LABEL_ID_ATTR: attributes.Schema(
            _('The metering label ID to associate with this metering rule.'),
            type=attributes.Schema.STRING
        ),
        REMOTE_IP_PREFIX_ATTR: attributes.Schema(
            _('CIDR to be associated with this metering rule.'),
            type=attributes.Schema.STRING
        ),
    }

    def handle_create(self):
        """Create the metering label rule and record its id."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())

        metering_label_rule = self.client().create_metering_label_rule(
            {'metering_label_rule': props})['metering_label_rule']

        self.resource_id_set(metering_label_rule['id'])

    def _show_resource(self):
        """Return the current metering label rule data from Neutron."""
        return self.client().show_metering_label_rule(
            self.resource_id)['metering_label_rule']

    def handle_delete(self):
        """Delete the rule, tolerating an already-gone resource."""
        if not self.resource_id:
            return

        try:
            self.client().delete_metering_label_rule(self.resource_id)
        except Exception as ex:
            # Re-raises unless the exception is a not-found error.
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
コード例 #16
0
ファイル: vpnservice.py プロジェクト: Hybrid-Cloud/conveyor
class IKEPolicy(neutron.NeutronResource):
    """A resource for IKE policy in Neutron.

    The Internet Key Exchange policy identifies the authentication and
    encryption algorithm used during phase one and phase two negotiation of a
    VPN connection.
    """

    # This resource is only usable when the Neutron 'vpnaas' extension is
    # available.
    required_service_extension = 'vpnaas'

    PROPERTIES = (
        NAME,
        DESCRIPTION,
        AUTH_ALGORITHM,
        ENCRYPTION_ALGORITHM,
        PHASE1_NEGOTIATION_MODE,
        LIFETIME,
        PFS,
        IKE_VERSION,
    ) = (
        'name',
        'description',
        'auth_algorithm',
        'encryption_algorithm',
        'phase1_negotiation_mode',
        'lifetime',
        'pfs',
        'ike_version',
    )

    # Keys of the nested LIFETIME map property.
    _LIFETIME_KEYS = (
        LIFETIME_UNITS,
        LIFETIME_VALUE,
    ) = (
        'units',
        'value',
    )

    ATTRIBUTES = (
        AUTH_ALGORITHM_ATTR,
        DESCRIPTION_ATTR,
        ENCRYPTION_ALGORITHM_ATTR,
        IKE_VERSION_ATTR,
        LIFETIME_ATTR,
        NAME_ATTR,
        PFS_ATTR,
        PHASE1_NEGOTIATION_MODE_ATTR,
        TENANT_ID,
    ) = (
        'auth_algorithm',
        'description',
        'encryption_algorithm',
        'ike_version',
        'lifetime',
        'name',
        'pfs',
        'phase1_negotiation_mode',
        'tenant_id',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name for the ike policy.'),
                          update_allowed=True),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description for the ike policy.'),
                          update_allowed=True),
        AUTH_ALGORITHM:
        properties.Schema(
            properties.Schema.STRING,
            _('Authentication hash algorithm for the ike policy.'),
            default='sha1',
            constraints=[
                constraints.AllowedValues(['sha1']),
            ]),
        ENCRYPTION_ALGORITHM:
        properties.Schema(properties.Schema.STRING,
                          _('Encryption algorithm for the ike policy.'),
                          default='aes-128',
                          constraints=[
                              constraints.AllowedValues(
                                  ['3des', 'aes-128', 'aes-192', 'aes-256']),
                          ]),
        PHASE1_NEGOTIATION_MODE:
        properties.Schema(properties.Schema.STRING,
                          _('Negotiation mode for the ike policy.'),
                          default='main',
                          constraints=[
                              constraints.AllowedValues(['main']),
                          ]),
        LIFETIME:
        properties.Schema(
            properties.Schema.MAP,
            _('Safety assessment lifetime configuration for the ike policy.'),
            schema={
                LIFETIME_UNITS:
                properties.Schema(properties.Schema.STRING,
                                  _('Safety assessment lifetime units.'),
                                  default='seconds',
                                  constraints=[
                                      constraints.AllowedValues(
                                          ['seconds', 'kilobytes']),
                                  ]),
                LIFETIME_VALUE:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('Safety assessment lifetime value in specified '
                      'units.'),
                    default=3600),
            }),
        PFS:
        properties.Schema(
            properties.Schema.STRING,
            _('Perfect forward secrecy in lowercase for the ike policy.'),
            default='group5',
            constraints=[
                constraints.AllowedValues(['group2', 'group5', 'group14']),
            ]),
        IKE_VERSION:
        properties.Schema(properties.Schema.STRING,
                          _('Version for the ike policy.'),
                          default='v1',
                          constraints=[
                              constraints.AllowedValues(['v1', 'v2']),
                          ]),
    }

    attributes_schema = {
        AUTH_ALGORITHM_ATTR:
        attributes.Schema(
            _('The authentication hash algorithm used by the ike policy.'),
            type=attributes.Schema.STRING),
        DESCRIPTION_ATTR:
        attributes.Schema(_('The description of the ike policy.'),
                          type=attributes.Schema.STRING),
        ENCRYPTION_ALGORITHM_ATTR:
        attributes.Schema(
            _('The encryption algorithm used by the ike policy.'),
            type=attributes.Schema.STRING),
        IKE_VERSION_ATTR:
        attributes.Schema(_('The version of the ike policy.'),
                          type=attributes.Schema.STRING),
        LIFETIME_ATTR:
        attributes.Schema(_(
            'The safety assessment lifetime configuration for the ike '
            'policy.'),
                          type=attributes.Schema.MAP),
        NAME_ATTR:
        attributes.Schema(_('The name of the ike policy.'),
                          type=attributes.Schema.STRING),
        PFS_ATTR:
        attributes.Schema(_('The perfect forward secrecy of the ike policy.'),
                          type=attributes.Schema.STRING),
        PHASE1_NEGOTIATION_MODE_ATTR:
        attributes.Schema(_('The negotiation mode of the ike policy.'),
                          type=attributes.Schema.STRING),
        TENANT_ID:
        attributes.Schema(
            _('The unique identifier of the tenant owning the ike policy.'),
            type=attributes.Schema.STRING),
    }

    def _show_resource(self):
        """Return the current ike policy data from Neutron."""
        return self.client().show_ikepolicy(self.resource_id)['ikepolicy']

    def handle_create(self):
        """Create the ike policy in Neutron and record its id."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        ikepolicy = self.client().create_ikepolicy({'ikepolicy':
                                                    props})['ikepolicy']
        self.resource_id_set(ikepolicy['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply changed properties to the ike policy, if any."""
        if prop_diff:
            self.client().update_ikepolicy(self.resource_id,
                                           {'ikepolicy': prop_diff})

    def handle_delete(self):
        """Delete the ike policy, tolerating an already-gone resource."""
        if not self.resource_id:
            return

        try:
            self.client().delete_ikepolicy(self.resource_id)
        except Exception as ex:
            # Re-raises unless the exception is a not-found error.
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
コード例 #17
0
ファイル: vpnservice.py プロジェクト: Hybrid-Cloud/conveyor
class IPsecSiteConnection(neutron.NeutronResource):
    """A resource for IPsec site connection in Neutron.

    This resource has details for the site-to-site IPsec connection, including
    the peer CIDRs, MTU, peer address, DPD settings and status.
    """

    # This resource is only usable when the Neutron 'vpnaas' extension is
    # available.
    required_service_extension = 'vpnaas'

    PROPERTIES = (
        NAME,
        DESCRIPTION,
        PEER_ADDRESS,
        PEER_ID,
        PEER_CIDRS,
        MTU,
        DPD,
        PSK,
        INITIATOR,
        ADMIN_STATE_UP,
        IKEPOLICY_ID,
        IPSECPOLICY_ID,
        VPNSERVICE_ID,
    ) = (
        'name',
        'description',
        'peer_address',
        'peer_id',
        'peer_cidrs',
        'mtu',
        'dpd',
        'psk',
        'initiator',
        'admin_state_up',
        'ikepolicy_id',
        'ipsecpolicy_id',
        'vpnservice_id',
    )

    # Keys of the nested DPD (dead peer detection) map property.
    _DPD_KEYS = (
        DPD_ACTIONS,
        DPD_INTERVAL,
        DPD_TIMEOUT,
    ) = (
        'actions',
        'interval',
        'timeout',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR,
        AUTH_MODE,
        DESCRIPTION_ATTR,
        DPD_ATTR,
        IKEPOLICY_ID_ATTR,
        INITIATOR_ATTR,
        IPSECPOLICY_ID_ATTR,
        MTU_ATTR,
        NAME_ATTR,
        PEER_ADDRESS_ATTR,
        PEER_CIDRS_ATTR,
        PEER_ID_ATTR,
        PSK_ATTR,
        ROUTE_MODE,
        STATUS,
        TENANT_ID,
        VPNSERVICE_ID_ATTR,
    ) = (
        'admin_state_up',
        'auth_mode',
        'description',
        'dpd',
        'ikepolicy_id',
        'initiator',
        'ipsecpolicy_id',
        'mtu',
        'name',
        'peer_address',
        'peer_cidrs',
        'peer_id',
        'psk',
        'route_mode',
        'status',
        'tenant_id',
        'vpnservice_id',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name for the ipsec site connection.'),
                          update_allowed=True),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description for the ipsec site connection.'),
                          update_allowed=True),
        PEER_ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('Remote branch router public IPv4 address or IPv6 address or '
              'FQDN.'),
            required=True),
        PEER_ID:
        properties.Schema(properties.Schema.STRING,
                          _('Remote branch router identity.'),
                          required=True),
        PEER_CIDRS:
        properties.Schema(
            properties.Schema.LIST,
            _('Remote subnet(s) in CIDR format.'),
            required=True,
            schema=properties.Schema(
                properties.Schema.STRING,
                constraints=[constraints.CustomConstraint('net_cidr')])),
        MTU:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum transmission unit size (in bytes) for the ipsec site '
              'connection.'),
            default=1500),
        DPD:
        properties.Schema(
            properties.Schema.MAP,
            _('Dead Peer Detection protocol configuration for the ipsec site '
              'connection.'),
            schema={
                DPD_ACTIONS:
                properties.Schema(properties.Schema.STRING,
                                  _('Controls DPD protocol mode.'),
                                  default='hold',
                                  constraints=[
                                      constraints.AllowedValues([
                                          'clear', 'disabled', 'hold',
                                          'restart', 'restart-by-peer'
                                      ]),
                                  ]),
                DPD_INTERVAL:
                properties.Schema(properties.Schema.INTEGER,
                                  _('Number of seconds for the DPD delay.'),
                                  default=30),
                DPD_TIMEOUT:
                properties.Schema(properties.Schema.INTEGER,
                                  _('Number of seconds for the DPD timeout.'),
                                  default=120),
            }),
        PSK:
        properties.Schema(
            properties.Schema.STRING,
            _('Pre-shared key string for the ipsec site connection.'),
            required=True),
        INITIATOR:
        properties.Schema(
            properties.Schema.STRING,
            _('Initiator state in lowercase for the ipsec site connection.'),
            default='bi-directional',
            constraints=[
                constraints.AllowedValues(['bi-directional', 'response-only']),
            ]),
        ADMIN_STATE_UP:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Administrative state for the ipsec site connection.'),
            default=True,
            update_allowed=True),
        IKEPOLICY_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('Unique identifier for the ike policy associated with the '
              'ipsec site connection.'),
            required=True),
        IPSECPOLICY_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('Unique identifier for the ipsec policy associated with the '
              'ipsec site connection.'),
            required=True),
        VPNSERVICE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('Unique identifier for the vpn service associated with the '
              'ipsec site connection.'),
            required=True),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR:
        attributes.Schema(
            _('The administrative state of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        AUTH_MODE:
        attributes.Schema(
            _('The authentication mode of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        DESCRIPTION_ATTR:
        attributes.Schema(_('The description of the ipsec site connection.'),
                          type=attributes.Schema.STRING),
        DPD_ATTR:
        attributes.Schema(_(
            'The dead peer detection protocol configuration of the ipsec '
            'site connection.'),
                          type=attributes.Schema.MAP),
        IKEPOLICY_ID_ATTR:
        attributes.Schema(_(
            'The unique identifier of ike policy associated with the ipsec '
            'site connection.'),
                          type=attributes.Schema.STRING),
        INITIATOR_ATTR:
        attributes.Schema(_('The initiator of the ipsec site connection.'),
                          type=attributes.Schema.STRING),
        IPSECPOLICY_ID_ATTR:
        attributes.Schema(_(
            'The unique identifier of ipsec policy associated with the '
            'ipsec site connection.'),
                          type=attributes.Schema.STRING),
        MTU_ATTR:
        attributes.Schema(_(
            'The maximum transmission unit size (in bytes) of the ipsec '
            'site connection.'),
                          type=attributes.Schema.STRING),
        NAME_ATTR:
        attributes.Schema(_('The name of the ipsec site connection.'),
                          type=attributes.Schema.STRING),
        PEER_ADDRESS_ATTR:
        attributes.Schema(_(
            'The remote branch router public IPv4 address or IPv6 address '
            'or FQDN.'),
                          type=attributes.Schema.STRING),
        PEER_CIDRS_ATTR:
        attributes.Schema(_(
            'The remote subnet(s) in CIDR format of the ipsec site '
            'connection.'),
                          type=attributes.Schema.LIST),
        PEER_ID_ATTR:
        attributes.Schema(_(
            'The remote branch router identity of the ipsec site '
            'connection.'),
                          type=attributes.Schema.STRING),
        PSK_ATTR:
        attributes.Schema(
            _('The pre-shared key string of the ipsec site connection.'),
            type=attributes.Schema.STRING),
        ROUTE_MODE:
        attributes.Schema(_('The route mode of the ipsec site connection.'),
                          type=attributes.Schema.STRING),
        STATUS:
        attributes.Schema(_('The status of the ipsec site connection.'),
                          type=attributes.Schema.STRING),
        TENANT_ID:
        attributes.Schema(_(
            'The unique identifier of the tenant owning the ipsec site '
            'connection.'),
                          type=attributes.Schema.STRING),
        VPNSERVICE_ID_ATTR:
        attributes.Schema(_(
            'The unique identifier of vpn service associated with the ipsec '
            'site connection.'),
                          type=attributes.Schema.STRING),
    }

    def _show_resource(self):
        """Return the current ipsec site connection data from Neutron."""
        return self.client().show_ipsec_site_connection(
            self.resource_id)['ipsec_site_connection']

    def handle_create(self):
        """Create the ipsec site connection and record its id."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        ipsec_site_connection = self.client().create_ipsec_site_connection(
            {'ipsec_site_connection': props})['ipsec_site_connection']
        self.resource_id_set(ipsec_site_connection['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply changed properties to the ipsec site connection, if any."""
        if prop_diff:
            self.client().update_ipsec_site_connection(
                self.resource_id, {'ipsec_site_connection': prop_diff})

    def handle_delete(self):
        """Delete the connection, tolerating an already-gone resource."""
        # Fix: skip the delete call when the resource was never created,
        # consistent with the other VPN/metering resources in this file.
        if not self.resource_id:
            return

        try:
            self.client().delete_ipsec_site_connection(self.resource_id)
        except Exception as ex:
            # Re-raises unless the exception is a not-found error.
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
コード例 #18
0
class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
    """An AWS-compatible auto scaling group of instances.

    Property names follow the AWS CloudFormation
    AWS::AutoScaling::AutoScalingGroup resource (AvailabilityZones,
    LaunchConfigurationName, MaxSize, MinSize, ...).
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        AVAILABILITY_ZONES,
        LAUNCH_CONFIGURATION_NAME,
        MAX_SIZE,
        MIN_SIZE,
        COOLDOWN,
        DESIRED_CAPACITY,
        HEALTH_CHECK_GRACE_PERIOD,
        HEALTH_CHECK_TYPE,
        LOAD_BALANCER_NAMES,
        VPCZONE_IDENTIFIER,
        TAGS,
        INSTANCE_ID,
    ) = (
        'AvailabilityZones',
        'LaunchConfigurationName',
        'MaxSize',
        'MinSize',
        'Cooldown',
        'DesiredCapacity',
        'HealthCheckGracePeriod',
        'HealthCheckType',
        'LoadBalancerNames',
        'VPCZoneIdentifier',
        'Tags',
        'InstanceId',
    )

    # Keys of each entry in the TAGS list property.
    _TAG_KEYS = (
        TAG_KEY,
        TAG_VALUE,
    ) = (
        'Key',
        'Value',
    )

    # NOTE: the parentheses below do not create tuples; ROLLING_UPDATE is
    # simply bound to the plain string 'AutoScalingRollingUpdate'.
    _UPDATE_POLICY_SCHEMA_KEYS = (ROLLING_UPDATE) = (
        'AutoScalingRollingUpdate')

    _ROLLING_UPDATE_SCHEMA_KEYS = (MIN_INSTANCES_IN_SERVICE, MAX_BATCH_SIZE,
                                   PAUSE_TIME) = ('MinInstancesInService',
                                                  'MaxBatchSize', 'PauseTime')

    ATTRIBUTES = (INSTANCE_LIST, ) = ('InstanceList', )

    properties_schema = {
        AVAILABILITY_ZONES:
        properties.Schema(properties.Schema.LIST,
                          _('Not Implemented.'),
                          required=True),
        LAUNCH_CONFIGURATION_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('The reference to a LaunchConfiguration resource.'),
            update_allowed=True),
        INSTANCE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID of an existing instance to use to '
              'create the Auto Scaling group. If specify this property, '
              'will create the group use an existing instance instead of '
              'a launch configuration.'),
            constraints=[constraints.CustomConstraint("nova.server")]),
        MAX_SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Maximum number of instances in the group.'),
                          required=True,
                          update_allowed=True),
        MIN_SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Minimum number of instances in the group.'),
                          required=True,
                          update_allowed=True),
        COOLDOWN:
        properties.Schema(properties.Schema.INTEGER,
                          _('Cooldown period, in seconds.'),
                          update_allowed=True),
        DESIRED_CAPACITY:
        properties.Schema(properties.Schema.INTEGER,
                          _('Desired initial number of instances.'),
                          update_allowed=True),
        HEALTH_CHECK_GRACE_PERIOD:
        properties.Schema(properties.Schema.INTEGER,
                          _('Not Implemented.'),
                          implemented=False),
        HEALTH_CHECK_TYPE:
        properties.Schema(properties.Schema.STRING,
                          _('Not Implemented.'),
                          constraints=[
                              constraints.AllowedValues(['EC2', 'ELB']),
                          ],
                          implemented=False),
        LOAD_BALANCER_NAMES:
        properties.Schema(properties.Schema.LIST,
                          _('List of LoadBalancer resources.')),
        VPCZONE_IDENTIFIER:
        properties.Schema(
            properties.Schema.LIST,
            _('Use only with Neutron, to list the internal subnet to '
              'which the instance will be attached; '
              'needed only if multiple exist; '
              'list length must be exactly 1.'),
            schema=properties.Schema(
                properties.Schema.STRING,
                _('UUID of the internal subnet to which the instance '
                  'will be attached.'))),
        TAGS:
        properties.Schema(properties.Schema.LIST,
                          _('Tags to attach to this group.'),
                          schema=properties.Schema(
                              properties.Schema.MAP,
                              schema={
                                  TAG_KEY:
                                  properties.Schema(properties.Schema.STRING,
                                                    required=True),
                                  TAG_VALUE:
                                  properties.Schema(properties.Schema.STRING,
                                                    required=True),
                              },
                          )),
    }

    attributes_schema = {
        INSTANCE_LIST:
        attributes.Schema(_("A comma-delimited list of server ip addresses. "
                            "(Heat extension)."),
                          type=attributes.Schema.STRING),
    }

    # Nested schema for the AutoScalingRollingUpdate update policy.
    rolling_update_schema = {
        MIN_INSTANCES_IN_SERVICE:
        properties.Schema(properties.Schema.INTEGER, default=0),
        MAX_BATCH_SIZE:
        properties.Schema(properties.Schema.INTEGER, default=1),
        PAUSE_TIME:
        properties.Schema(properties.Schema.STRING, default='PT0S')
    }

    update_policy_schema = {
        ROLLING_UPDATE:
        properties.Schema(properties.Schema.MAP, schema=rolling_update_schema)
    }

    def handle_create(self):
        """Kick off creation of the nested stack backing this group."""
        template = self.child_template()
        return self.create_with_template(template)

    def _get_members(self, group_id):
        """List COMPLETE nova servers in the stack as groupwatch members."""
        depth = cfg.CONF.max_nested_stack_depth
        return [{'id': res.resource_id,
                 'name': res.name,
                 'group_id': group_id}
                for res in self.stack.iter_resources(depth)
                if res.type() == 'OS::Nova::Server'
                and res.status == res.COMPLETE]

    def _add_scheduler(self, group_id):
        """Register a periodic groupwatch job for the group; return job id."""
        meta = {'group_id': group_id,
                'project_id': self.context.tenant_id}
        response = self.client('scheduler').scheduler.create(
            group_name='groupwatch',
            job_name=group_id,
            job_type='period',
            trigger_type='SIMPLE_TRIGGER',
            interval=240,
            cover_flag='true',
            end_time=4076884800000,
            meta_data=meta)
        return response.get('job_id')

    def _create_groupwatch(self):
        """Create a groupwatch group for this ASG; no-op unless enabled."""
        if not cfg.CONF.FusionSphere.groupwatch_enable:
            return

        group_id = self.stack.resource_by_refid(self.FnGetRefId()).resource_id
        members = self._get_members(group_id)
        job_id = self._add_scheduler(group_id)
        self.client('groupwatch').groups.create(
            id=group_id,
            name=self.name,
            type='VM',
            data={'scheduler_job_id': job_id},
            members=members)

    def _make_launch_config_resource(self, name, props):
        """Build a transient AWS LaunchConfiguration resource object."""
        definition = rsrc_defn.ResourceDefinition(
            name, 'AWS::AutoScaling::LaunchConfiguration', props)
        return resource.Resource(name, definition, self.stack)

    def _get_conf_properties(self):
        """Return the (config resource, properties) pair for group members.

        When INSTANCE_ID is set, a launch configuration is synthesized from
        the existing server's attributes; otherwise the parent class supplies
        the configured LaunchConfiguration.
        """
        instance_id = self.properties.get(self.INSTANCE_ID)
        if instance_id:
            server = self.client_plugin('nova').get_server(instance_id)
            # Mirror the server's image/flavor/keypair/security groups into
            # LaunchConfiguration-style (AWS-named) properties.
            instance_props = {
                'ImageId': server.image['id'],
                'InstanceType': server.flavor['id'],
                'KeyName': server.key_name,
                'SecurityGroups':
                [sg['name'] for sg in server.security_groups]
            }
            conf = self._make_launch_config_resource(self.name, instance_props)
            props = function.resolve(conf.properties.data)
        else:
            conf, props = super(AutoScalingGroup, self)._get_conf_properties()

        # A single-entry VPCZoneIdentifier selects the member subnet.
        vpc_zone_ids = self.properties.get(self.VPCZONE_IDENTIFIER)
        if vpc_zone_ids:
            props['SubnetId'] = vpc_zone_ids[0]

        return conf, props

    def check_create_complete(self, task):
        """Update cooldown timestamp after create succeeds."""
        done = super(AutoScalingGroup, self).check_create_complete(task)
        if not done:
            return done
        self._create_groupwatch()
        size_msg = "%s : %s" % (sc_util.CFN_EXACT_CAPACITY,
                                grouputils.get_size(self))
        self._finished_scaling(size_msg)
        return done

    def check_update_complete(self, cookie):
        """Update the cooldown timestamp after update succeeds."""
        done = super(AutoScalingGroup, self).check_update_complete(cookie)
        if not done:
            return done
        size_msg = "%s : %s" % (sc_util.CFN_EXACT_CAPACITY,
                                grouputils.get_size(self))
        self._finished_scaling(size_msg)
        return done

    def _get_new_capacity(self,
                          capacity,
                          adjustment,
                          adjustment_type=sc_util.CFN_EXACT_CAPACITY,
                          min_adjustment_step=None):
        """Compute the adjusted capacity, clamped to [MinSize, MaxSize]."""
        min_size = self.properties[self.MIN_SIZE]
        max_size = self.properties[self.MAX_SIZE]
        return sc_util.calculate_new_capacity(
            capacity, adjustment, adjustment_type, min_adjustment_step,
            min_size, max_size)

    def _update_groupwatch(self):
        """Push current membership to groupwatch; no-op unless enabled."""
        if not cfg.CONF.FusionSphere.groupwatch_enable:
            return

        group_id = self.stack.resource_by_refid(self.FnGetRefId()).resource_id
        self.client('groupwatch').groups.update(
            group_id,
            id=group_id,
            name=self.name,
            type='VM',
            members=self._get_members(group_id))

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Updates self.properties, if Properties has changed.

        If Properties has changed, update self.properties, so we get the new
        values during any subsequent adjustment.
        """
        if tmpl_diff:
            # parse update policy
            if 'UpdatePolicy' in tmpl_diff:
                up = json_snippet.update_policy(self.update_policy_schema,
                                                self.context)
                self.update_policy = up

        # Re-resolve properties from the new template before resizing.
        self.properties = json_snippet.properties(self.properties_schema,
                                                  self.context)
        if prop_diff:
            # Replace instances first if launch configuration has changed
            self._try_rolling_update(prop_diff)

        # Update will happen irrespective of whether auto-scaling
        # is in progress or not.
        capacity = grouputils.get_size(self)
        # When DesiredCapacity is unset, keep the current size (it is still
        # clamped to [MinSize, MaxSize] by _get_new_capacity).
        desired_capacity = self.properties[self.DESIRED_CAPACITY] or capacity
        new_capacity = self._get_new_capacity(capacity, desired_capacity)
        self.resize(new_capacity)

    def adjust(self,
               adjustment,
               adjustment_type=sc_util.CFN_CHANGE_IN_CAPACITY,
               min_adjustment_step=None):
        """Adjust the size of the scaling group if the cooldown permits.

        Returns True when the group size actually changed.  Raises
        exception.NoActionRequired when the cooldown suppresses the
        adjustment.
        """
        if not self._is_scaling_allowed():
            LOG.info(
                _LI("%(name)s NOT performing scaling adjustment, "
                    "cooldown %(cooldown)s"), {
                        'name': self.name,
                        'cooldown': self.properties[self.COOLDOWN]
                    })
            # Not an error: signals the caller that cooldown is in effect.
            raise exception.NoActionRequired()

        capacity = grouputils.get_size(self)
        new_capacity = self._get_new_capacity(capacity, adjustment,
                                              adjustment_type,
                                              min_adjustment_step)

        changed_size = new_capacity != capacity
        # send a notification before, on-error and on-success.
        notif = {
            'stack': self.stack,
            'adjustment': adjustment,
            'adjustment_type': adjustment_type,
            'capacity': capacity,
            'groupname': self.FnGetRefId(),
            'message': _("Start resizing the group %(group)s") % {
                'group': self.FnGetRefId()
            },
            'suffix': 'start',
        }
        notification.send(**notif)
        try:
            self.resize(new_capacity)
        except Exception as resize_ex:
            with excutils.save_and_reraise_exception():
                # Best-effort error notification; a failure here must not
                # mask the original resize exception (hence the nested try).
                try:
                    notif.update({
                        'suffix': 'error',
                        'message': six.text_type(resize_ex),
                        'capacity': grouputils.get_size(self),
                    })
                    notification.send(**notif)
                except Exception:
                    LOG.exception(_LE('Failed sending error notification'))
        else:
            notif.update({
                'suffix': 'end',
                'capacity': new_capacity,
                'message': _("End resizing the group %(group)s") % {
                    'group': notif['groupname']
                },
            })
            notification.send(**notif)
        finally:
            # Always refresh groupwatch membership and stamp the cooldown,
            # whether the resize succeeded or not.
            self._update_groupwatch()
            self._finished_scaling("%s : %s" % (adjustment_type, adjustment),
                                   changed_size=changed_size)
        return changed_size

    def _tags(self):
        """Add Identifying Tags to all servers in the group.

        This is so the Dimensions received from cfn-push-stats all include
        the groupname and stack id.
        Note: the group name must match what is returned from FnGetRefId
        """
        group_tag = {
            self.TAG_KEY: 'metering.AutoScalingGroupName',
            self.TAG_VALUE: self.FnGetRefId(),
        }
        return super(AutoScalingGroup, self)._tags() + [group_tag]

    def validate(self):
        """Validate group size bounds and AWS-specific property exclusivity."""
        min_size = self.properties[self.MIN_SIZE]
        max_size = self.properties[self.MAX_SIZE]

        if max_size < min_size:
            raise exception.StackValidationFailed(
                message=_("MinSize can not be greater than MaxSize"))

        if min_size < 0:
            raise exception.StackValidationFailed(
                message=_("The size of AutoScalingGroup can not be less "
                          "than zero"))

        desired_capacity = self.properties[self.DESIRED_CAPACITY]
        if desired_capacity is not None:
            if not (min_size <= desired_capacity <= max_size):
                raise exception.StackValidationFailed(
                    message=_("DesiredCapacity must be between MinSize and "
                              "MaxSize"))

        # TODO(pasquier-s): once Neutron is able to assign subnets to
        # availability zones, it will be possible to specify multiple subnets.
        # For now, only one subnet can be specified. The bug #1096017 tracks
        # this issue.
        vpc_zones = self.properties.get(self.VPCZONE_IDENTIFIER)
        if vpc_zones and len(vpc_zones) != 1:
            raise exception.NotSupported(feature=_("Anything other than one "
                                                   "VPCZoneIdentifier"))

        # For the AWS auto scaling group, exactly one of InstanceId and
        # LaunchConfigurationName must be supplied.
        if self.type() == 'AWS::AutoScaling::AutoScalingGroup':
            instance_id = self.properties.get(self.INSTANCE_ID)
            launch_config = self.properties.get(self.LAUNCH_CONFIGURATION_NAME)
            if bool(instance_id) == bool(launch_config):
                raise exception.StackValidationFailed(
                    message=_("Either 'InstanceId' or "
                              "'LaunchConfigurationName' must be provided."))

        super(AutoScalingGroup, self).validate()

    def _resolve_attribute(self, name):
        """Resolves the resource's attributes.

        heat extension: "InstanceList" returns comma delimited list of server
        ip addresses.
        """
        if name != self.INSTANCE_LIST:
            return None
        joined = u','.join(member.FnGetAtt('PublicIp')
                           for member in grouputils.get_members(self))
        # An empty group yields '' which collapses to None.
        return joined or None

    def child_template(self):
        """Build the nested stack template for the current group size."""
        # Fall back to MinSize whenever DesiredCapacity is unset (falsy).
        size = (self.properties[self.DESIRED_CAPACITY] or
                self.properties[self.MIN_SIZE])
        return self._create_template(size)

    def _delete_groupwatch(self):
        """Best-effort removal of the groupwatch group and its scheduler job.

        Silently returns when the FusionSphere groupwatch feature is
        disabled or when this resource has no physical id. Not-found
        errors from the groupwatch and scheduler services are ignored;
        other errors are re-raised by ignore_not_found.
        """
        if not cfg.CONF.FusionSphere.groupwatch_enable:
            return

        if not self.resource_id:
            return

        group = None
        try:
            group = self.client('groupwatch').groups.get(self.resource_id)
        except Exception as ex:
            # ignore_not_found re-raises anything that is not a 404.
            self.client_plugin('groupwatch').ignore_not_found(ex)
            return

        try:
            # Delete the scheduler job referenced in the group's data blob,
            # if one is recorded there.
            if (group and group.get('group') and 'data' in group.get('group')):
                scheduler_job_id = \
                    group.get('group').get('data').get('scheduler_job_id')
                self.client('scheduler').scheduler.delete(scheduler_job_id)
        except (AttributeError, KeyError):
            # do nothing
            pass
        except Exception as ex:
            self.client_plugin('scheduler').ignore_not_found(ex)

        try:
            self.client('groupwatch').groups.delete(self.resource_id)
        except Exception as ex:
            self.client_plugin('groupwatch').ignore_not_found(ex)

    def handle_delete(self):
        """Delete the group: clean up groupwatch, then the nested stack."""
        self._delete_groupwatch()
        return self.delete_nested()

    def handle_metadata_reset(self):
        """Clear the 'scaling_in_progress' flag in the resource metadata."""
        md = self.metadata_get()
        # Only write metadata back when the key exists at all.
        if 'scaling_in_progress' in md:
            md['scaling_in_progress'] = False
            self.metadata_set(md)
コード例 #19
0
ファイル: pool.py プロジェクト: Hybrid-Cloud/conveyor
class Pool(neutron.NeutronResource):
    """A resource for managing LBaaS v2 Pools.

    This resources manages Neutron-LBaaS v2 Pools, which represent a group
    of nodes. Pools define the subnet where nodes reside, balancing algorithm,
    and the nodes themselves.
    """

    support_status = support.SupportStatus(version='6.0.0')

    required_service_extension = 'lbaasv2'

    PROPERTIES = (
        ADMIN_STATE_UP,
        DESCRIPTION,
        SESSION_PERSISTENCE,
        NAME,
        LB_ALGORITHM,
        LISTENER,
        PROTOCOL,
        SESSION_PERSISTENCE_TYPE,
        SESSION_PERSISTENCE_COOKIE_NAME,
    ) = ('admin_state_up', 'description', 'session_persistence', 'name',
         'lb_algorithm', 'listener', 'protocol', 'type', 'cookie_name')

    SESSION_PERSISTENCE_TYPES = (SOURCE_IP, HTTP_COOKIE,
                                 APP_COOKIE) = ('SOURCE_IP', 'HTTP_COOKIE',
                                                'APP_COOKIE')

    ATTRIBUTES = (HEALTHMONITOR_ID_ATTR, LISTENERS_ATTR,
                  MEMBERS_ATTR) = ('healthmonitor_id', 'listeners', 'members')

    properties_schema = {
        ADMIN_STATE_UP:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('The administrative state of this pool.'),
                          default=True,
                          update_allowed=True,
                          # NOTE(review): this constraint pins the value to
                          # 'True', effectively forbidding disabled pools —
                          # confirm this restriction is intentional in this
                          # fork (upstream Heat has no such constraint).
                          constraints=[constraints.AllowedValues(['True'])]),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description of this pool.'),
                          update_allowed=True,
                          default=''),
        SESSION_PERSISTENCE:
        properties.Schema(
            properties.Schema.MAP,
            _('Configuration of session persistence.'),
            schema={
                SESSION_PERSISTENCE_TYPE:
                properties.Schema(
                    properties.Schema.STRING,
                    _('Method of implementation of session '
                      'persistence feature.'),
                    required=True,
                    constraints=[
                        constraints.AllowedValues(SESSION_PERSISTENCE_TYPES)
                    ]),
                SESSION_PERSISTENCE_COOKIE_NAME:
                properties.Schema(
                    properties.Schema.STRING,
                    _('Name of the cookie, '
                      'required if type is APP_COOKIE.'))
            },
        ),
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of this pool.'),
                          update_allowed=True),
        LB_ALGORITHM:
        properties.Schema(
            properties.Schema.STRING,
            _('The algorithm used to distribute load between the members of '
              'the pool.'),
            required=True,
            constraints=[
                constraints.AllowedValues(
                    ['ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP']),
            ],
            update_allowed=True,
        ),
        LISTENER:
        properties.Schema(
            properties.Schema.STRING,
            # Fixed typo: 'Listner' -> 'Listener'.
            _('Listener name or ID to be associated with this pool.'),
            required=True),
        PROTOCOL:
        properties.Schema(properties.Schema.STRING,
                          _('Protocol of the pool.'),
                          required=True,
                          constraints=[
                              constraints.AllowedValues(['TCP', 'HTTP']),
                          ]),
    }

    attributes_schema = {
        HEALTHMONITOR_ID_ATTR:
        attributes.Schema(
            _('ID of the health monitor associated with this pool.'),
            type=attributes.Schema.STRING),
        LISTENERS_ATTR:
        attributes.Schema(_('Listener associated with this pool.'),
                          type=attributes.Schema.STRING),
        MEMBERS_ATTR:
        attributes.Schema(_('Members associated with this pool.'),
                          type=attributes.Schema.LIST),
    }

    def __init__(self, name, definition, stack):
        super(Pool, self).__init__(name, definition, stack)
        # Cache for the load balancer id resolved from the listener.
        self._lb_id = None

    @property
    def lb_id(self):
        """Return the id of the load balancer owning the pool's listener.

        Resolved lazily from the LISTENER property and cached, since the
        association cannot change for the lifetime of this resource.
        """
        if self._lb_id is None:
            listener_id = self.client_plugin().find_resourceid_by_name_or_id(
                'listener', self.properties[self.LISTENER])
            listener = self.client().show_listener(listener_id)['listener']

            self._lb_id = listener['loadbalancers'][0]['id']
        return self._lb_id

    def validate(self):
        """Validate session persistence settings.

        APP_COOKIE persistence requires a cookie name; SOURCE_IP
        persistence forbids one.
        """
        res = super(Pool, self).validate()
        if res:
            return res

        if self.properties[self.SESSION_PERSISTENCE] is not None:
            session_p = self.properties[self.SESSION_PERSISTENCE]
            persistence_type = session_p[self.SESSION_PERSISTENCE_TYPE]
            if persistence_type == self.APP_COOKIE:
                if not session_p.get(self.SESSION_PERSISTENCE_COOKIE_NAME):
                    msg = (_('Property %(cookie)s is required when %(sp)s '
                             'type is set to %(app)s.') %
                           {
                               'cookie': self.SESSION_PERSISTENCE_COOKIE_NAME,
                               'sp': self.SESSION_PERSISTENCE,
                               'app': self.APP_COOKIE
                           })
                    raise exception.StackValidationFailed(message=msg)
            elif persistence_type == self.SOURCE_IP:
                if session_p.get(self.SESSION_PERSISTENCE_COOKIE_NAME):
                    msg = (_('Property %(cookie)s must NOT be specified when '
                             '%(sp)s type is set to %(ip)s.') %
                           {
                               'cookie': self.SESSION_PERSISTENCE_COOKIE_NAME,
                               'sp': self.SESSION_PERSISTENCE,
                               'ip': self.SOURCE_IP
                           })
                    raise exception.StackValidationFailed(message=msg)

    def _check_lb_status(self):
        # The LB must return to ACTIVE before the next operation may start.
        return self.client_plugin().check_lb_status(self.lb_id)

    def handle_create(self):
        """Prepare the pool create request body.

        The actual API call is deferred to check_create_complete so it can
        be retried while the parent load balancer is still busy.
        """
        properties = self.prepare_properties(self.properties,
                                             self.physical_resource_name())

        self.client_plugin().resolve_listener(properties, self.LISTENER,
                                              'listener_id')

        session_p = properties.get(self.SESSION_PERSISTENCE)
        if session_p is not None:
            session_props = self.prepare_properties(session_p, None)
            properties[self.SESSION_PERSISTENCE] = session_props

        return properties

    def check_create_complete(self, properties):
        """Create the pool (retrying while the LB is immutable) and poll."""
        if self.resource_id is None:
            try:
                pool = self.client().create_lbaas_pool({'pool':
                                                        properties})['pool']
                self.resource_id_set(pool['id'])
            except Exception as ex:
                # is_invalid means the LB is still immutable; retry later.
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def _show_resource(self):
        return self.client().show_lbaas_pool(self.resource_id)['pool']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        # Defer the update call to check_update_complete for retries.
        self._update_called = False
        return prop_diff

    def check_update_complete(self, prop_diff):
        """Apply the property diff (retrying while the LB is busy) and poll."""
        if not prop_diff:
            return True

        if not self._update_called:
            try:
                self.client().update_lbaas_pool(self.resource_id,
                                                {'pool': prop_diff})
                self._update_called = True
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def handle_delete(self):
        # Defer the delete call to check_delete_complete for retries.
        self._delete_called = False

    def check_delete_complete(self, data):
        """Delete the pool (retrying while the LB is busy) and poll."""
        if self.resource_id is None:
            return True

        if not self._delete_called:
            try:
                self.client().delete_lbaas_pool(self.resource_id)
                self._delete_called = True
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                elif self.client_plugin().is_not_found(ex):
                    return True
                raise

        return self._check_lb_status()
コード例 #20
0
class NetworkGateway(neutron.NeutronResource):
    """Network Gateway resource in Neutron Network Gateway.

    Resource for connecting internal networks with specified devices.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        NAME,
        DEVICES,
        CONNECTIONS,
    ) = (
        'name',
        'devices',
        'connections',
    )

    ATTRIBUTES = (DEFAULT, ) = ('default', )

    _DEVICES_KEYS = (
        ID,
        INTERFACE_NAME,
    ) = (
        'id',
        'interface_name',
    )

    _CONNECTIONS_KEYS = (
        NETWORK_ID,
        NETWORK,
        SEGMENTATION_TYPE,
        SEGMENTATION_ID,
    ) = (
        'network_id',
        'network',
        'segmentation_type',
        'segmentation_id',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          description=_('The name of the network gateway.'),
                          update_allowed=True),
        DEVICES:
        properties.Schema(
            properties.Schema.LIST,
            description=_('Device info for this network gateway.'),
            required=True,
            constraints=[constraints.Length(min=1)],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ID:
                    properties.Schema(properties.Schema.STRING,
                                      description=_(
                                          'The device id for the network '
                                          'gateway.'),
                                      required=True),
                    INTERFACE_NAME:
                    properties.Schema(properties.Schema.STRING,
                                      description=_(
                                          'The interface name for the '
                                          'network gateway.'),
                                      required=True)
                })),
        CONNECTIONS:
        properties.Schema(
            properties.Schema.LIST,
            description=_('Connection info for this network gateway.'),
            default={},
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    # network_id is hidden/deprecated in favour of network.
                    NETWORK_ID:
                    properties.Schema(
                        properties.Schema.STRING,
                        support_status=support.SupportStatus(
                            status=support.HIDDEN,
                            message=_('Use property %s.') % NETWORK,
                            version='5.0.0',
                            previous_status=support.SupportStatus(
                                status=support.DEPRECATED, version='2014.2')),
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ],
                    ),
                    NETWORK:
                    properties.Schema(
                        properties.Schema.STRING,
                        description=_('The internal network to connect on '
                                      'the network gateway.'),
                        support_status=support.SupportStatus(version='2014.2'),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ],
                    ),
                    SEGMENTATION_TYPE:
                    properties.Schema(
                        properties.Schema.STRING,
                        description=_(
                            'L2 segmentation strategy on the external '
                            'side of the network gateway.'),
                        default='flat',
                        constraints=[
                            constraints.AllowedValues(('flat', 'vlan'))
                        ]),
                    SEGMENTATION_ID:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        description=_(
                            'The id for L2 segment on the external side '
                            'of the network gateway. Must be specified '
                            'when using vlan.'),
                        constraints=[constraints.Range(0, 4094)])
                }))
    }

    attributes_schema = {
        DEFAULT:
        attributes.Schema(_("A boolean value of default flag."),
                          type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        """Translate the deprecated network_id into network.

        Replaces connections[].network_id with connections[].network, then
        resolves the network name/id to an id via the Neutron client.
        """
        return [
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.CONNECTIONS, self.NETWORK],
                                        value_name=self.NETWORK_ID),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.CONNECTIONS, self.NETWORK],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='network')
        ]

    def _show_resource(self):
        """Fetch the current gateway state from Neutron."""
        return self.client().show_network_gateway(
            self.resource_id)['network_gateway']

    def validate(self):
        """Validate any of the provided params."""
        super(NetworkGateway, self).validate()
        connections = self.properties[self.CONNECTIONS]

        for connection in connections:
            segmentation_type = connection[self.SEGMENTATION_TYPE]
            segmentation_id = connection.get(self.SEGMENTATION_ID)

            # vlan requires a segmentation id; flat forbids a non-zero one.
            if segmentation_type == 'vlan' and segmentation_id is None:
                msg = _("segmentation_id must be specified for using vlan")
                raise exception.StackValidationFailed(message=msg)

            if segmentation_type == 'flat' and segmentation_id:
                msg = _("segmentation_id cannot be specified except 0 for "
                        "using flat")
                raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the gateway, then connect each configured network."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())

        # Connections are attached one by one after the gateway exists.
        connections = props.pop(self.CONNECTIONS)
        ret = self.client().create_network_gateway({'network_gateway':
                                                    props})['network_gateway']

        self.resource_id_set(ret['id'])

        for connection in connections:
            if self.NETWORK in connection:
                # The connect API expects the network_id key.
                connection['network_id'] = connection.pop(self.NETWORK)
            self.client().connect_network_gateway(ret['id'], connection)

    def handle_delete(self):
        """Disconnect all networks, then delete the gateway.

        Not-found errors are ignored so deletion is idempotent. Returns
        True on successful deletion.
        """
        if not self.resource_id:
            return

        connections = self.properties[self.CONNECTIONS]
        for connection in connections:
            with self.client_plugin().ignore_not_found:
                if self.NETWORK in connection:
                    connection['network_id'] = connection.pop(self.NETWORK)
                self.client().disconnect_network_gateway(
                    self.resource_id, connection)
        try:
            self.client().delete_network_gateway(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply a property diff.

        A device change forces a delete/recreate of the gateway; other
        changes are applied in place, and connection changes are handled
        by disconnecting the old networks and connecting the new ones.
        """
        connections = None
        if self.CONNECTIONS in prop_diff:
            connections = prop_diff.pop(self.CONNECTIONS)

        if self.DEVICES in prop_diff:
            # Devices cannot be updated in place: recreate the gateway.
            self.handle_delete()
            self.properties.data.update(prop_diff)
            self.handle_create()
            return

        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_network_gateway(
                self.resource_id, {'network_gateway': prop_diff})

        if connections:
            # Drop existing connections before attaching the new set.
            for connection in self.properties[self.CONNECTIONS]:
                with self.client_plugin().ignore_not_found:
                    if self.NETWORK in connection:
                        connection['network_id'] = connection.pop(self.NETWORK)
                    self.client().disconnect_network_gateway(
                        self.resource_id, connection)
            for connection in connections:
                if self.NETWORK in connection:
                    connection['network_id'] = connection.pop(self.NETWORK)
                self.client().connect_network_gateway(self.resource_id,
                                                      connection)
コード例 #21
0
class RandomString(resource.Resource):
    """A resource which generates a random string.

    This is useful for configuring passwords and secrets on services. Random
    string can be generated from specified character sequences, which means
    that all characters will be randomly chosen from specified sequences, or
    with some classes, e.g. letterdigits, which means that all character will
    be randomly chosen from union of ascii letters and digits. Output string
    will be randomly generated string with specified length (or with length of
    32, if length property doesn't specified).
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        LENGTH,
        SEQUENCE,
        CHARACTER_CLASSES,
        CHARACTER_SEQUENCES,
        SALT,
    ) = (
        'length',
        'sequence',
        'character_classes',
        'character_sequences',
        'salt',
    )

    _CHARACTER_CLASSES_KEYS = (
        CHARACTER_CLASSES_CLASS,
        CHARACTER_CLASSES_MIN,
    ) = (
        'class',
        'min',
    )

    _CHARACTER_SEQUENCES = (
        CHARACTER_SEQUENCES_SEQUENCE,
        CHARACTER_SEQUENCES_MIN,
    ) = (
        'sequence',
        'min',
    )

    ATTRIBUTES = (VALUE, ) = ('value', )

    properties_schema = {
        LENGTH:
        properties.Schema(properties.Schema.INTEGER,
                          _('Length of the string to generate.'),
                          default=32,
                          constraints=[
                              constraints.Range(1, 512),
                          ]),
        SEQUENCE:
        properties.Schema(
            properties.Schema.STRING,
            _('Sequence of characters to build the random string from.'),
            constraints=[
                constraints.AllowedValues([
                    'lettersdigits', 'letters', 'lowercase', 'uppercase',
                    'digits', 'hexdigits', 'octdigits'
                ]),
            ],
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_('Use property %s.') % CHARACTER_CLASSES,
                    version='2014.2'))),
        CHARACTER_CLASSES:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of character class and their constraints to generate '
              'the random string from.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CHARACTER_CLASSES_CLASS:
                    properties.Schema(
                        properties.Schema.STRING,
                        (_('A character class and its corresponding %(min)s '
                           'constraint to generate the random string from.') %
                         {
                             'min': CHARACTER_CLASSES_MIN
                         }),
                        constraints=[
                            constraints.AllowedValues([
                                'lettersdigits', 'letters', 'lowercase',
                                'uppercase', 'digits', 'hexdigits', 'octdigits'
                            ]),
                        ],
                        default='lettersdigits'),
                    CHARACTER_CLASSES_MIN:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('The minimum number of characters from this '
                          'character class that will be in the generated '
                          'string.'),
                        default=1,
                        constraints=[
                            constraints.Range(1, 512),
                        ])
                }),
            # add defaults for backward compatibility
            default=[{
                CHARACTER_CLASSES_CLASS: 'lettersdigits',
                CHARACTER_CLASSES_MIN: 1
            }]),
        CHARACTER_SEQUENCES:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of character sequences and their constraints to '
              'generate the random string from.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CHARACTER_SEQUENCES_SEQUENCE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('A character sequence and its corresponding %(min)s '
                          'constraint to generate the random string '
                          'from.') % {'min': CHARACTER_SEQUENCES_MIN},
                        required=True),
                    CHARACTER_SEQUENCES_MIN:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('The minimum number of characters from this '
                          'sequence that will be in the generated '
                          'string.'),
                        default=1,
                        constraints=[
                            constraints.Range(1, 512),
                        ])
                })),
        SALT:
        properties.Schema(
            properties.Schema.STRING,
            _('Value which can be set or changed on stack update to trigger '
              'the resource for replacement with a new random string. The '
              'salt value itself is ignored by the random generator.')),
    }

    attributes_schema = {
        VALUE:
        attributes.Schema(_(
            'The random string generated by this resource. This value is '
            'also available by referencing the resource.'),
                          cache_mode=attributes.Schema.CACHE_NONE,
                          type=attributes.Schema.STRING),
    }

    # Named character classes -> the actual character pools they draw from.
    # Note: 'hexdigits' deliberately uses uppercase A-F only.
    _sequences = {
        'lettersdigits': string.ascii_letters + string.digits,
        'letters': string.ascii_letters,
        'lowercase': string.ascii_lowercase,
        'uppercase': string.ascii_uppercase,
        'digits': string.digits,
        'hexdigits': string.digits + 'ABCDEF',
        'octdigits': string.octdigits
    }

    def translation_rules(self, props):
        """Map the deprecated 'sequence' property onto character_classes."""
        if props.get(self.SEQUENCE):
            return [
                translation.TranslationRule(
                    props, translation.TranslationRule.ADD,
                    [self.CHARACTER_CLASSES],
                    [{
                        self.CHARACTER_CLASSES_CLASS: props.get(self.SEQUENCE),
                        self.CHARACTER_CLASSES_MIN: 1
                    }]),
                translation.TranslationRule(props,
                                            translation.TranslationRule.DELETE,
                                            [self.SEQUENCE])
            ]

    def _generate_random_string(self, char_sequences, char_classes, length):
        """Generate a random string of the given length.

        Each character sequence and character class first contributes its
        configured minimum number of characters; the remainder is filled
        with characters drawn from the provided pools, and the result is
        shuffled so the minimum-quota characters are not clustered at the
        front.

        Uses random.SystemRandom (OS entropy) rather than the default
        Mersenne-Twister generator, since the output is typically used as
        a password or secret.
        """
        rng = random.SystemRandom()
        random_string = ""

        # Add the minimum number of chars from each char sequence & char class
        if char_sequences:
            for char_seq in char_sequences:
                seq = char_seq[self.CHARACTER_SEQUENCES_SEQUENCE]
                seq_min = char_seq[self.CHARACTER_SEQUENCES_MIN]
                for i in six.moves.xrange(seq_min):
                    random_string += rng.choice(seq)

        if char_classes:
            for char_class in char_classes:
                cclass_class = char_class[self.CHARACTER_CLASSES_CLASS]
                cclass_seq = self._sequences[cclass_class]
                cclass_min = char_class[self.CHARACTER_CLASSES_MIN]
                for i in six.moves.xrange(cclass_min):
                    random_string += rng.choice(cclass_seq)

        def random_class_char():
            # One char from a randomly chosen character class.
            cclass_dict = rng.choice(char_classes)
            cclass_class = cclass_dict[self.CHARACTER_CLASSES_CLASS]
            cclass_seq = self._sequences[cclass_class]
            return rng.choice(cclass_seq)

        def random_seq_char():
            # One char from a randomly chosen character sequence.
            seq_dict = rng.choice(char_sequences)
            seq = seq_dict[self.CHARACTER_SEQUENCES_SEQUENCE]
            return rng.choice(seq)

        # Fill up rest with random chars from provided sequences & classes,
        # weighting the choice by how many of each were supplied.
        if char_sequences and char_classes:
            weighted_choices = ([True] * len(char_classes) +
                                [False] * len(char_sequences))
            while len(random_string) < length:
                if rng.choice(weighted_choices):
                    random_string += random_class_char()
                else:
                    random_string += random_seq_char()

        elif char_sequences:
            while len(random_string) < length:
                random_string += random_seq_char()

        else:
            while len(random_string) < length:
                random_string += random_class_char()

        # Randomize string so minimum-quota chars are evenly distributed.
        random_string = ''.join(
            rng.sample(random_string, len(random_string)))
        return random_string

    def validate(self):
        """Check that the requested length can satisfy all minimums."""
        super(RandomString, self).validate()
        char_sequences = self.properties[self.CHARACTER_SEQUENCES]
        char_classes = self.properties[self.CHARACTER_CLASSES]

        def char_min(char_dicts, min_prop):
            # Sum of the per-entry minimum character counts.
            if char_dicts:
                return sum(char_dict[min_prop] for char_dict in char_dicts)
            return 0

        length = self.properties[self.LENGTH]
        min_length = (char_min(char_sequences, self.CHARACTER_SEQUENCES_MIN) +
                      char_min(char_classes, self.CHARACTER_CLASSES_MIN))
        if min_length > length:
            msg = _("Length property cannot be smaller than combined "
                    "character class and character sequence minimums")
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Generate the string and store it (redacted) in resource data."""
        char_sequences = self.properties[self.CHARACTER_SEQUENCES]
        char_classes = self.properties[self.CHARACTER_CLASSES]
        length = self.properties[self.LENGTH]

        random_string = self._generate_random_string(char_sequences,
                                                     char_classes, length)
        self.data_set('value', random_string, redact=True)
        self.resource_id_set(self.physical_resource_name())

    def _resolve_attribute(self, name):
        if name == self.VALUE:
            return self.data().get(self.VALUE)

    def get_reference_id(self):
        """Referencing the resource yields the generated string once created."""
        if self.resource_id is not None:
            return self.data().get('value')
        else:
            return six.text_type(self.name)
コード例 #22
0
class Pool(neutron.NeutronResource):
    """A resource for managing load balancer pools in Neutron.

    A load balancing pool is a logical set of devices, such as web servers,
    that you group together to receive and process traffic. The loadbalancing
    function chooses a member of the pool according to the configured load
    balancing method to handle the new requests or connections received on the
    VIP address. There is only one pool for a VIP.
    """

    required_service_extension = 'lbaas'

    PROPERTIES = (
        PROTOCOL, SUBNET_ID, SUBNET, LB_METHOD, NAME, DESCRIPTION,
        ADMIN_STATE_UP, VIP, MONITORS, PROVIDER,
    ) = (
        'protocol', 'subnet_id', 'subnet', 'lb_method', 'name', 'description',
        'admin_state_up', 'vip', 'monitors', 'provider',
    )

    _VIP_KEYS = (
        VIP_NAME, VIP_DESCRIPTION, VIP_SUBNET, VIP_ADDRESS,
        VIP_CONNECTION_LIMIT, VIP_PROTOCOL_PORT,
        VIP_SESSION_PERSISTENCE, VIP_ADMIN_STATE_UP,
    ) = (
        'name', 'description', 'subnet', 'address',
        'connection_limit', 'protocol_port',
        'session_persistence', 'admin_state_up',
    )

    _VIP_SESSION_PERSISTENCE_KEYS = (
        VIP_SESSION_PERSISTENCE_TYPE, VIP_SESSION_PERSISTENCE_COOKIE_NAME,
    ) = (
        'type', 'cookie_name',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, NAME_ATTR, PROTOCOL_ATTR, SUBNET_ID_ATTR,
        LB_METHOD_ATTR, DESCRIPTION_ATTR, TENANT_ID, VIP_ATTR, PROVIDER_ATTR,
    ) = (
        'admin_state_up', 'name', 'protocol', 'subnet_id',
        'lb_method', 'description', 'tenant_id', 'vip', 'provider',
    )

    properties_schema = {
        PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            _('Protocol for balancing.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['TCP', 'HTTP', 'HTTPS']),
            ]
        ),
        # Hidden legacy spelling; translation_rules() maps it onto SUBNET.
        SUBNET_ID: properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                message=_('Use property %s.') % SUBNET,
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.2'
                )
            ),
            constraints=[
                constraints.CustomConstraint('neutron.subnet')
            ]
        ),
        SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('The subnet for the port on which the members '
              'of the pool will be connected.'),
            support_status=support.SupportStatus(version='2014.2'),
            required=True,
            constraints=[
                constraints.CustomConstraint('neutron.subnet')
            ]
        ),
        LB_METHOD: properties.Schema(
            properties.Schema.STRING,
            _('The algorithm used to distribute load between the members of '
              'the pool.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ROUND_ROBIN',
                                           'LEAST_CONNECTIONS', 'SOURCE_IP']),
            ],
            update_allowed=True
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the pool.')
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the pool.'),
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of this pool.'),
            default=True,
            update_allowed=True
        ),
        PROVIDER: properties.Schema(
            properties.Schema.STRING,
            _('LBaaS provider to implement this load balancer instance.'),
            support_status=support.SupportStatus(version='5.0.0'),
            constraints=[
                constraints.CustomConstraint('neutron.lb.provider')
            ],
        ),
        VIP: properties.Schema(
            properties.Schema.MAP,
            _('IP address and port of the pool.'),
            schema={
                VIP_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of the vip.')
                ),
                VIP_DESCRIPTION: properties.Schema(
                    properties.Schema.STRING,
                    _('Description of the vip.')
                ),
                VIP_SUBNET: properties.Schema(
                    properties.Schema.STRING,
                    _('Subnet of the vip.'),
                    constraints=[
                        constraints.CustomConstraint('neutron.subnet')
                    ]
                ),
                VIP_ADDRESS: properties.Schema(
                    properties.Schema.STRING,
                    _('IP address of the vip.'),
                    constraints=[
                        constraints.CustomConstraint('ip_addr')
                    ]
                ),
                VIP_CONNECTION_LIMIT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('The maximum number of connections per second '
                      'allowed for the vip.')
                ),
                VIP_PROTOCOL_PORT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('TCP port on which to listen for client traffic '
                      'that is associated with the vip address.'),
                    required=True
                ),
                VIP_SESSION_PERSISTENCE: properties.Schema(
                    properties.Schema.MAP,
                    _('Configuration of session persistence.'),
                    schema={
                        VIP_SESSION_PERSISTENCE_TYPE: properties.Schema(
                            properties.Schema.STRING,
                            _('Method of implementation of session '
                              'persistence feature.'),
                            required=True,
                            constraints=[constraints.AllowedValues(
                                ['SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE']
                            )]
                        ),
                        VIP_SESSION_PERSISTENCE_COOKIE_NAME: properties.Schema(
                            properties.Schema.STRING,
                            _('Name of the cookie, '
                              'required if type is APP_COOKIE.')
                        )
                    }
                ),
                VIP_ADMIN_STATE_UP: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('The administrative state of this vip.'),
                    default=True
                ),
            },
            required=True
        ),
        MONITORS: properties.Schema(
            properties.Schema.LIST,
            _('List of health monitors associated with the pool.'),
            default=[],
            update_allowed=True
        ),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _('The administrative state of this pool.'),
            type=attributes.Schema.STRING
        ),
        NAME_ATTR: attributes.Schema(
            _('Name of the pool.'),
            type=attributes.Schema.STRING
        ),
        PROTOCOL_ATTR: attributes.Schema(
            _('Protocol to balance.'),
            type=attributes.Schema.STRING
        ),
        SUBNET_ID_ATTR: attributes.Schema(
            _('The subnet for the port on which the members of the pool '
              'will be connected.'),
            type=attributes.Schema.STRING
        ),
        LB_METHOD_ATTR: attributes.Schema(
            _('The algorithm used to distribute load between the members '
              'of the pool.'),
            type=attributes.Schema.STRING
        ),
        DESCRIPTION_ATTR: attributes.Schema(
            _('Description of the pool.'),
            type=attributes.Schema.STRING
        ),
        TENANT_ID: attributes.Schema(
            _('Tenant owning the pool.'),
            type=attributes.Schema.STRING
        ),
        VIP_ATTR: attributes.Schema(
            _('Vip associated with the pool.'),
            type=attributes.Schema.MAP
        ),
        PROVIDER_ATTR: attributes.Schema(
            _('Provider implementing this load balancer instance.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING,
        ),
    }

    def translation_rules(self, props):
        """Map the hidden subnet_id property onto subnet and resolve names.

        The REPLACE rule migrates templates still using the legacy
        ``subnet_id`` property; the RESOLVE rules turn subnet names into
        UUIDs for both the pool and the vip.
        """
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.SUBNET],
                value_path=[self.SUBNET_ID]
            ),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.SUBNET],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='subnet'
            ),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.VIP, self.VIP_SUBNET],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='subnet'
            )
        ]

    def validate(self):
        """Validate properties; APP_COOKIE persistence needs a cookie_name.

        :raises exception.StackValidationFailed: when session persistence
            type is APP_COOKIE but no cookie_name was supplied.
        """
        res = super(Pool, self).validate()
        if res:
            return res
        session_p = self.properties[self.VIP].get(self.VIP_SESSION_PERSISTENCE)
        if session_p is None:
            # session persistence is not configured, skip validation
            return

        persistence_type = session_p[self.VIP_SESSION_PERSISTENCE_TYPE]
        if persistence_type == 'APP_COOKIE':
            if session_p.get(self.VIP_SESSION_PERSISTENCE_COOKIE_NAME):
                return

            msg = _('Property cookie_name is required, when '
                    'session_persistence type is set to APP_COOKIE.')
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the pool, associate monitors, then create its vip.

        The vip id is stored in resource metadata so later phases
        (attribute resolution, deletion) can find it.
        """
        # NOTE: shadows the module-level `properties` import inside
        # this method; it is a plain dict of prepared property values.
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        subnet_id = properties.pop(self.SUBNET)
        properties['subnet_id'] = subnet_id
        # vip and monitors are handled with separate API calls below.
        vip_properties = properties.pop(self.VIP)
        monitors = properties.pop(self.MONITORS)

        pool = self.client().create_pool({'pool': properties})['pool']
        self.resource_id_set(pool['id'])

        for monitor in monitors:
            self.client().associate_health_monitor(
                pool['id'], {'health_monitor': {'id': monitor}})

        vip_arguments = self.prepare_properties(
            vip_properties,
            '%s.vip' % (self.name,))

        session_p = vip_arguments.get(self.VIP_SESSION_PERSISTENCE)
        if session_p is not None:
            prepared_props = self.prepare_properties(session_p, None)
            vip_arguments['session_persistence'] = prepared_props

        # The vip inherits the pool's protocol.
        vip_arguments['protocol'] = self.properties[self.PROTOCOL]

        # Default the vip to the pool's subnet when none was given.
        if vip_arguments.get(self.VIP_SUBNET) is None:
            vip_arguments['subnet_id'] = subnet_id
        else:
            vip_arguments['subnet_id'] = vip_arguments.pop(self.VIP_SUBNET)

        vip_arguments['pool_id'] = pool['id']
        vip = self.client().create_vip({'vip': vip_arguments})['vip']

        self.metadata_set({'vip': vip['id']})

    def _show_resource(self):
        """Return the current pool representation from neutron."""
        return self.client().show_pool(self.resource_id)['pool']

    def check_create_complete(self, data):
        """Poll pool then vip status until both are ACTIVE.

        Returns False while either is PENDING_CREATE; raises on ERROR or
        any unrecognised status.
        """
        # NOTE: shadows the module-level `attributes` import inside
        # this method; it is the pool dict returned by neutron.
        attributes = self._show_resource()
        status = attributes['status']
        if status == 'PENDING_CREATE':
            return False
        elif status == 'ACTIVE':
            vip_attributes = self.client().show_vip(
                self.metadata_get()['vip'])['vip']
            vip_status = vip_attributes['status']
            if vip_status == 'PENDING_CREATE':
                return False
            if vip_status == 'ACTIVE':
                return True
            if vip_status == 'ERROR':
                raise exception.ResourceInError(
                    resource_status=vip_status,
                    status_reason=_('error in vip'))
            raise exception.ResourceUnknownStatus(
                resource_status=vip_status,
                result=_('Pool creation failed due to vip'))
        elif status == 'ERROR':
            raise exception.ResourceInError(
                resource_status=status,
                status_reason=_('error in pool'))
        else:
            raise exception.ResourceUnknownStatus(
                resource_status=status,
                result=_('Pool creation failed'))

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply property updates; monitors are diffed via set operations."""
        if prop_diff:
            if self.MONITORS in prop_diff:
                monitors = set(prop_diff.pop(self.MONITORS))
                old_monitors = set(self.properties[self.MONITORS])
                # Detach removed monitors, attach newly added ones.
                for monitor in old_monitors - monitors:
                    self.client().disassociate_health_monitor(
                        self.resource_id, monitor)
                for monitor in monitors - old_monitors:
                    self.client().associate_health_monitor(
                        self.resource_id, {'health_monitor': {'id': monitor}})

            # Any remaining diff goes straight to the pool update call.
            if prop_diff:
                self.client().update_pool(self.resource_id,
                                          {'pool': prop_diff})

    def _resolve_attribute(self, name):
        """Resolve the 'vip' attribute; defer the rest to the base class."""
        if name == self.VIP_ATTR:
            return self.client().show_vip(self.metadata_get()['vip'])['vip']
        return super(Pool, self)._resolve_attribute(name)

    def handle_delete(self):
        """Start deletion and return a progress tracker.

        The returned PoolDeleteProgress is consumed by
        check_delete_complete, which performs the actual API calls.
        """
        if not self.resource_id:
            prg = progress.PoolDeleteProgress(True)
            return prg

        prg = progress.PoolDeleteProgress()
        if not self.metadata_get():
            # No vip was recorded, so skip the vip deletion steps.
            prg.vip['delete_called'] = True
            prg.vip['deleted'] = True
        return prg

    def _delete_vip(self):
        """Delete the vip; a not-found response counts as success."""
        return self._not_found_in_call(
            self.client().delete_vip, self.metadata_get()['vip'])

    def _check_vip_deleted(self):
        """Return True once the vip can no longer be shown."""
        return self._not_found_in_call(
            self.client().show_vip, self.metadata_get()['vip'])

    def _delete_pool(self):
        """Delete the pool; a not-found response counts as success."""
        return self._not_found_in_call(
            self.client().delete_pool, self.resource_id)

    def check_delete_complete(self, prg):
        """Advance vip-then-pool deletion, one step per poll cycle."""
        if not prg.vip['delete_called']:
            prg.vip['deleted'] = self._delete_vip()
            prg.vip['delete_called'] = True
            return False
        if not prg.vip['deleted']:
            prg.vip['deleted'] = self._check_vip_deleted()
            return False
        if not prg.pool['delete_called']:
            prg.pool['deleted'] = self._delete_pool()
            prg.pool['delete_called'] = True
            return prg.pool['deleted']
        if not prg.pool['deleted']:
            prg.pool['deleted'] = super(Pool, self).check_delete_complete(True)
            return prg.pool['deleted']
        return True
コード例 #23
0
ファイル: pool_member.py プロジェクト: Hybrid-Cloud/conveyor
class PoolMember(neutron.NeutronResource):
    """A resource for managing LBaaS v2 Pool Members.

    A pool member represents a single backend node.
    """

    support_status = support.SupportStatus(version='6.0.0')

    required_service_extension = 'lbaasv2'

    PROPERTIES = (
        POOL, ADDRESS, PROTOCOL_PORT, WEIGHT, ADMIN_STATE_UP,
        SUBNET,
    ) = (
        'pool', 'address', 'protocol_port', 'weight', 'admin_state_up',
        'subnet'
    )

    ATTRIBUTES = (
        ADDRESS_ATTR, POOL_ID_ATTR
    ) = (
        'address', 'pool_id'
    )

    properties_schema = {
        POOL: properties.Schema(
            properties.Schema.STRING,
            _('Name or ID of the load balancing pool.'),
            required=True
        ),
        ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address of the pool member on the pool network.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('ip_addr')
            ]
        ),
        PROTOCOL_PORT: properties.Schema(
            properties.Schema.INTEGER,
            _('Port on which the pool member listens for requests or '
              'connections.'),
            required=True,
            constraints=[
                constraints.Range(1, 65535),
            ]
        ),
        WEIGHT: properties.Schema(
            properties.Schema.INTEGER,
            _('Weight of pool member in the pool (default to 1).'),
            default=1,
            constraints=[
                constraints.Range(0, 256),
            ],
            update_allowed=True
        ),
        # NOTE(review): the AllowedValues constraint restricts this
        # boolean to True only — presumably because the backend does not
        # support disabled members; confirm against the service.
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the pool member.'),
            default=True,
            update_allowed=True,
            constraints=[constraints.AllowedValues(['True'])]
        ),
        SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('Subnet name or ID of this member.'),
            constraints=[
                constraints.CustomConstraint('neutron.subnet')
            ]
        ),
    }

    attributes_schema = {
        ADDRESS_ATTR: attributes.Schema(
            _('The IP address of the pool member.'),
            type=attributes.Schema.STRING
        ),
        POOL_ID_ATTR: attributes.Schema(
            _('The ID of the pool to which the pool member belongs.'),
            type=attributes.Schema.STRING
        )
    }

    def __init__(self, name, definition, stack):
        # Lazily-resolved ids, cached by the properties below.
        super(PoolMember, self).__init__(name, definition, stack)
        self._pool_id = None
        self._lb_id = None

    @property
    def pool_id(self):
        """UUID of the parent pool, resolved from name/id once and cached."""
        if self._pool_id is None:
            self._pool_id = self.client_plugin().find_resourceid_by_name_or_id(
                self.POOL,
                self.properties[self.POOL],
                cmd_resource='lbaas_pool')
        return self._pool_id

    @property
    def lb_id(self):
        """UUID of the owning load balancer, found via pool -> listener."""
        if self._lb_id is None:
            pool = self.client().show_lbaas_pool(self.pool_id)['pool']

            listener_id = pool['listeners'][0]['id']
            listener = self.client().show_listener(listener_id)['listener']

            self._lb_id = listener['loadbalancers'][0]['id']
        return self._lb_id

    def _check_lb_status(self):
        """Return True when the owning load balancer is ready again."""
        return self.client_plugin().check_lb_status(self.lb_id)

    def handle_create(self):
        """Prepare the member properties; actual creation is deferred.

        The returned dict is passed to check_create_complete, which
        performs the API call once the load balancer accepts it.
        """
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name())

        # Resolve to validate the pool reference, but the pool id is
        # passed separately to create_lbaas_member, not in the body.
        self.client_plugin().resolve_pool(
            properties, self.POOL, 'pool_id')
        properties.pop('pool_id')

        if self.SUBNET in properties:
            self.client_plugin().resolve_subnet(
                properties, self.SUBNET, 'subnet_id')

        return properties

    def check_create_complete(self, properties):
        """Create the member when the LB is free, then wait for ACTIVE."""
        if self.resource_id is None:
            try:
                member = self.client().create_lbaas_member(
                    self.pool_id, {'member': properties})['member']
                self.resource_id_set(member['id'])
            except Exception as ex:
                # The LB rejects calls while busy; retry on next poll.
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def _show_resource(self):
        """Return the current member representation from neutron."""
        member = self.client().show_lbaas_member(self.resource_id,
                                                 self.pool_id)
        return member['member']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Defer the update API call to check_update_complete."""
        self._update_called = False
        return prop_diff

    def check_update_complete(self, prop_diff):
        """Apply the update when the LB is free, then wait for ACTIVE."""
        if not prop_diff:
            return True

        if not self._update_called:
            try:
                self.client().update_lbaas_member(self.resource_id,
                                                  self.pool_id,
                                                  {'member': prop_diff})
                self._update_called = True
            except Exception as ex:
                # The LB rejects calls while busy; retry on next poll.
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def handle_delete(self):
        """Defer the delete API call to check_delete_complete."""
        self._delete_called = False

    def check_delete_complete(self, data):
        """Delete the member when the LB is free; not-found means done."""
        if self.resource_id is None:
            return True

        if not self._delete_called:
            try:
                self.client().delete_lbaas_member(self.resource_id,
                                                  self.pool_id)
                self._delete_called = True
            except Exception as ex:
                # The LB rejects calls while busy; retry on next poll.
                if self.client_plugin().is_invalid(ex):
                    return False
                elif self.client_plugin().is_not_found(ex):
                    return True
                raise

        return self._check_lb_status()
コード例 #24
0
class StructuredDeployment(sd.SoftwareDeployment):
    """A resource which has same logic with OS::Heat::SoftwareDeployment.

    A deployment resource like OS::Heat::SoftwareDeployment, but which
    performs input value substitution on the config defined by a
    OS::Heat::StructuredConfig resource.

    Some configuration tools have no concept of inputs, so the input value
    substitution needs to occur in the deployment resource. An example of this
    is the JSON metadata consumed by the cfn-init tool.

    Where the config contains {get_input: input_name} this will be substituted
    with the value of input_name in this resource's input_values. If get_input
    needs to be passed through to the substituted configuration then a
    different input_key property value can be specified.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (CONFIG, SERVER, INPUT_VALUES, DEPLOY_ACTIONS, NAME,
                  SIGNAL_TRANSPORT, INPUT_KEY, INPUT_VALUES_VALIDATE) = (
                      sd.SoftwareDeployment.CONFIG,
                      sd.SoftwareDeployment.SERVER,
                      sd.SoftwareDeployment.INPUT_VALUES,
                      sd.SoftwareDeployment.DEPLOY_ACTIONS,
                      sd.SoftwareDeployment.NAME,
                      sd.SoftwareDeployment.SIGNAL_TRANSPORT, 'input_key',
                      'input_values_validate')

    _sd_ps = sd.SoftwareDeployment.properties_schema

    properties_schema = {
        CONFIG:
        _sd_ps[CONFIG],
        SERVER:
        _sd_ps[SERVER],
        INPUT_VALUES:
        _sd_ps[INPUT_VALUES],
        DEPLOY_ACTIONS:
        _sd_ps[DEPLOY_ACTIONS],
        SIGNAL_TRANSPORT:
        _sd_ps[SIGNAL_TRANSPORT],
        NAME:
        _sd_ps[NAME],
        INPUT_KEY:
        properties.Schema(
            properties.Schema.STRING,
            _('Name of key to use for substituting inputs during deployment.'),
            default='get_input',
        ),
        INPUT_VALUES_VALIDATE:
        properties.Schema(
            properties.Schema.STRING,
            _('Perform a check on the input values passed to verify that '
              'each required input has a corresponding value. '
              'When the property is set to STRICT and no value is passed, '
              'an exception is raised.'),
            default='LAX',
            constraints=[
                constraints.AllowedValues(['LAX', 'STRICT']),
            ],
        )
    }

    def empty_config(self):
        """Return the derived config used when no source config exists."""
        return {}

    def _build_derived_config(self, action, source, derived_inputs,
                              derived_options):
        """Substitute deployment input values into the structured config.

        :param source: the software config dict; its CONFIG entry is the
            structure to be parsed.
        :param derived_inputs: list of input dicts with 'name' and 'value'.
        :returns: the config with {input_key: name} snippets replaced.
        """
        cfg = source.get(sc.SoftwareConfig.CONFIG)
        input_key = self.properties[self.INPUT_KEY]
        check_input_val = self.properties[self.INPUT_VALUES_VALIDATE]

        inputs = dict((i['name'], i['value']) for i in derived_inputs)

        return self.parse(inputs, input_key, cfg, check_input_val)

    @staticmethod
    def get_input_key_arg(snippet, input_key):
        """Return the string argument of a one-entry {input_key: arg} dict.

        Returns None when the snippet is not exactly that shape, so the
        caller treats it as an ordinary mapping.
        """
        if len(snippet) != 1:
            return None
        fn_name, fn_arg = next(six.iteritems(snippet))
        if (fn_name == input_key and isinstance(fn_arg, six.string_types)):
            return fn_arg

    @staticmethod
    def get_input_key_value(fn_arg, inputs, check_input_val='LAX'):
        """Look up an input value, raising in STRICT mode when missing.

        :raises exception.UserParameterMissing: when check_input_val is
            'STRICT' and fn_arg has no corresponding input value.
        """
        if check_input_val == 'STRICT' and fn_arg not in inputs:
            raise exception.UserParameterMissing(key=fn_arg)
        return inputs.get(fn_arg)

    @staticmethod
    def parse(inputs, input_key, snippet, check_input_val='LAX'):
        """Recursively substitute {input_key: name} snippets with values.

        Mappings and non-string iterables are walked; every other value
        is returned unchanged.
        """
        parse = functools.partial(StructuredDeployment.parse,
                                  inputs,
                                  input_key,
                                  check_input_val=check_input_val)

        # The abstract base classes moved to collections.abc in Python 3
        # and the flat collections.* aliases were removed in Python 3.10;
        # fall back to the flat module on Python 2 where abc is absent.
        coll_abc = getattr(collections, 'abc', collections)

        if isinstance(snippet, coll_abc.Mapping):
            fn_arg = StructuredDeployment.get_input_key_arg(snippet, input_key)
            if fn_arg is not None:
                return StructuredDeployment.get_input_key_value(
                    fn_arg, inputs, check_input_val)

            return dict((k, parse(v)) for k, v in six.iteritems(snippet))
        elif (not isinstance(snippet, six.string_types)
              and isinstance(snippet, coll_abc.Iterable)):
            return [parse(v) for v in snippet]
        else:
            return snippet
コード例 #25
0
class HealthMonitor(neutron.NeutronResource):
    """A resource to handle load balancer health monitors.

    This resource creates and manages Neutron LBaaS v2 healthmonitors,
    which watches status of the load balanced servers.
    """

    support_status = support.SupportStatus(version='6.0.0')

    required_service_extension = 'lbaasv2'

    # Properties inputs for the resources create/update.
    PROPERTIES = (
        ADMIN_STATE_UP, DELAY, EXPECTED_CODES, HTTP_METHOD,
        MAX_RETRIES, POOL, TIMEOUT, TYPE, URL_PATH, TENANT_ID
    ) = (
        'admin_state_up', 'delay', 'expected_codes', 'http_method',
        'max_retries', 'pool', 'timeout', 'type', 'url_path', 'tenant_id'
    )

    # Supported HTTP methods
    HTTP_METHODS = (
        GET, HEAT, POST, PUT, DELETE, TRACE, OPTIONS,
        CONNECT, PATCH
    ) = (
        'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'OPTIONS',
        'CONNECT', 'PATCH'
    )
    # The unpacking above misnames the 'HEAD' constant as HEAT; keep the
    # old name for backward compatibility and expose the correct spelling
    # as an alias.
    HEAD = HEAT

    # Supported output attributes of the resources.
    # NOTE: proper one-element tuples are required here; without the
    # trailing commas ATTRIBUTES would be the *string* 'pools' and
    # iterating it would yield single characters.
    ATTRIBUTES = (POOLS_ATTR,) = ('pools',)

    properties_schema = {
        # NOTE(review): the AllowedValues constraint restricts this
        # boolean to True only — presumably because the backend does not
        # support disabled monitors; confirm against the service.
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the health monitor.'),
            default=True,
            update_allowed=True,
            constraints=[constraints.AllowedValues(['True'])]
        ),
        DELAY: properties.Schema(
            properties.Schema.INTEGER,
            _('The minimum time in seconds between regular connections of '
              'the member.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.Range(min=0, max=2147483647)]
        ),
        EXPECTED_CODES: properties.Schema(
            properties.Schema.STRING,
            _('The HTTP status codes expected in response from the '
              'member to declare it healthy. Specify one of the following '
              'values: a single value, such as 200. a list, such as 200, 202. '
              'a range, such as 200-204.'),
            update_allowed=True,
            default='200'
        ),
        HTTP_METHOD: properties.Schema(
            properties.Schema.STRING,
            _('The HTTP method used for requests by the monitor of type '
              'HTTP.'),
            update_allowed=True,
            default=GET,
            constraints=[constraints.AllowedValues(HTTP_METHODS)]
        ),
        MAX_RETRIES: properties.Schema(
            properties.Schema.INTEGER,
            _('Number of permissible connection failures before changing the '
              'member status to INACTIVE.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.Range(min=1, max=10)],
        ),
        POOL: properties.Schema(
            properties.Schema.STRING,
            _('ID or name of the load balancing pool.'),
            required=True
        ),
        TIMEOUT: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of seconds for a monitor to wait for a '
              'connection to be established before it times out.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.Range(min=0, max=2147483647)]
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('One of predefined health monitor types.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['PING', 'TCP', 'HTTP']),
            ]
        ),
        URL_PATH: properties.Schema(
            properties.Schema.STRING,
            _('The HTTP path used in the HTTP request used by the monitor to '
              'test a member health. A valid value is a string the begins '
              'with a forward slash (/).'),
            update_allowed=True,
            default='/'
        ),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of the tenant who owns the health monitor.')
        )
    }

    attributes_schema = {
        POOLS_ATTR: attributes.Schema(
            _('The list of Pools related to this monitor.'),
            type=attributes.Schema.LIST
        )
    }

    def __init__(self, name, definition, stack):
        # Lazily-resolved load balancer id, cached by the property below.
        super(HealthMonitor, self).__init__(name, definition, stack)
        self._lb_id = None

    @property
    def lb_id(self):
        """UUID of the owning load balancer, found via pool -> listener."""
        if self._lb_id is None:
            pool_id = self.client_plugin().find_resourceid_by_name_or_id(
                self.POOL,
                self.properties[self.POOL],
                cmd_resource='lbaas_pool')
            pool = self.client().show_lbaas_pool(pool_id)['pool']

            listener_id = pool['listeners'][0]['id']
            listener = self.client().show_listener(listener_id)['listener']

            self._lb_id = listener['loadbalancers'][0]['id']
        return self._lb_id

    def _check_lb_status(self):
        """Return True when the owning load balancer is ready again."""
        return self.client_plugin().check_lb_status(self.lb_id)

    def handle_create(self):
        """Prepare the monitor properties; creation happens during polling.

        The returned dict is passed to check_create_complete, which makes
        the API call once the load balancer accepts it.
        """
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name())

        self.client_plugin().resolve_pool(
            properties, self.POOL, 'pool_id')

        return properties

    def check_create_complete(self, properties):
        """Create the monitor when the LB is free, then wait for ACTIVE."""
        if self.resource_id is None:
            try:
                healthmonitor = self.client().create_lbaas_healthmonitor(
                    {'healthmonitor': properties})['healthmonitor']
                self.resource_id_set(healthmonitor['id'])
            except Exception as ex:
                # The LB rejects calls while busy; retry on next poll.
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def _show_resource(self):
        """Return the current healthmonitor representation from neutron."""
        return self.client().show_lbaas_healthmonitor(
            self.resource_id)['healthmonitor']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Defer the update API call to check_update_complete."""
        self._update_called = False
        return prop_diff

    def check_update_complete(self, prop_diff):
        """Apply the update when the LB is free, then wait for ACTIVE."""
        if not prop_diff:
            return True

        if not self._update_called:
            try:
                self.client().update_lbaas_healthmonitor(
                    self.resource_id, {'healthmonitor': prop_diff})
                self._update_called = True
            except Exception as ex:
                # The LB rejects calls while busy; retry on next poll.
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def handle_delete(self):
        """Defer the delete API call to check_delete_complete."""
        self._delete_called = False

    def check_delete_complete(self, data):
        """Delete the monitor when the LB is free; not-found means done."""
        if self.resource_id is None:
            return True

        if not self._delete_called:
            try:
                self.client().delete_lbaas_healthmonitor(self.resource_id)
                self._delete_called = True
            except Exception as ex:
                # The LB rejects calls while busy; retry on next poll.
                if self.client_plugin().is_invalid(ex):
                    return False
                elif self.client_plugin().is_not_found(ex):
                    return True
                raise

        return self._check_lb_status()
コード例 #26
0
class AutoScalingPolicy(signal_responder.SignalResponder,
                        cooldown.CooldownMixin):
    """A resource to manage scaling of `OS::Heat::AutoScalingGroup`.

    **Note** while it may incidentally support
    `AWS::AutoScaling::AutoScalingGroup` for now, please don't use it for that
    purpose and use `AWS::AutoScaling::ScalingPolicy` instead.

    Resource to manage scaling for `OS::Heat::AutoScalingGroup`, i.e. define
    which metric should be scaled and scaling adjustment, set cooldown etc.
    """
    PROPERTIES = (AUTO_SCALING_GROUP_NAME, SCALING_ADJUSTMENT, ADJUSTMENT_TYPE,
                  COOLDOWN, MIN_ADJUSTMENT_STEP) = (
                      'auto_scaling_group_id',
                      'scaling_adjustment',
                      'adjustment_type',
                      'cooldown',
                      'min_adjustment_step',
                  )

    ATTRIBUTES = (ALARM_URL, SIGNAL_URL) = ('alarm_url', 'signal_url')

    properties_schema = {
        # TODO(Qiming): property name should be AUTO_SCALING_GROUP_ID
        AUTO_SCALING_GROUP_NAME:
        properties.Schema(properties.Schema.STRING,
                          _('AutoScaling group ID to apply policy to.'),
                          required=True),
        SCALING_ADJUSTMENT:
        properties.Schema(properties.Schema.NUMBER,
                          _('Size of adjustment.'),
                          required=True,
                          update_allowed=True),
        ADJUSTMENT_TYPE:
        properties.Schema(properties.Schema.STRING,
                          _('Type of adjustment (absolute or percentage).'),
                          required=True,
                          constraints=[
                              constraints.AllowedValues([
                                  sc_util.CHANGE_IN_CAPACITY,
                                  sc_util.EXACT_CAPACITY,
                                  sc_util.PERCENT_CHANGE_IN_CAPACITY
                              ]),
                          ],
                          update_allowed=True),
        COOLDOWN:
        properties.Schema(properties.Schema.NUMBER,
                          _('Cooldown period, in seconds.'),
                          update_allowed=True),
        MIN_ADJUSTMENT_STEP:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Minimum number of resources that are added or removed '
              'when the AutoScaling group scales up or down. This can '
              'be used only when specifying percent_change_in_capacity '
              'for the adjustment_type property.'),
            constraints=[
                constraints.Range(min=0, ),
            ],
            update_allowed=True),
    }

    attributes_schema = {
        ALARM_URL:
        attributes.Schema(_("A signed url to handle the alarm."),
                          type=attributes.Schema.STRING),
        SIGNAL_URL:
        attributes.Schema(
            _("A url to handle the alarm using native API."),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
    }

    def validate(self):
        """Add validation for min_adjustment_step."""
        super(AutoScalingPolicy, self).validate()
        self._validate_min_adjustment_step()

    def _validate_min_adjustment_step(self):
        """Reject min_adjustment_step unless adjustment_type is percent.

        Raises ResourcePropertyValueDependency when min_adjustment_step is
        set while adjustment_type is not percent_change_in_capacity, since
        the step only makes sense for percentage-based adjustments.
        """
        adjustment_type = self.properties.get(self.ADJUSTMENT_TYPE)
        adjustment_step = self.properties.get(self.MIN_ADJUSTMENT_STEP)
        if (adjustment_type != sc_util.PERCENT_CHANGE_IN_CAPACITY
                and adjustment_step is not None):
            raise exception.ResourcePropertyValueDependency(
                prop1=self.MIN_ADJUSTMENT_STEP,
                prop2=self.ADJUSTMENT_TYPE,
                value=sc_util.PERCENT_CHANGE_IN_CAPACITY)

    def handle_metadata_reset(self):
        """Clear a stale 'scaling_in_progress' flag in resource metadata."""
        metadata = self.metadata_get()
        if 'scaling_in_progress' in metadata:
            metadata['scaling_in_progress'] = False
            self.metadata_set(metadata)

    def handle_create(self):
        """Create the stack user (via the base class) and store its id."""
        super(AutoScalingPolicy, self).handle_create()
        self.resource_id_set(self._get_user_id())

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Updates self.properties, if Properties has changed.

        If Properties has changed, update self.properties, so we get the new
        values during any subsequent adjustment.
        """
        if prop_diff:
            self.properties = json_snippet.properties(self.properties_schema,
                                                      self.context)

    def handle_signal(self, details=None):
        """React to an alarm signal by adjusting the scaling group.

        Raises NoActionRequired when the signal is not an 'alarm' state or
        when the cooldown window forbids scaling; otherwise adjusts the
        target group and records the result via _finished_scaling.
        """
        # ceilometer sends details like this:
        # {u'alarm_id': ID, u'previous': u'ok', u'current': u'alarm',
        #  u'reason': u'...'})
        # in this policy we currently assume that this gets called
        # only when there is an alarm. But the template writer can
        # put the policy in all the alarm notifiers (nodata, and ok).
        #
        # our watchrule has upper case states so lower() them all.
        if details is None:
            alarm_state = 'alarm'
        else:
            # Some alarm sources use 'current', others 'state'.
            alarm_state = details.get('current', details.get('state',
                                                             'alarm')).lower()

        LOG.info(_LI('Alarm %(name)s, new state %(state)s'), {
            'name': self.name,
            'state': alarm_state
        })

        if alarm_state != 'alarm':
            raise exception.NoActionRequired()
        if not self._is_scaling_allowed():
            # Still inside the cooldown period -- skip this signal.
            LOG.info(
                _LI("%(name)s NOT performing scaling action, "
                    "cooldown %(cooldown)s"), {
                        'name': self.name,
                        'cooldown': self.properties[self.COOLDOWN]
                    })
            raise exception.NoActionRequired()

        asgn_id = self.properties[self.AUTO_SCALING_GROUP_NAME]
        group = self.stack.resource_by_refid(asgn_id)
        changed_size = False
        try:
            if group is None:
                raise exception.NotFound(
                    _('Alarm %(alarm)s could not find '
                      'scaling group named "%(group)s"') % {
                          'alarm': self.name,
                          'group': asgn_id
                      })

            LOG.info(
                _LI('%(name)s Alarm, adjusting Group %(group)s with id '
                    '%(asgn_id)s by %(filter)s'), {
                        'name': self.name,
                        'group': group.name,
                        'asgn_id': asgn_id,
                        'filter': self.properties[self.SCALING_ADJUSTMENT]
                    })
            changed_size = group.adjust(
                self.properties[self.SCALING_ADJUSTMENT],
                self.properties[self.ADJUSTMENT_TYPE],
                self.properties[self.MIN_ADJUSTMENT_STEP])
        finally:
            # Always record the outcome so the cooldown state is updated
            # even when the adjustment raised.
            self._finished_scaling("%s : %s" %
                                   (self.properties[self.ADJUSTMENT_TYPE],
                                    self.properties[self.SCALING_ADJUSTMENT]),
                                   changed_size=changed_size)

    def _resolve_attribute(self, name):
        """Resolve alarm_url/signal_url; None before the resource exists."""
        if self.resource_id is None:
            return
        if name == self.ALARM_URL:
            return six.text_type(self._get_ec2_signed_url())
        elif name == self.SIGNAL_URL:
            return six.text_type(self._get_heat_signal_url())

    def get_reference_id(self):
        # Bypass SignalResponder's reference id and use the plain
        # Resource implementation.
        return resource.Resource.get_reference_id(self)
コード例 #27
0
ファイル: endpoint.py プロジェクト: Hybrid-Cloud/conveyor
class KeystoneEndpoint(resource.Resource):
    """Heat resource for managing a Keystone service endpoint.

    An endpoint is the URL through which an OpenStack service is reached.
    Depending on its interface it can be used by admins, by other services,
    or publicly, i.e. by everyone.
    """

    support_status = support.SupportStatus(
        version='5.0.0', message=_('Supported versions: keystone v3'))

    default_client_name = 'keystone'

    entity = 'endpoints'

    PROPERTIES = (
        NAME, REGION, SERVICE, INTERFACE, SERVICE_URL, ENABLED,
    ) = (
        'name', 'region', 'service', 'interface', 'url', 'enabled',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of keystone endpoint.'),
            update_allowed=True
        ),
        REGION: properties.Schema(
            properties.Schema.STRING,
            _('Name or Id of keystone region.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('keystone.region')]
        ),
        SERVICE: properties.Schema(
            properties.Schema.STRING,
            _('Name or Id of keystone service.'),
            update_allowed=True,
            required=True,
            constraints=[constraints.CustomConstraint('keystone.service')]
        ),
        INTERFACE: properties.Schema(
            properties.Schema.STRING,
            _('Interface type of keystone service endpoint.'),
            update_allowed=True,
            required=True,
            constraints=[
                constraints.AllowedValues(['public', 'internal', 'admin'])
            ]
        ),
        SERVICE_URL: properties.Schema(
            properties.Schema.STRING,
            _('URL of keystone service endpoint.'),
            update_allowed=True,
            required=True
        ),
        ENABLED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('This endpoint is enabled or disabled.'),
            default=True,
            update_allowed=True,
            support_status=support.SupportStatus(version='6.0.0')
        ),
    }

    def client(self):
        """Return the raw keystoneclient held by the plugin wrapper."""
        wrapped = super(KeystoneEndpoint, self).client()
        return wrapped.client

    def handle_create(self):
        """Create the endpoint, defaulting its name when none is given."""
        props = self.properties
        endpoint_name = props[self.NAME] or self.physical_resource_name()

        created = self.client().endpoints.create(
            region=props[self.REGION],
            service=props[self.SERVICE],
            interface=props[self.INTERFACE],
            url=props[self.SERVICE_URL],
            name=endpoint_name,
            enabled=props[self.ENABLED])

        self.resource_id_set(created.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed properties to keystone.

        Unchanged fields are sent as None so keystone leaves them alone;
        the name is only recomputed when it actually appears in the diff.
        """
        if not prop_diff:
            return

        new_name = None
        # Don't update the name if no change
        if self.NAME in prop_diff:
            new_name = prop_diff[self.NAME] or self.physical_resource_name()

        self.client().endpoints.update(
            endpoint=self.resource_id,
            region=prop_diff.get(self.REGION),
            service=prop_diff.get(self.SERVICE),
            interface=prop_diff.get(self.INTERFACE),
            url=prop_diff.get(self.SERVICE_URL),
            name=new_name,
            enabled=prop_diff.get(self.ENABLED))
コード例 #28
0
class VPC(resource.Resource):
    """AWS-compatible VPC implemented as a Neutron network/router pair.

    The network and its router are linked purely by carrying the same
    (generated) name; see handle_create and router_for_vpc.
    """

    PROPERTIES = (
        CIDR_BLOCK, INSTANCE_TENANCY, TAGS,
    ) = (
        'CidrBlock', 'InstanceTenancy', 'Tags',
    )

    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )

    properties_schema = {
        CIDR_BLOCK: properties.Schema(
            properties.Schema.STRING,
            _('CIDR block to apply to the VPC.')
        ),
        INSTANCE_TENANCY: properties.Schema(
            properties.Schema.STRING,
            _('Allowed tenancy of instances launched in the VPC. default - '
              'any tenancy; dedicated - instance will be dedicated, '
              'regardless of the tenancy option specified at instance '
              'launch.'),
            default='default',
            constraints=[
                constraints.AllowedValues(['default', 'dedicated']),
            ],
            implemented=False
        ),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                _('List of tags to attach to the instance.'),
                schema={
                    TAG_KEY: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                    TAG_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                },
                implemented=False,
            )
        ),
    }

    default_client_name = 'neutron'

    def handle_create(self):
        # The VPC's net and router are associated by having identical names.
        shared_name = self.physical_resource_name()

        created_net = self.client().create_network(
            {'network': {'name': shared_name}})['network']
        self.resource_id_set(created_net['id'])
        self.client().create_router(
            {'router': {'name': shared_name}})['router']

    @staticmethod
    def network_for_vpc(client, network_id):
        """Return the neutron network dict backing the given VPC id."""
        return client.show_network(network_id)['network']

    @staticmethod
    def router_for_vpc(client, network_id):
        """Return the router paired with the VPC's network, or None."""
        # Look up the net first, then match a router by the shared name.
        net = VPC.network_for_vpc(client, network_id)
        candidates = client.list_routers(name=net['name'])['routers']
        if not candidates:
            # There may be no router if the net was created manually
            # instead of in another stack.
            return None
        if len(candidates) > 1:
            raise exception.Error(
                _('Multiple routers found with name %s') % net['name'])
        return candidates[0]

    def check_create_complete(self, *args):
        """Creation is complete once both net and router report built."""
        net = self.network_for_vpc(self.client(), self.resource_id)
        if not neutron.NeutronResource.is_built(net):
            return False
        paired_router = self.router_for_vpc(self.client(), self.resource_id)
        return neutron.NeutronResource.is_built(paired_router)

    def handle_delete(self):
        """Tear down the router (if any) and then the network."""
        if self.resource_id is None:
            return

        with self.client_plugin().ignore_not_found:
            paired_router = self.router_for_vpc(self.client(),
                                                self.resource_id)
            if paired_router:
                self.client().delete_router(paired_router['id'])

        with self.client_plugin().ignore_not_found:
            self.client().delete_network(self.resource_id)
コード例 #29
0
class HealthMonitor(neutron.NeutronResource):
    """A resource for managing health monitors for loadbalancers in Neutron.

    A health monitor decides whether the back-end members of a VIP's pool
    are fit to serve requests; a pool may carry several monitors at once.
    The OpenStack LBaaS service supports these monitor types:

      - PING: used to ping the members using ICMP.
      - TCP: used to connect to the members using TCP.
      - HTTP: used to send an HTTP request to the member.
      - HTTPS: used to send a secure HTTP request to the member.
    """

    required_service_extension = 'lbaas'

    PROPERTIES = (
        DELAY, TYPE, MAX_RETRIES, TIMEOUT, ADMIN_STATE_UP,
        HTTP_METHOD, EXPECTED_CODES, URL_PATH,
    ) = (
        'delay', 'type', 'max_retries', 'timeout', 'admin_state_up',
        'http_method', 'expected_codes', 'url_path',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, DELAY_ATTR, EXPECTED_CODES_ATTR, HTTP_METHOD_ATTR,
        MAX_RETRIES_ATTR, TIMEOUT_ATTR, TYPE_ATTR, URL_PATH_ATTR, TENANT_ID,
    ) = (
        'admin_state_up', 'delay', 'expected_codes', 'http_method',
        'max_retries', 'timeout', 'type', 'url_path', 'tenant_id',
    )

    properties_schema = {
        DELAY:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The minimum time in seconds between regular connections of '
              'the member.'),
            required=True,
            update_allowed=True),
        TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('One of predefined health monitor types.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['PING', 'TCP', 'HTTP', 'HTTPS']),
            ]),
        MAX_RETRIES:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Number of permissible connection failures before changing the '
              'member status to INACTIVE.'),
            required=True,
            update_allowed=True),
        TIMEOUT:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of seconds for a monitor to wait for a '
              'connection to be established before it times out.'),
            required=True,
            update_allowed=True),
        ADMIN_STATE_UP:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the health monitor.'),
            default=True,
            update_allowed=True),
        HTTP_METHOD:
        properties.Schema(
            properties.Schema.STRING,
            _('The HTTP method used for requests by the monitor of type '
              'HTTP.'),
            update_allowed=True),
        EXPECTED_CODES:
        properties.Schema(
            properties.Schema.STRING,
            _('The list of HTTP status codes expected in response from the '
              'member to declare it healthy.'),
            update_allowed=True),
        URL_PATH:
        properties.Schema(
            properties.Schema.STRING,
            _('The HTTP path used in the HTTP request used by the monitor to '
              'test a member health.'),
            update_allowed=True),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR:
        attributes.Schema(
            _('The administrative state of this health monitor.'),
            type=attributes.Schema.STRING),
        DELAY_ATTR:
        attributes.Schema(
            _('The minimum time in seconds between regular connections '
              'of the member.'),
            type=attributes.Schema.STRING),
        EXPECTED_CODES_ATTR:
        attributes.Schema(
            _('The list of HTTP status codes expected in response '
              'from the member to declare it healthy.'),
            type=attributes.Schema.LIST),
        HTTP_METHOD_ATTR:
        attributes.Schema(
            _('The HTTP method used for requests by the monitor of '
              'type HTTP.'),
            type=attributes.Schema.STRING),
        MAX_RETRIES_ATTR:
        attributes.Schema(
            _('Number of permissible connection failures before changing '
              'the member status to INACTIVE.'),
            type=attributes.Schema.STRING),
        TIMEOUT_ATTR:
        attributes.Schema(
            _('Maximum number of seconds for a monitor to wait for a '
              'connection to be established before it times out.'),
            type=attributes.Schema.STRING),
        TYPE_ATTR:
        attributes.Schema(
            _('One of predefined health monitor types.'),
            type=attributes.Schema.STRING),
        URL_PATH_ATTR:
        attributes.Schema(
            _('The HTTP path used in the HTTP request used by the monitor '
              'to test a member health.'),
            type=attributes.Schema.STRING),
        TENANT_ID:
        attributes.Schema(
            _('Tenant owning the health monitor.'),
            type=attributes.Schema.STRING),
    }

    def handle_create(self):
        """Create the monitor in Neutron and record its id."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        created = self.client().create_health_monitor(
            {'health_monitor': props})['health_monitor']
        self.resource_id_set(created['id'])

    def _show_resource(self):
        """Fetch the monitor's current representation from Neutron."""
        response = self.client().show_health_monitor(self.resource_id)
        return response['health_monitor']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed properties to Neutron."""
        if not prop_diff:
            return
        self.client().update_health_monitor(
            self.resource_id, {'health_monitor': prop_diff})

    def handle_delete(self):
        """Delete the monitor; an already-missing one counts as success."""
        if not self.resource_id:
            return

        try:
            self.client().delete_health_monitor(self.resource_id)
        except Exception as ex:
            # Swallows NotFound, re-raises anything else.
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
コード例 #30
0
class HeatWaitConditionHandle(wc_base.BaseWaitConditionHandle):
    """Resource for managing instance signals.

    The main points of this resource are:
      - have no dependencies (so the instance can reference it).
      - create credentials to allow for signalling from the instance.
      - handle signals from the instance, validate and store result.
    """

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (SIGNAL_TRANSPORT, ) = ('signal_transport', )

    # Transport mechanisms an instance can use to signal this handle;
    # the default is TOKEN_SIGNAL (see the schema below).
    SIGNAL_TRANSPORTS = (CFN_SIGNAL, TEMP_URL_SIGNAL, HEAT_SIGNAL, NO_SIGNAL,
                         ZAQAR_SIGNAL,
                         TOKEN_SIGNAL) = ('CFN_SIGNAL', 'TEMP_URL_SIGNAL',
                                          'HEAT_SIGNAL', 'NO_SIGNAL',
                                          'ZAQAR_SIGNAL', 'TOKEN_SIGNAL')

    properties_schema = {
        SIGNAL_TRANSPORT:
        properties.Schema(
            properties.Schema.STRING,
            _('How the client will signal the wait condition. CFN_SIGNAL '
              'will allow an HTTP POST to a CFN keypair signed URL. '
              'TEMP_URL_SIGNAL will create a Swift TempURL to be '
              'signalled via HTTP PUT. HEAT_SIGNAL will allow calls to the '
              'Heat API resource-signal using the provided keystone '
              'credentials. ZAQAR_SIGNAL will create a dedicated zaqar queue '
              'to be signalled using the provided keystone credentials. '
              'TOKEN_SIGNAL will allow and HTTP POST to a Heat API endpoint '
              'with the provided keystone token. NO_SIGNAL will result in '
              'the resource going to a signalled state without waiting for '
              'any signal.'),
            default='TOKEN_SIGNAL',
            constraints=[
                constraints.AllowedValues(SIGNAL_TRANSPORTS),
            ],
            support_status=support.SupportStatus(version='6.0.0'),
        ),
    }

    ATTRIBUTES = (
        TOKEN,
        ENDPOINT,
        CURL_CLI,
        SIGNAL,
    ) = (
        'token',
        'endpoint',
        'curl_cli',
        'signal',
    )

    # All attributes use CACHE_NONE so values are re-resolved on every
    # access rather than served from the attribute cache.
    attributes_schema = {
        TOKEN:
        attributes.Schema(_(
            'Token for stack-user which can be used for signalling handle '
            'when signal_transport is set to TOKEN_SIGNAL. None for all '
            'other signal transports.'),
                          cache_mode=attributes.Schema.CACHE_NONE,
                          type=attributes.Schema.STRING),
        ENDPOINT:
        attributes.Schema(_(
            'Endpoint/url which can be used for signalling handle when '
            'signal_transport is set to TOKEN_SIGNAL. None for all '
            'other signal transports.'),
                          cache_mode=attributes.Schema.CACHE_NONE,
                          type=attributes.Schema.STRING),
        CURL_CLI:
        attributes.Schema(_(
            'Convenience attribute, provides curl CLI command '
            'prefix, which can be used for signalling handle completion or '
            'failure when signal_transport is set to TOKEN_SIGNAL. You '
            'can signal success by adding '
            '--data-binary \'{"status": "SUCCESS"}\' '
            ', or signal failure by adding '
            '--data-binary \'{"status": "FAILURE"}\'. '
            'This attribute is set to None for all other signal '
            'transports.'),
                          cache_mode=attributes.Schema.CACHE_NONE,
                          type=attributes.Schema.STRING),
        SIGNAL:
        attributes.Schema(_(
            'JSON serialized map that includes the endpoint, token and/or '
            'other attributes the client must use for signalling this '
            'handle. The contents of this map depend on the type of signal '
            'selected in the signal_transport property.'),
                          cache_mode=attributes.Schema.CACHE_NONE,
                          type=attributes.Schema.STRING)
    }

    # Keys recognised in an incoming signal payload; see
    # normalise_signal_data for the defaults applied to missing keys.
    METADATA_KEYS = (DATA, REASON, STATUS, UNIQUE_ID) = ('data', 'reason',
                                                         'status', 'id')

    def _signal_transport_token(self):
        """Return True when the TOKEN_SIGNAL transport is configured."""
        transport = self.properties.get(self.SIGNAL_TRANSPORT)
        return transport == self.TOKEN_SIGNAL

    def handle_create(self):
        """Create the signalling user; stash token/endpoint for TOKEN_SIGNAL."""
        # The base class consumes self.password when creating the
        # stack user, so it must be set before the super() call.
        self.password = base64.b64encode(crypt.get_secure_random(size=16))
        super(HeatWaitConditionHandle, self).handle_create()
        if not self._signal_transport_token():
            return
        # FIXME(shardy): The assumption here is that token expiry > timeout
        # but we probably need a check here to fail fast if that's not true
        # Also need to implement an update property, such that the handle
        # can be replaced on update which will replace the token
        self.data_set('token', self._user_token(), True)
        endpoint = '%s/signal' % self._get_resource_endpoint()
        self.data_set('endpoint', endpoint)

    def _get_resource_endpoint(self):
        """Build the signalling endpoint URL for this resource.

        Takes the heat endpoint, substitutes the path to this resource
        (which embeds the context project id), then swaps the context
        project for the stack user's project so the stack user can
        signal it.
        """
        fs_conf = cfg.CONF.FusionSphere
        if fs_conf.pubcloud and fs_conf.heat_orchestration_url:
            # Dedicated orchestration URL configured for public cloud.
            rsrc_ep = '%s/%s' % (fs_conf.heat_orchestration_url,
                                 self.identifier().url_path())
        else:
            heat_plugin = self.stack.clients.client_plugin('heat')
            rsrc_ep = heat_plugin.get_heat_url().replace(
                self.context.tenant_id, self.identifier().url_path())
        return rsrc_ep.replace(self.context.tenant_id,
                               self.stack.stack_user_project_id)

    def _resolve_attribute(self, key):
        """Resolve signal/token/endpoint/curl_cli; None before creation."""
        if not self.resource_id:
            return None
        if key == self.SIGNAL:
            return jsonutils.dumps(self._get_signal(
                signal_type=signal_responder.WAITCONDITION,
                multiple_signals=True))
        if key == self.TOKEN:
            return self.data().get('token')
        if key == self.ENDPOINT:
            return self.data().get('endpoint')
        if key == self.CURL_CLI:
            # Construct curl command for template-author convenience
            endpoint = self.data().get('endpoint')
            token = self.data().get('token')
            if endpoint is None or token is None:
                # Non-TOKEN_SIGNAL transports store neither value.
                return None
            return ("curl -i --insecure -X POST "
                    "-H 'X-Auth-Token: %(token)s' "
                    "-H 'Content-Type: application/json' "
                    "-H 'Accept: application/json' "
                    "%(endpoint)s" % dict(token=token, endpoint=endpoint))

    def get_status(self):
        """Refresh polled signal transports, then report stored status."""
        # Transports that rely on constant polling must be serviced
        # before the recorded status can be trusted.
        self._service_signal()
        return super(HeatWaitConditionHandle, self).get_status()

    def handle_signal(self, details=None):
        """Validate and update the resource metadata.

        The signal payload is optional; when present it must look like:
        {
            "status" : "Status (must be SUCCESS or FAILURE)",
            "data" : "Arbitrary data",
            "reason" : "Reason string"
        }
        An "id" key may be supplied as well; when absent, the index of
        the received signal is used instead.
        """
        return super(HeatWaitConditionHandle, self).handle_signal(details)

    def normalise_signal_data(self, signal_data, latest_metadata):
        """Fill defaults into a signal payload without clobbering it.

        Missing values are tolerated: status defaults to SUCCESS, the id
        to the signal's ordinal, and the reason to a generated message.
        """
        ordinal = len(latest_metadata) + 1
        metadata = dict(signal_data) if signal_data else {}
        metadata.setdefault(self.REASON, 'Signal %s received' % ordinal)
        metadata.setdefault(self.DATA, None)
        metadata.setdefault(self.UNIQUE_ID, ordinal)
        metadata.setdefault(self.STATUS, self.STATUS_SUCCESS)
        return metadata