# Example 1
class CloudDns(resource.Resource):
    """Represents a Rackspace Cloud DNS domain resource.

    Manages a DNS domain and its records through the Rackspace Cloud DNS
    API. Create and delete are synchronous in the underlying client, so
    no ``check_*_complete`` handlers are needed.
    """

    # Template property names (left) and their API key strings (right).
    PROPERTIES = (
        NAME,
        EMAIL_ADDRESS,
        TTL,
        COMMENT,
        RECORDS,
    ) = (
        'name',
        'emailAddress',
        'ttl',
        'comment',
        'records',
    )

    # Keys of each entry in the RECORDS list property.
    _RECORD_KEYS = (
        RECORD_COMMENT,
        RECORD_NAME,
        RECORD_DATA,
        RECORD_PRIORITY,
        RECORD_TTL,
        RECORD_TYPE,
    ) = (
        'comment',
        'name',
        'data',
        'priority',
        'ttl',
        'type',
    )

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Specifies the name for the domain or subdomain. Must be a '
              'valid domain name.'),
            required=True,
            constraints=[
                constraints.Length(min=3),
            ]),
        EMAIL_ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('Email address to use for contacting the domain administrator.'),
            required=True,
            update_allowed=True),
        TTL:
        properties.Schema(properties.Schema.INTEGER,
                          _('How long other servers should cache recorddata.'),
                          default=3600,
                          constraints=[
                              constraints.Range(min=301),
                          ],
                          update_allowed=True),
        COMMENT:
        properties.Schema(properties.Schema.STRING,
                          _('Optional free form text comment'),
                          constraints=[
                              constraints.Length(max=160),
                          ],
                          update_allowed=True),
        RECORDS:
        properties.Schema(
            properties.Schema.LIST,
            _('Domain records'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    RECORD_COMMENT:
                    properties.Schema(properties.Schema.STRING,
                                      _('Optional free form text comment'),
                                      constraints=[
                                          constraints.Length(max=160),
                                      ]),
                    RECORD_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Specifies the name for the domain or '
                          'subdomain. Must be a valid domain name.'),
                        required=True,
                        constraints=[
                            constraints.Length(min=3),
                        ]),
                    RECORD_DATA:
                    properties.Schema(properties.Schema.STRING,
                                      _('Type specific record data'),
                                      required=True),
                    RECORD_PRIORITY:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('Required for MX and SRV records, but '
                          'forbidden for other record types. If '
                          'specified, must be an integer from 0 to '
                          '65535.'),
                        constraints=[
                            constraints.Range(0, 65535),
                        ]),
                    RECORD_TTL:
                    properties.Schema(properties.Schema.INTEGER,
                                      _('How long other servers should cache '
                                        'recorddata.'),
                                      default=3600,
                                      constraints=[
                                          constraints.Range(min=301),
                                      ]),
                    RECORD_TYPE:
                    properties.Schema(properties.Schema.STRING,
                                      _('Specifies the record type.'),
                                      required=True,
                                      constraints=[
                                          constraints.AllowedValues([
                                              'A', 'AAAA', 'NS', 'MX', 'CNAME',
                                              'TXT', 'SRV'
                                          ]),
                                      ]),
                },
            ),
            update_allowed=True),
    }

    def cloud_dns(self):
        """Return the Cloud DNS client handle."""
        return self.client('cloud_dns')

    def handle_create(self):
        """Create a Rackspace CloudDns Instance."""
        # There is no check_create_complete as the pyrax create for DNS is
        # synchronous.
        LOG.debug("CloudDns handle_create called.")
        args = dict(self.properties.items())
        for rec in args[self.RECORDS] or []:
            # 'priority' is only valid for MX and SRV records (see the
            # RECORD_PRIORITY schema above), so drop it for other types.
            rec_type = rec[self.RECORD_TYPE]
            if rec_type not in ('MX', 'SRV'):
                rec.pop(self.RECORD_PRIORITY, None)
        dom = self.cloud_dns().create(**args)
        self.resource_id_set(dom.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update a Rackspace CloudDns Instance."""
        LOG.debug("CloudDns handle_update called.")
        if not self.resource_id:
            raise exception.Error(_('Update called on a non-existent domain'))
        if prop_diff:
            dom = self.cloud_dns().get(self.resource_id)

            # handle records separately
            records = prop_diff.pop(self.RECORDS, {})

            # Handle top level domain properties
            dom.update(**prop_diff)

            # Handle records. NOTE: this block must stay inside the
            # 'if prop_diff:' branch -- 'dom' and 'records' are only
            # bound there; previously it was dedented and raised
            # NameError whenever prop_diff was empty.
            if records:
                recs = dom.list_records()
                # 1. delete all the current records other than rackspace
                #    NS records
                for rec in recs:
                    if rec.type != 'NS' or 'stabletransit.com' not in rec.data:
                        rec.delete()
                # 2. update with the new records in prop_diff
                dom.add_records(records)

    def handle_delete(self):
        """Delete a Rackspace CloudDns Instance."""
        LOG.debug("CloudDns handle_delete called.")
        if self.resource_id:
            try:
                dom = self.cloud_dns().get(self.resource_id)
                dom.delete()
            except NotFound:
                # Domain already gone: treat deletion as idempotent.
                pass
# Example 2
class CloudLoadBalancer(resource.Resource):
    """Represents a Rackspace Cloud Loadbalancer."""

    # Template property names (left) mapped to the camelCase key strings
    # the Cloud Load Balancer API expects (right).
    PROPERTIES = (
        NAME,
        NODES,
        PROTOCOL,
        ACCESS_LIST,
        HALF_CLOSED,
        ALGORITHM,
        CONNECTION_LOGGING,
        METADATA,
        PORT,
        TIMEOUT,
        CONNECTION_THROTTLE,
        SESSION_PERSISTENCE,
        VIRTUAL_IPS,
        CONTENT_CACHING,
        HEALTH_MONITOR,
        SSL_TERMINATION,
        ERROR_PAGE,
        HTTPS_REDIRECT,
    ) = (
        'name',
        'nodes',
        'protocol',
        'accessList',
        'halfClosed',
        'algorithm',
        'connectionLogging',
        'metadata',
        'port',
        'timeout',
        'connectionThrottle',
        'sessionPersistence',
        'virtualIps',
        'contentCaching',
        'healthMonitor',
        'sslTermination',
        'errorPage',
        'httpsRedirect',
    )

    # Properties that can be passed straight through to lb.update() during
    # handle_update (see _update_lb_properties).
    LB_UPDATE_PROPS = (NAME, ALGORITHM, PROTOCOL, HALF_CLOSED, PORT, TIMEOUT,
                       HTTPS_REDIRECT)

    # Keys of each entry in the NODES list property.
    _NODE_KEYS = (
        NODE_ADDRESSES,
        NODE_PORT,
        NODE_CONDITION,
        NODE_TYPE,
        NODE_WEIGHT,
    ) = (
        'addresses',
        'port',
        'condition',
        'type',
        'weight',
    )

    # Keys of each entry in the ACCESS_LIST property.
    _ACCESS_LIST_KEYS = (
        ACCESS_LIST_ADDRESS,
        ACCESS_LIST_TYPE,
    ) = (
        'address',
        'type',
    )

    # Keys of the CONNECTION_THROTTLE map property.
    _CONNECTION_THROTTLE_KEYS = (
        CONNECTION_THROTTLE_MAX_CONNECTION_RATE,
        CONNECTION_THROTTLE_MIN_CONNECTIONS,
        CONNECTION_THROTTLE_MAX_CONNECTIONS,
        CONNECTION_THROTTLE_RATE_INTERVAL,
    ) = (
        'maxConnectionRate',
        'minConnections',
        'maxConnections',
        'rateInterval',
    )

    # Keys of each entry in the VIRTUAL_IPS list property.
    _VIRTUAL_IP_KEYS = (VIRTUAL_IP_TYPE, VIRTUAL_IP_IP_VERSION,
                        VIRTUAL_IP_ID) = ('type', 'ipVersion', 'id')

    # Keys of the HEALTH_MONITOR map property.
    _HEALTH_MONITOR_KEYS = (
        HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION,
        HEALTH_MONITOR_DELAY,
        HEALTH_MONITOR_TIMEOUT,
        HEALTH_MONITOR_TYPE,
        HEALTH_MONITOR_BODY_REGEX,
        HEALTH_MONITOR_HOST_HEADER,
        HEALTH_MONITOR_PATH,
        HEALTH_MONITOR_STATUS_REGEX,
    ) = (
        'attemptsBeforeDeactivation',
        'delay',
        'timeout',
        'type',
        'bodyRegex',
        'hostHeader',
        'path',
        'statusRegex',
    )
    # Subset of monitor keys; presumably the ones valid for a CONNECT-type
    # monitor (usage is outside this view) -- TODO confirm.
    _HEALTH_MONITOR_CONNECT_KEYS = (
        HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION,
        HEALTH_MONITOR_DELAY,
        HEALTH_MONITOR_TIMEOUT,
        HEALTH_MONITOR_TYPE,
    )

    # Keys of the SSL_TERMINATION map property.
    _SSL_TERMINATION_KEYS = (
        SSL_TERMINATION_SECURE_PORT,
        SSL_TERMINATION_PRIVATEKEY,
        SSL_TERMINATION_CERTIFICATE,
        SSL_TERMINATION_INTERMEDIATE_CERTIFICATE,
        SSL_TERMINATION_SECURE_TRAFFIC_ONLY,
    ) = (
        'securePort',
        'privatekey',
        'certificate',
        'intermediateCertificate',
        'secureTrafficOnly',
    )

    # Attribute names resolvable via get_attr in templates.
    ATTRIBUTES = (PUBLIC_IP, VIPS) = ('PublicIp', 'virtualIps')

    # Valid values for the ALGORITHM property.
    ALGORITHMS = [
        "LEAST_CONNECTIONS", "RANDOM", "ROUND_ROBIN",
        "WEIGHTED_LEAST_CONNECTIONS", "WEIGHTED_ROUND_ROBIN"
    ]

    # Nested schema for the HEALTH_MONITOR map property (referenced from
    # properties_schema below).
    _health_monitor_schema = {
        HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION:
        properties.Schema(properties.Schema.NUMBER,
                          required=True,
                          constraints=[
                              constraints.Range(1, 10),
                          ]),
        HEALTH_MONITOR_DELAY:
        properties.Schema(properties.Schema.NUMBER,
                          required=True,
                          constraints=[
                              constraints.Range(1, 3600),
                          ]),
        HEALTH_MONITOR_TIMEOUT:
        properties.Schema(properties.Schema.NUMBER,
                          required=True,
                          constraints=[
                              constraints.Range(1, 300),
                          ]),
        HEALTH_MONITOR_TYPE:
        properties.Schema(properties.Schema.STRING,
                          required=True,
                          constraints=[
                              constraints.AllowedValues(
                                  ['CONNECT', 'HTTP', 'HTTPS']),
                          ]),
        # The remaining keys are optional; presumably only meaningful for
        # HTTP/HTTPS monitor types -- TODO confirm against the API docs.
        HEALTH_MONITOR_BODY_REGEX:
        properties.Schema(properties.Schema.STRING),
        HEALTH_MONITOR_HOST_HEADER:
        properties.Schema(properties.Schema.STRING),
        HEALTH_MONITOR_PATH:
        properties.Schema(properties.Schema.STRING),
        HEALTH_MONITOR_STATUS_REGEX:
        properties.Schema(properties.Schema.STRING),
    }

    # Template-facing property schema. Keys are the camelCase API strings
    # declared in PROPERTIES; entries with update_allowed=True may change
    # without resource replacement.
    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING, update_allowed=True),
        NODES:
        properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NODE_ADDRESSES:
                    properties.Schema(
                        properties.Schema.LIST,
                        required=True,
                        description=(_("IP addresses for the load balancer "
                                       "node. Must have at least one "
                                       "address.")),
                        schema=properties.Schema(properties.Schema.STRING)),
                    NODE_PORT:
                    properties.Schema(properties.Schema.INTEGER,
                                      required=True),
                    NODE_CONDITION:
                    properties.Schema(properties.Schema.STRING,
                                      default='ENABLED',
                                      constraints=[
                                          constraints.AllowedValues(
                                              ['ENABLED', 'DISABLED']),
                                      ]),
                    NODE_TYPE:
                    properties.Schema(properties.Schema.STRING,
                                      constraints=[
                                          constraints.AllowedValues(
                                              ['PRIMARY', 'SECONDARY']),
                                      ]),
                    NODE_WEIGHT:
                    properties.Schema(properties.Schema.NUMBER,
                                      constraints=[
                                          constraints.Range(1, 100),
                                      ]),
                },
            ),
            required=True,
            update_allowed=True),
        PROTOCOL:
        properties.Schema(properties.Schema.STRING,
                          required=True,
                          constraints=[
                              constraints.AllowedValues([
                                  'DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS',
                                  'IMAPS', 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL',
                                  'POP3', 'POP3S', 'SMTP', 'TCP',
                                  'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM',
                                  'SFTP'
                              ]),
                          ],
                          update_allowed=True),
        ACCESS_LIST:
        properties.Schema(properties.Schema.LIST,
                          schema=properties.Schema(
                              properties.Schema.MAP,
                              schema={
                                  ACCESS_LIST_ADDRESS:
                                  properties.Schema(properties.Schema.STRING,
                                                    required=True),
                                  ACCESS_LIST_TYPE:
                                  properties.Schema(
                                      properties.Schema.STRING,
                                      required=True,
                                      constraints=[
                                          constraints.AllowedValues(
                                              ['ALLOW', 'DENY']),
                                      ]),
                              },
                          )),
        HALF_CLOSED:
        properties.Schema(properties.Schema.BOOLEAN, update_allowed=True),
        ALGORITHM:
        properties.Schema(properties.Schema.STRING,
                          constraints=[constraints.AllowedValues(ALGORITHMS)],
                          update_allowed=True),
        CONNECTION_LOGGING:
        properties.Schema(properties.Schema.BOOLEAN, update_allowed=True),
        METADATA:
        properties.Schema(properties.Schema.MAP, update_allowed=True),
        PORT:
        properties.Schema(properties.Schema.INTEGER,
                          required=True,
                          update_allowed=True),
        TIMEOUT:
        properties.Schema(properties.Schema.NUMBER,
                          constraints=[
                              constraints.Range(1, 120),
                          ],
                          update_allowed=True),
        CONNECTION_THROTTLE:
        properties.Schema(properties.Schema.MAP,
                          schema={
                              CONNECTION_THROTTLE_MAX_CONNECTION_RATE:
                              properties.Schema(properties.Schema.NUMBER,
                                                constraints=[
                                                    constraints.Range(
                                                        0, 100000),
                                                ]),
                              CONNECTION_THROTTLE_MIN_CONNECTIONS:
                              properties.Schema(properties.Schema.INTEGER,
                                                constraints=[
                                                    constraints.Range(1, 1000),
                                                ]),
                              CONNECTION_THROTTLE_MAX_CONNECTIONS:
                              properties.Schema(properties.Schema.INTEGER,
                                                constraints=[
                                                    constraints.Range(
                                                        1, 100000),
                                                ]),
                              CONNECTION_THROTTLE_RATE_INTERVAL:
                              properties.Schema(properties.Schema.NUMBER,
                                                constraints=[
                                                    constraints.Range(1, 3600),
                                                ]),
                          },
                          update_allowed=True),
        SESSION_PERSISTENCE:
        properties.Schema(properties.Schema.STRING,
                          constraints=[
                              constraints.AllowedValues(
                                  ['HTTP_COOKIE', 'SOURCE_IP']),
                          ],
                          update_allowed=True),
        VIRTUAL_IPS:
        properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    # NOTE(review): these three descriptions are not wrapped
                    # in _() like the rest of the file -- untranslated.
                    VIRTUAL_IP_TYPE:
                    properties.Schema(
                        properties.Schema.STRING,
                        "The type of VIP (public or internal). This property"
                        " cannot be specified if 'id' is specified. This "
                        "property must be specified if id is not specified.",
                        constraints=[
                            constraints.AllowedValues(['SERVICENET',
                                                       'PUBLIC']),
                        ]),
                    VIRTUAL_IP_IP_VERSION:
                    properties.Schema(
                        properties.Schema.STRING,
                        "IP version of the VIP. This property cannot be "
                        "specified if 'id' is specified. This property must "
                        "be specified if id is not specified.",
                        constraints=[
                            constraints.AllowedValues(['IPV6', 'IPV4']),
                        ]),
                    VIRTUAL_IP_ID:
                    properties.Schema(
                        properties.Schema.NUMBER,
                        "ID of a shared VIP to use instead of creating a "
                        "new one. This property cannot be specified if type"
                        " or version is specified.")
                },
            ),
            required=True,
            constraints=[constraints.Length(min=1)]),
        CONTENT_CACHING:
        properties.Schema(properties.Schema.STRING,
                          constraints=[
                              constraints.AllowedValues(
                                  ['ENABLED', 'DISABLED']),
                          ],
                          update_allowed=True),
        HEALTH_MONITOR:
        properties.Schema(properties.Schema.MAP,
                          schema=_health_monitor_schema,
                          update_allowed=True),
        SSL_TERMINATION:
        properties.Schema(
            properties.Schema.MAP,
            schema={
                SSL_TERMINATION_SECURE_PORT:
                properties.Schema(properties.Schema.INTEGER, default=443),
                SSL_TERMINATION_PRIVATEKEY:
                properties.Schema(properties.Schema.STRING, required=True),
                SSL_TERMINATION_CERTIFICATE:
                properties.Schema(properties.Schema.STRING, required=True),
                # only required if configuring intermediate ssl termination
                # add to custom validation
                SSL_TERMINATION_INTERMEDIATE_CERTIFICATE:
                properties.Schema(properties.Schema.STRING),
                # pyrax will default to false
                SSL_TERMINATION_SECURE_TRAFFIC_ONLY:
                properties.Schema(properties.Schema.BOOLEAN, default=False),
            },
            update_allowed=True),
        ERROR_PAGE:
        properties.Schema(properties.Schema.STRING, update_allowed=True),
        HTTPS_REDIRECT:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _("Enables or disables HTTP to HTTPS redirection for the load "
              "balancer. When enabled, any HTTP request returns status code "
              "301 (Moved Permanently), and the requester is redirected to "
              "the requested URL via the HTTPS protocol on port 443. Only "
              "available for HTTPS protocol (port=443), or HTTP protocol with "
              "a properly configured SSL termination (secureTrafficOnly=true, "
              "securePort=443)."),
            update_allowed=True,
            default=False,
            support_status=support.SupportStatus(version="2015.1"))
    }

    # Attributes exposed to templates via get_attr.
    attributes_schema = {
        PUBLIC_IP:
        attributes.Schema(_('Public IP address of the specified instance.')),
        VIPS:
        attributes.Schema(_("A list of assigned virtual ip addresses"))
    }

    def __init__(self, name, json_snippet, stack):
        """Initialize the resource and cache a Cloud LB client handle."""
        super(CloudLoadBalancer, self).__init__(name, json_snippet, stack)
        # Fetched once here and reused by every handler method.
        self.clb = self.cloud_lb()

    def cloud_lb(self):
        """Return the Cloud Load Balancer client."""
        return self.client('cloud_lb')

    def _setup_properties(self, properties, function):
        """Use defined schema properties as kwargs for loadbalancer objects.

        Returns a list with one ``function(**kwargs)`` instance per item
        (None-valued keys stripped), a single no-argument instance when no
        items are given, or None when no factory is supplied.
        """
        if not function:
            return None
        if not properties:
            return [function()]
        return [function(**self._remove_none(item)) for item in properties]

    def _alter_properties_for_api(self):
        """Set up required, but useless, key/value pairs.

        The following properties have useless key/value pairs which must
        be passed into the api. Set them up to make template definition easier.
        """
        data = self.properties.data

        session_persistence = (
            {'persistenceType': self.properties[self.SESSION_PERSISTENCE]}
            if self.SESSION_PERSISTENCE in data else None)

        connection_logging = (
            {"enabled": self.properties[self.CONNECTION_LOGGING]}
            if self.CONNECTION_LOGGING in data else None)

        metadata = None
        if self.METADATA in data:
            # The API wants a list of {'key': ..., 'value': ...} dicts
            # rather than a plain mapping.
            metadata = [{'key': key, 'value': value}
                        for key, value
                        in six.iteritems(self.properties[self.METADATA])]

        return (session_persistence, connection_logging, metadata)

    def _check_status(self, loadbalancer, status_list):
        """Refresh the loadbalancer and report whether its status matches.

        Performs a fresh GET on the balancer before testing membership.
        """
        loadbalancer.get()
        return loadbalancer.status in status_list

    def _configure_post_creation(self, loadbalancer):
        """Configure all load balancer properties post creation.

        These properties can only be set after the load balancer is created.
        Implemented as a co-routine for scheduler.TaskRunner: each ``yield``
        hands control back until the balancer returns to ACTIVE, since the
        API rejects changes while it is in a transitional state.
        """
        if self.properties[self.ACCESS_LIST]:
            while not self._check_status(loadbalancer, ['ACTIVE']):
                yield
            loadbalancer.add_access_list(self.properties[self.ACCESS_LIST])

        if self.properties[self.ERROR_PAGE]:
            while not self._check_status(loadbalancer, ['ACTIVE']):
                yield
            loadbalancer.set_error_page(self.properties[self.ERROR_PAGE])

        if self.properties[self.SSL_TERMINATION]:
            while not self._check_status(loadbalancer, ['ACTIVE']):
                yield
            ssl_term = self.properties[self.SSL_TERMINATION]
            loadbalancer.add_ssl_termination(
                ssl_term[self.SSL_TERMINATION_SECURE_PORT],
                ssl_term[self.SSL_TERMINATION_PRIVATEKEY],
                ssl_term[self.SSL_TERMINATION_CERTIFICATE],
                intermediateCertificate=ssl_term[
                    self.SSL_TERMINATION_INTERMEDIATE_CERTIFICATE],
                enabled=True,
                secureTrafficOnly=ssl_term[
                    self.SSL_TERMINATION_SECURE_TRAFFIC_ONLY])

        # Uses membership (not truthiness) so an explicit 'DISABLED' value
        # is still applied.
        if self.CONTENT_CACHING in self.properties:
            enabled = self.properties[self.CONTENT_CACHING] == 'ENABLED'
            while not self._check_status(loadbalancer, ['ACTIVE']):
                yield
            loadbalancer.content_caching = enabled

    def _process_node(self, node):
        """Yield one API-shaped node dict per address in the node spec.

        The template schema allows multiple 'addresses' per node; the API
        wants a single 'address' per node, so fan the entry out.
        """
        addresses = node.get(self.NODE_ADDRESSES)
        if not addresses:
            yield node
            return
        for address in addresses:
            expanded = copy.deepcopy(node)
            expanded['address'] = address
            del expanded[self.NODE_ADDRESSES]
            yield expanded

    def _process_nodes(self, node_list):
        """Lazily flatten every node spec into single-address node dicts."""
        return itertools.chain.from_iterable(
            self._process_node(node) for node in node_list)

    def handle_create(self):
        """Create the load balancer and schedule post-creation configuration.

        Returns the pyrax loadbalancer object, which is handed to
        check_create_complete for status polling.
        """
        expanded = self._process_nodes(self.properties.get(self.NODES))
        nodes = [self.clb.Node(**node) for node in expanded]
        virtual_ips = self._setup_properties(
            self.properties.get(self.VIRTUAL_IPS), self.clb.VirtualIP)

        (session_persistence, connection_logging,
         metadata) = self._alter_properties_for_api()

        # Keyword arguments for the create call; keys are the API names.
        lb_body = {
            'port': self.properties[self.PORT],
            'protocol': self.properties[self.PROTOCOL],
            'nodes': nodes,
            'virtual_ips': virtual_ips,
            'algorithm': self.properties.get(self.ALGORITHM),
            'halfClosed': self.properties.get(self.HALF_CLOSED),
            'connectionThrottle':
            self.properties.get(self.CONNECTION_THROTTLE),
            'metadata': metadata,
            'healthMonitor': self.properties.get(self.HEALTH_MONITOR),
            'sessionPersistence': session_persistence,
            'timeout': self.properties.get(self.TIMEOUT),
            'connectionLogging': connection_logging,
            self.HTTPS_REDIRECT: self.properties[self.HTTPS_REDIRECT]
        }

        lb_name = (self.properties.get(self.NAME)
                   or self.physical_resource_name())
        LOG.debug("Creating loadbalancer: %s" % {lb_name: lb_body})
        loadbalancer = self.clb.create(lb_name, **lb_body)
        self.resource_id_set(str(loadbalancer.id))

        # Access list, error page, SSL termination and content caching can
        # only be applied after the balancer exists.
        post_create = scheduler.TaskRunner(self._configure_post_creation,
                                           loadbalancer)
        post_create(timeout=600)
        return loadbalancer

    def check_create_complete(self, loadbalancer):
        """Report whether the balancer from handle_create is ACTIVE yet."""
        return self._check_status(loadbalancer, ['ACTIVE'])

    def handle_check(self):
        """Verify that the load balancer is ACTIVE; raise otherwise."""
        loadbalancer = self.clb.get(self.resource_id)
        if self._check_status(loadbalancer, ['ACTIVE']):
            return
        raise exception.Error(
            _("Cloud LoadBalancer is not ACTIVE "
              "(was: %s)") % loadbalancer.status)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply each changed property, returning TaskRunner checkers."""
        lb = self.clb.get(self.resource_id)
        checkers = []

        # Node membership changes produce several checkers of their own.
        if self.NODES in prop_diff:
            checkers.extend(self._update_nodes(lb, prop_diff[self.NODES]))

        # Simple top-level properties all go through a single lb.update().
        updated_props = dict((prop, prop_diff[prop]) for prop in prop_diff
                             if prop in self.LB_UPDATE_PROPS)
        if updated_props:
            checkers.append(self._update_lb_properties(lb, updated_props))

        # Every remaining updatable property has a dedicated updater; keep
        # this ordering -- it determines the sequence of API calls.
        updaters = (
            (self.HEALTH_MONITOR, self._update_health_monitor),
            (self.SESSION_PERSISTENCE, self._update_session_persistence),
            (self.SSL_TERMINATION, self._update_ssl_termination),
            (self.METADATA, self._update_metadata),
            (self.ERROR_PAGE, self._update_errorpage),
            (self.CONNECTION_LOGGING, self._update_connection_logging),
            (self.CONNECTION_THROTTLE, self._update_connection_throttle),
            (self.CONTENT_CACHING, self._update_content_caching),
        )
        for prop, updater in updaters:
            if prop in prop_diff:
                checkers.append(updater(lb, prop_diff[prop]))

        return checkers

    def _update_nodes(self, lb, updated_nodes):
        """Diff desired nodes against current ones; return TaskRunners
        that add, delete and update nodes to reach the desired set."""
        @retry_if_immutable
        def add_nodes(lb, new_nodes):
            lb.add_nodes(new_nodes)

        @retry_if_immutable
        def remove_node(known, node):
            known[node].delete()

        @retry_if_immutable
        def update_node(known, node):
            known[node].update()

        checkers = []
        current_nodes = lb.nodes
        diff_nodes = self._process_nodes(updated_nodes)
        # Nodes can be uniquely identified by address and
        # port.  Old is a dict of all nodes the loadbalancer
        # currently knows about.
        old = dict(("{0.address}{0.port}".format(node), node)
                   for node in current_nodes)
        # New is a dict of the nodes the loadbalancer will know
        # about after this update.
        new = dict(("%s%s" % (node["address"], node[self.NODE_PORT]), node)
                   for node in diff_nodes)

        old_set = set(six.iterkeys(old))
        new_set = set(six.iterkeys(new))

        deleted = old_set.difference(new_set)
        added = new_set.difference(old_set)
        updated = new_set.intersection(old_set)

        # Refuse an update that would leave the balancer with no nodes.
        if len(current_nodes) + len(added) - len(deleted) < 1:
            raise ValueError(
                _("The loadbalancer:%s requires at least one "
                  "node.") % self.name)
        """
        Add loadbalancers in the new map that are not in the old map.
        Add before delete to avoid deleting the last node and getting in
        an invalid state.
        """
        new_nodes = [self.clb.Node(**new[lb_node]) for lb_node in added]
        if new_nodes:
            checkers.append(scheduler.TaskRunner(add_nodes, lb, new_nodes))

        # Delete loadbalancers in the old dict that are not in the
        # new dict.
        for node in deleted:
            checkers.append(scheduler.TaskRunner(remove_node, old, node))

        # Update nodes that have been changed: copy any differing
        # attribute values onto the existing pyrax node object, then
        # schedule a single update() call for it.
        for node in updated:
            node_changed = False
            for attribute in six.iterkeys(new[node]):
                new_value = new[node][attribute]
                if new_value and new_value != getattr(old[node], attribute):
                    node_changed = True
                    setattr(old[node], attribute, new_value)
            if node_changed:
                checkers.append(scheduler.TaskRunner(update_node, old, node))

        return checkers

    def _update_lb_properties(self, lb, updated_props):
        """Return a task that pushes the updated properties to the LB."""
        @retry_if_immutable
        def apply_props_task():
            lb.update(**updated_props)

        return scheduler.TaskRunner(apply_props_task)

    def _update_health_monitor(self, lb, updated_hm):
        """Return a task that syncs the LB's health monitor settings.

        ``None`` means the monitor should be removed; any other value is
        pushed wholesale.
        """
        @retry_if_immutable
        def hm_delete_task():
            lb.delete_health_monitor()

        @retry_if_immutable
        def hm_add_task():
            lb.add_health_monitor(**updated_hm)

        if updated_hm is not None:
            # Adding a health monitor replaces the existing one, so there
            # is no need to delete first.
            return scheduler.TaskRunner(hm_add_task)
        return scheduler.TaskRunner(hm_delete_task)

    def _update_session_persistence(self, lb, updated_sp):
        """Return a task that sets or clears session persistence."""
        @retry_if_immutable
        def sp_clear_task():
            lb.session_persistence = ''

        @retry_if_immutable
        def sp_set_task():
            lb.session_persistence = updated_sp

        if updated_sp is not None:
            # Setting session persistence overwrites any existing value,
            # so no preliminary clear is required.
            return scheduler.TaskRunner(sp_set_task)
        return scheduler.TaskRunner(sp_clear_task)

    def _update_ssl_termination(self, lb, updated_ssl_term):
        """Return a task that sets or removes SSL termination."""
        @retry_if_immutable
        def ssl_delete_task():
            lb.delete_ssl_termination()

        @retry_if_immutable
        def ssl_add_task():
            lb.add_ssl_termination(**updated_ssl_term)

        if updated_ssl_term is not None:
            # Adding SSL termination overwrites any existing configuration,
            # so no preliminary delete is needed.
            return scheduler.TaskRunner(ssl_add_task)
        return scheduler.TaskRunner(ssl_delete_task)

    def _update_metadata(self, lb, updated_metadata):
        """Return a task that replaces or removes the LB metadata."""
        @retry_if_immutable
        def metadata_delete_task():
            lb.delete_metadata()

        @retry_if_immutable
        def metadata_set_task():
            lb.set_metadata(updated_metadata)

        if updated_metadata is not None:
            return scheduler.TaskRunner(metadata_set_task)
        return scheduler.TaskRunner(metadata_delete_task)

    def _update_errorpage(self, lb, updated_errorpage):
        """Return a task that sets or clears the custom error page."""
        @retry_if_immutable
        def errorpage_clear_task():
            lb.clear_error_page()

        @retry_if_immutable
        def errorpage_set_task():
            lb.set_error_page(updated_errorpage)

        if updated_errorpage is not None:
            return scheduler.TaskRunner(errorpage_set_task)
        return scheduler.TaskRunner(errorpage_clear_task)

    def _update_connection_logging(self, lb, updated_cl):
        """Return a task that toggles connection logging on the LB."""
        @retry_if_immutable
        def logging_on_task():
            lb.connection_logging = True

        @retry_if_immutable
        def logging_off_task():
            lb.connection_logging = False

        # Any truthy value enables logging; None/False disables it.
        return scheduler.TaskRunner(
            logging_on_task if updated_cl else logging_off_task)

    def _update_connection_throttle(self, lb, updated_ct):
        """Return a task that sets or removes connection throttling."""
        @retry_if_immutable
        def throttle_delete_task():
            lb.delete_connection_throttle()

        @retry_if_immutable
        def throttle_add_task():
            lb.add_connection_throttle(**updated_ct)

        if updated_ct is not None:
            return scheduler.TaskRunner(throttle_add_task)
        return scheduler.TaskRunner(throttle_delete_task)

    def _update_content_caching(self, lb, updated_cc):
        """Return a task that toggles content caching on the LB."""
        @retry_if_immutable
        def caching_on_task():
            lb.content_caching = True

        @retry_if_immutable
        def caching_off_task():
            lb.content_caching = False

        # Only the literal string 'ENABLED' turns caching on; anything
        # else (including None) turns it off.
        enable = updated_cc == 'ENABLED'
        return scheduler.TaskRunner(
            caching_on_task if enable else caching_off_task)

    def check_update_complete(self, checkers):
        """Push all checkers to completion in list order.

        A checker that has not been started yet is started before being
        stepped. Returns True only once every checker has finished; stops
        at the first checker that still has work to do.
        """
        def step_checker(checker):
            if not checker.started():
                checker.start()
            return checker.step()

        # all() short-circuits on the first unfinished checker, matching
        # the in-order semantics of the original loop.
        return all(step_checker(checker) for checker in checkers)

    def handle_delete(self):
        """Kick off deletion of the load balancer and return the task.

        Returns None when there is nothing to do: no physical resource id,
        the LB is already gone, or it is already in the DELETED state.
        """
        @retry_if_immutable
        def delete_lb_task(lb):
            lb.delete()

        if self.resource_id is None:
            return None
        try:
            loadbalancer = self.clb.get(self.resource_id)
        except NotFound:
            # Already gone; nothing to delete.
            return None
        if loadbalancer.status == 'DELETED':
            return None
        task = scheduler.TaskRunner(delete_lb_task, loadbalancer)
        task.start()
        return task

    def check_delete_complete(self, task):
        """Return True when the delete task (if any) has finished."""
        if task:
            # A task that cannot step further is still in progress.
            return bool(task.step())
        # No task means there was nothing to delete.
        return True

    def _remove_none(self, property_dict):
        """Remove None values that would cause schema validation problems.

        These are values that may be initialized to None.
        """
        return dict((key, value)
                    for (key, value) in six.iteritems(property_dict)
                    if value is not None)

    def validate(self):
        """Validate any of the provided params.

        Performs cross-property validation that the per-property schema
        cannot express: protocol restrictions for half-closed connections,
        the type-dependent health monitor sub-schema, HTTPS redirect
        prerequisites, and virtual IP id/type/version exclusivity.

        :raises exception.StackValidationFailed: if any check fails.
        """
        res = super(CloudLoadBalancer, self).validate()
        if res:
            return res

        # Half-closed connection support only applies to the TCP protocols.
        if self.properties.get(self.HALF_CLOSED):
            if not (self.properties[self.PROTOCOL] == 'TCP'
                    or self.properties[self.PROTOCOL] == 'TCP_CLIENT_FIRST'):
                message = (_('The %s property is only available for the TCP '
                             'or TCP_CLIENT_FIRST protocols') %
                           self.HALF_CLOSED)
                raise exception.StackValidationFailed(message=message)

        # health_monitor connect and http types require completely different
        # schema
        if self.properties.get(self.HEALTH_MONITOR):
            prop_val = self.properties[self.HEALTH_MONITOR]
            health_monitor = self._remove_none(prop_val)

            schema = self._health_monitor_schema
            if health_monitor[self.HEALTH_MONITOR_TYPE] == 'CONNECT':
                # CONNECT monitors accept only a subset of the keys.
                schema = dict((k, v) for k, v in schema.items()
                              if k in self._HEALTH_MONITOR_CONNECT_KEYS)
            properties.Properties(schema, health_monitor, function.resolve,
                                  self.name).validate()

        # validate if HTTPS_REDIRECT is true and we're not HTTPS
        redir = self.properties[self.HTTPS_REDIRECT]
        proto = self.properties[self.PROTOCOL]

        if redir and (proto != "HTTPS"):
            # HTTP is still acceptable, but only when SSL termination is
            # configured to force secure traffic on port 443.
            termcfg = self.properties.get(self.SSL_TERMINATION) or {}
            seconly = termcfg.get(self.SSL_TERMINATION_SECURE_TRAFFIC_ONLY,
                                  False)
            secport = termcfg.get(self.SSL_TERMINATION_SECURE_PORT, 0)
            if not (seconly and (secport == 443) and (proto == "HTTP")):
                message = _("HTTPS redirect is only available for the HTTPS "
                            "protocol (port=443), or the HTTP protocol with "
                            "a properly configured SSL termination "
                            "(secureTrafficOnly=true, securePort=443).")
                raise exception.StackValidationFailed(message=message)

        # if a vip specifies an id, it can't specify version or type;
        # otherwise version and type are required
        for vip in self.properties.get(self.VIRTUAL_IPS, []):
            has_id = vip.get(self.VIRTUAL_IP_ID) is not None
            has_version = vip.get(self.VIRTUAL_IP_IP_VERSION) is not None
            has_type = vip.get(self.VIRTUAL_IP_TYPE) is not None
            if has_id:
                if (has_version or has_type):
                    message = _("Cannot specify type or version if VIP id is"
                                " specified.")
                    raise exception.StackValidationFailed(message=message)
            elif not (has_version and has_type):
                message = _("Must specify VIP type and version if no id "
                            "specified.")
                raise exception.StackValidationFailed(message=message)

    def _public_ip(self, lb):
        """Return the first public VIP address of *lb*, or None if none."""
        public_addrs = (six.text_type(vip.address)
                        for vip in lb.virtual_ips
                        if vip.type == 'PUBLIC')
        return next(public_addrs, None)

    def _resolve_attribute(self, key):
        """Resolve the value of attribute *key* from the live load balancer.

        Returns None when the resource has no physical id yet.

        :raises exception.InvalidTemplateAttribute: if *key* is not a
            supported attribute name.
        """
        if self.resource_id:
            lb = self.clb.get(self.resource_id)
            # Map each attribute to a thunk so only the requested value is
            # computed; previously every attribute (including a scan of the
            # virtual IPs) was evaluated on each lookup. The local name no
            # longer shadows the imported ``function`` module either.
            attribute_function = {
                self.PUBLIC_IP: lambda: self._public_ip(lb),
                self.VIPS: lambda: [{"id": vip.id,
                                     "type": vip.type,
                                     "ip_version": vip.ip_version,
                                     "address": vip.address}
                                    for vip in lb.virtual_ips],
            }
            if key not in attribute_function:
                raise exception.InvalidTemplateAttribute(resource=self.name,
                                                         key=key)
            attr_value = attribute_function[key]()
            LOG.info(_LI('%(name)s.GetAtt(%(key)s) == %(function)s'), {
                'name': self.name,
                'key': key,
                'function': attr_value
            })
            return attr_value
Ejemplo n.º 3
0
class ResourceGroup(stack_resource.StackResource):
    """Creates one or more identically configured nested resources.

    In addition to the `refs` attribute, this resource implements synthetic
    attributes that mirror those of the resources in the group. When
    getting an attribute from this resource, however, a list of attribute
    values for each resource in the group is returned. To get attribute values
    for a single resource in the group, synthetic attributes of the form
    `resource.{resource index}.{attribute name}` can be used. The resource ID
    of a particular resource in the group can be obtained via the synthetic
    attribute `resource.{resource index}`. Note, that if you get attribute
    without `{resource index}`, e.g. `[resource, {attribute_name}]`, you'll get
    a list of this attribute's value for all resources in group.

    While each resource in the group will be identically configured, this
    resource does allow for some index-based customization of the properties
    of the resources in the group. For example::

      resources:
        my_indexed_group:
          type: OS::Heat::ResourceGroup
          properties:
            count: 3
            resource_def:
              type: OS::Nova::Server
              properties:
                # create a unique name for each server
                # using its index in the group
                name: my_server_%index%
                image: CentOS 6.5
                flavor: 4GB Performance

    would result in a group of three servers having the same image and flavor,
    but names of `my_server_0`, `my_server_1`, and `my_server_2`. The variable
    used for substitution can be customized by using the `index_var` property.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        COUNT,
        INDEX_VAR,
        RESOURCE_DEF,
        REMOVAL_POLICIES,
    ) = (
        'count',
        'index_var',
        'resource_def',
        'removal_policies',
    )

    _RESOURCE_DEF_KEYS = (
        RESOURCE_DEF_TYPE,
        RESOURCE_DEF_PROPERTIES,
        RESOURCE_DEF_METADATA,
    ) = (
        'type',
        'properties',
        'metadata',
    )

    _REMOVAL_POLICIES_KEYS = (REMOVAL_RSRC_LIST, ) = ('resource_list', )

    _ROLLING_UPDATES_SCHEMA_KEYS = (
        MIN_IN_SERVICE,
        MAX_BATCH_SIZE,
        PAUSE_TIME,
    ) = (
        'min_in_service',
        'max_batch_size',
        'pause_time',
    )

    _BATCH_CREATE_SCHEMA_KEYS = (
        MAX_BATCH_SIZE,
        PAUSE_TIME,
    ) = (
        'max_batch_size',
        'pause_time',
    )

    _UPDATE_POLICY_SCHEMA_KEYS = (
        ROLLING_UPDATE,
        BATCH_CREATE,
    ) = (
        'rolling_update',
        'batch_create',
    )

    ATTRIBUTES = (REFS, REFS_MAP, ATTR_ATTRIBUTES,
                  REMOVED_RSRC_LIST) = ('refs', 'refs_map', 'attributes',
                                        'removed_rsrc_list')

    properties_schema = {
        COUNT:
        properties.Schema(properties.Schema.INTEGER,
                          _('The number of resources to create.'),
                          default=1,
                          constraints=[
                              constraints.Range(min=0),
                          ],
                          update_allowed=True),
        INDEX_VAR:
        properties.Schema(
            properties.Schema.STRING,
            _('A variable that this resource will use to replace with the '
              'current index of a given resource in the group. Can be used, '
              'for example, to customize the name property of grouped '
              'servers in order to differentiate them when listed with '
              'nova client.'),
            default="%index%",
            constraints=[constraints.Length(min=3)],
            support_status=support.SupportStatus(version='2014.2')),
        RESOURCE_DEF:
        properties.Schema(
            properties.Schema.MAP,
            _('Resource definition for the resources in the group. The value '
              'of this property is the definition of a resource just as if '
              'it had been declared in the template itself.'),
            schema={
                RESOURCE_DEF_TYPE:
                properties.Schema(properties.Schema.STRING,
                                  _('The type of the resources in the group.'),
                                  required=True),
                RESOURCE_DEF_PROPERTIES:
                properties.Schema(
                    properties.Schema.MAP,
                    _('Property values for the resources in the group.')),
                RESOURCE_DEF_METADATA:
                properties.Schema(
                    properties.Schema.MAP,
                    _('Supplied metadata for the resources in the group.'),
                    support_status=support.SupportStatus(version='5.0.0')),
            },
            required=True,
            update_allowed=True),
        REMOVAL_POLICIES:
        properties.Schema(
            properties.Schema.LIST,
            _('Policies for removal of resources on update.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                _('Policy to be processed when doing an update which '
                  'requires removal of specific resources.'),
                schema={
                    REMOVAL_RSRC_LIST:
                    properties.Schema(
                        properties.Schema.LIST,
                        _("List of resources to be removed "
                          "when doing an update which requires removal of "
                          "specific resources. "
                          "The resource may be specified several ways: "
                          "(1) The resource name, as in the nested stack, "
                          "(2) The resource reference returned from "
                          "get_resource in a template, as available via "
                          "the 'refs' attribute. "
                          "Note this is destructive on update when specified; "
                          "even if the count is not being reduced, and once "
                          "a resource name is removed, its name is never "
                          "reused in subsequent updates."),
                        default=[]),
                },
            ),
            update_allowed=True,
            default=[],
            support_status=support.SupportStatus(version='2015.1')),
    }

    attributes_schema = {
        REFS:
        attributes.Schema(
            _("A list of resource IDs for the resources in the group."),
            type=attributes.Schema.LIST),
        REFS_MAP:
        attributes.Schema(
            _("A map of resource names to IDs for the resources in "
              "the group."),
            type=attributes.Schema.MAP,
            support_status=support.SupportStatus(version='7.0.0'),
        ),
        ATTR_ATTRIBUTES:
        attributes.Schema(
            _("A map of resource names to the specified attribute of each "
              "individual resource. "
              "Requires heat_template_version: 2014-10-16."),
            support_status=support.SupportStatus(version='2014.2'),
            type=attributes.Schema.MAP),
        REMOVED_RSRC_LIST:
        attributes.Schema(
            _("A list of removed resource names."),
            support_status=support.SupportStatus(version='7.0.0'),
            type=attributes.Schema.LIST),
    }

    rolling_update_schema = {
        MIN_IN_SERVICE:
        properties.Schema(properties.Schema.INTEGER,
                          _('The minimum number of resources in service while '
                            'rolling updates are being executed.'),
                          constraints=[constraints.Range(min=0)],
                          default=0),
        MAX_BATCH_SIZE:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The maximum number of resources to replace at once.'),
            constraints=[constraints.Range(min=1)],
            default=1),
        PAUSE_TIME:
        properties.Schema(properties.Schema.NUMBER,
                          _('The number of seconds to wait between batches of '
                            'updates.'),
                          constraints=[constraints.Range(min=0)],
                          default=0),
    }

    batch_create_schema = {
        MAX_BATCH_SIZE:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The maximum number of resources to create at once.'),
            constraints=[constraints.Range(min=1)],
            default=1),
        PAUSE_TIME:
        properties.Schema(properties.Schema.NUMBER,
                          _('The number of seconds to wait between batches.'),
                          constraints=[constraints.Range(min=0)],
                          default=0),
    }

    update_policy_schema = {
        ROLLING_UPDATE:
        properties.Schema(
            properties.Schema.MAP,
            schema=rolling_update_schema,
            support_status=support.SupportStatus(version='5.0.0')),
        BATCH_CREATE:
        properties.Schema(
            properties.Schema.MAP,
            schema=batch_create_schema,
            support_status=support.SupportStatus(version='5.0.0'))
    }

    def get_size(self):
        """Return the configured number of resources in the group."""
        return self.properties.get(self.COUNT)

    def validate_nested_stack(self):
        """Validate a one-member nested stack built from the definition."""
        # Only validate the resource definition (which may be a
        # nested template) if count is non-zero, to enable folks
        # to disable features via a zero count if they wish
        if not self.get_size():
            return

        first_name = next(self._resource_names(update_rsrc_data=False))
        test_tmpl = self._assemble_nested([first_name], include_all=True)
        res_def = next(
            six.itervalues(test_tmpl.resource_definitions(self.stack)))
        # make sure we can resolve the nested resource type
        self.stack.env.get_class_to_instantiate(res_def.resource_type)

        try:
            name = "%s-%s" % (self.stack.name, self.name)
            nested_stack = self._parse_nested_stack(name, test_tmpl,
                                                    self.child_params())
            # Relax validation: parameters may not all have values yet.
            nested_stack.strict_validate = False
            nested_stack.validate()
        except Exception as ex:
            path = "%s<%s>" % (self.name, self.template_url)
            raise exception.StackValidationFailed(
                ex, path=[self.stack.t.RESOURCES, path])

    def _current_blacklist(self):
        """Return the blacklisted names stored in the resource data."""
        db_rsrc_names = self.data().get('name_blacklist')
        if db_rsrc_names:
            return db_rsrc_names.split(',')
        else:
            return []

    def _name_blacklist(self, update_rsrc_data=True):
        """Resolve the remove_policies to names for removal."""

        nested = self.nested()

        # To avoid reusing names after removal, we store a comma-separated
        # blacklist in the resource data
        current_blacklist = self._current_blacklist()

        # Now we iterate over the removal policies, and update the blacklist
        # with any additional names
        rsrc_names = set(current_blacklist)

        for r in self.properties[self.REMOVAL_POLICIES]:
            if self.REMOVAL_RSRC_LIST in r:
                # Tolerate string or int list values
                for n in r[self.REMOVAL_RSRC_LIST]:
                    str_n = six.text_type(n)
                    if not nested or str_n in nested:
                        rsrc_names.add(str_n)
                        continue
                    # Not a resource name; try it as a reference id.
                    rsrc = nested.resource_by_refid(str_n)
                    if rsrc:
                        rsrc_names.add(rsrc.name)

        # If the blacklist has changed, update the resource data
        if update_rsrc_data and rsrc_names != set(current_blacklist):
            self.data_set('name_blacklist', ','.join(rsrc_names))
        return rsrc_names

    def _resource_names(self, size=None, update_rsrc_data=True):
        """Generate up to *size* resource names, skipping blacklisted ones.

        Returns an iterator of string names; defaults to the group size.
        """
        name_blacklist = self._name_blacklist(update_rsrc_data)
        if size is None:
            size = self.get_size()

        def is_blacklisted(name):
            return name in name_blacklist

        # Candidate names are simply "0", "1", "2", ... ad infinitum;
        # blacklisted ones are skipped so names are never reused.
        candidates = six.moves.map(six.text_type, itertools.count())

        return itertools.islice(
            six.moves.filterfalse(is_blacklisted, candidates), size)

    def _count_black_listed(self):
        """Return the number of current resource names that are blacklisted."""
        existing_members = grouputils.get_member_names(self)
        return len(self._name_blacklist() & set(existing_members))

    def handle_create(self):
        """Create the nested stack, in batches when a policy requires it.

        Returns the list of batch checkers when batch_create is in effect,
        otherwise creates the whole nested stack at once and returns None.
        """
        if self.update_policy.get(self.BATCH_CREATE):
            batch_create = self.update_policy[self.BATCH_CREATE]
            max_batch_size = batch_create[self.MAX_BATCH_SIZE]
            pause_sec = batch_create[self.PAUSE_TIME]
            # Start from capacity 0 and grow in batches.
            checkers = self._replace(0, max_batch_size, pause_sec)
            if checkers:
                checkers[0].start()
            return checkers
        else:
            names = self._resource_names()
            self.create_with_template(self._assemble_nested(names),
                                      self.child_params(),
                                      self.stack.timeout_secs())

    def check_create_complete(self, checkers=None):
        """Step the batch checkers, or defer to the parent implementation."""
        if checkers is None:
            return super(ResourceGroup, self).check_create_complete()
        for checker in checkers:
            if not checker.started():
                checker.start()
            if not checker.step():
                return False
        return True

    def _run_to_completion(self, template, timeout):
        """Update the nested stack with *template*, yielding until done."""
        updater = self.update_with_template(template, {}, timeout)

        while not super(ResourceGroup, self).check_update_complete(updater):
            yield

    def _run_update(self, total_capacity, max_updates, timeout):
        """Run one rolling-update batch at the given capacity."""
        template = self._assemble_for_rolling_update(total_capacity,
                                                     max_updates)
        return self._run_to_completion(template, timeout)

    def check_update_complete(self, checkers):
        """Step each checker in list order; True once all have finished."""
        for checker in checkers:
            if not checker.started():
                checker.start()
            if not checker.step():
                return False
        return True

    def res_def_changed(self, prop_diff):
        """Return True if the resource definition is part of the diff."""
        return self.RESOURCE_DEF in prop_diff

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply a template/property update, returning a list of checkers.

        Uses a rolling update when the definition changed and a rolling
        update policy applies; otherwise does a single full-stack update.
        """
        if tmpl_diff:
            # parse update policy
            if tmpl_diff.update_policy_changed():
                up = json_snippet.update_policy(self.update_policy_schema,
                                                self.context)
                self.update_policy = up

        checkers = []
        self.properties = json_snippet.properties(self.properties_schema,
                                                  self.context)
        if prop_diff and self.res_def_changed(prop_diff):
            updaters = self._try_rolling_update()
            if updaters:
                checkers.extend(updaters)

        if not checkers:
            # No rolling update applies: resize/update everything at once.
            resizer = scheduler.TaskRunner(
                self._run_to_completion,
                self._assemble_nested(self._resource_names()),
                self.stack.timeout_mins)
            checkers.append(resizer)

        checkers[0].start()
        return checkers

    def get_attribute(self, key, *path):
        """Resolve a (possibly synthetic) attribute of the group.

        Supports `resource.<index>[.<attr>]` addressing, the declared
        attributes (refs, refs_map, attributes, removed_rsrc_list) and,
        as a fallback, a list of the named attribute from every member.
        """
        if key.startswith("resource."):
            return grouputils.get_nested_attrs(self, key, False, *path)

        names = self._resource_names()
        if key == self.REFS:
            vals = [grouputils.get_rsrc_id(self, key, False, n) for n in names]
            return attributes.select_from_attribute(vals, path)
        if key == self.REFS_MAP:
            refs_map = {
                n: grouputils.get_rsrc_id(self, key, False, n)
                for n in names
            }
            return refs_map
        if key == self.REMOVED_RSRC_LIST:
            return self._current_blacklist()
        if key == self.ATTR_ATTRIBUTES:
            # 'attributes' requires a path naming which attribute to map.
            if not path:
                raise exception.InvalidTemplateAttribute(resource=self.name,
                                                         key=key)
            return dict(
                (n, grouputils.get_rsrc_attr(self, key, False, n, *path))
                for n in names)

        # Fallback: collect attribute `key` from each member resource.
        path = [key] + list(path)
        return [
            grouputils.get_rsrc_attr(self, key, False, n, *path) for n in names
        ]

    def _nested_output_defns(self, resource_names, get_attr_fn):
        """Yield output definitions the nested stack needs for our attrs.

        One output is generated per referenced attribute so that attribute
        resolution can read nested stack outputs instead of live resources.
        """
        for attr in self.referenced_attrs():
            if isinstance(attr, six.string_types):
                key, path = attr, []
                output_name = attr
            else:
                key, path = attr[0], list(attr[1:])
                output_name = ', '.join(attr)

            if key.startswith("resource."):
                keycomponents = key.split('.', 2)
                res_name = keycomponents[1]
                attr_name = keycomponents[2:]
                if attr_name and (res_name in resource_names):
                    value = get_attr_fn([res_name] + attr_name + path)
                    yield output.OutputDefinition(output_name, value)

            elif key == self.ATTR_ATTRIBUTES and path:
                value = {r: get_attr_fn([r] + path) for r in resource_names}
                yield output.OutputDefinition(output_name, value)

            elif key not in self.ATTRIBUTES:
                value = [get_attr_fn([r, key] + path) for r in resource_names]
                yield output.OutputDefinition(output_name, value)

    def build_resource_definition(self, res_name, res_defn):
        """Build a ResourceDefinition for one member from the group def.

        Index-variable substitution is applied to the properties using
        *res_name* before the definition is constructed.
        """
        res_def = copy.deepcopy(res_defn)

        props = res_def.get(self.RESOURCE_DEF_PROPERTIES)
        if props:
            props = self._handle_repl_val(res_name, props)

        res_type = res_def[self.RESOURCE_DEF_TYPE]
        meta = res_def[self.RESOURCE_DEF_METADATA]

        return rsrc_defn.ResourceDefinition(res_name, res_type, props, meta)

    def get_resource_def(self, include_all=False):
        """Returns the resource definition portion of the group.

        :param include_all: if False, only properties for the resource
               definition that are not empty will be included
        :type include_all: bool
        :return: resource definition for the group
        :rtype: dict
        """

        # At this stage, we don't mind if all of the parameters have values
        # assigned. Pass in a custom resolver to the properties to not
        # error when a parameter does not have a user entered value.
        def ignore_param_resolve(snippet):
            if isinstance(snippet, function.Function):
                try:
                    return snippet.result()
                except exception.UserParameterMissing:
                    return None

            # NOTE(review): collections.Mapping/Iterable moved to
            # collections.abc in Python 3.3 and were removed in 3.10;
            # confirm the supported interpreter versions.
            if isinstance(snippet, collections.Mapping):
                return dict(
                    (k, ignore_param_resolve(v)) for k, v in snippet.items())
            elif (not isinstance(snippet, six.string_types)
                  and isinstance(snippet, collections.Iterable)):
                return [ignore_param_resolve(v) for v in snippet]

            return snippet

        self.properties.resolve = ignore_param_resolve

        res_def = self.properties[self.RESOURCE_DEF]
        if not include_all:
            return self._clean_props(res_def)
        return res_def

    def _clean_props(self, res_defn):
        """Return a copy of *res_defn* with None-valued properties removed."""
        res_def = copy.deepcopy(res_defn)
        props = res_def.get(self.RESOURCE_DEF_PROPERTIES)
        if props:
            clean = dict((k, v) for k, v in props.items() if v is not None)
            props = clean
            res_def[self.RESOURCE_DEF_PROPERTIES] = props
        return res_def

    def _handle_repl_val(self, res_name, val):
        """Recursively replace the index variable with *res_name* in *val*."""
        repl_var = self.properties[self.INDEX_VAR]

        def recurse(x):
            return self._handle_repl_val(res_name, x)

        # Strings are checked first so they are substituted, not iterated.
        if isinstance(val, six.string_types):
            return val.replace(repl_var, res_name)
        elif isinstance(val, collections.Mapping):
            return {k: recurse(v) for k, v in val.items()}
        elif isinstance(val, collections.Sequence):
            return [recurse(v) for v in val]
        return val

    def _assemble_nested(self,
                         names,
                         include_all=False,
                         template_version=('heat_template_version',
                                           '2015-04-30')):
        """Assemble a nested stack template with one member per name."""

        def_dict = self.get_resource_def(include_all)
        definitions = [(k, self.build_resource_definition(k, def_dict))
                       for k in names]
        tmpl = scl_template.make_template(definitions,
                                          version=template_version)

        # Expose the attributes we reference as outputs of the nested stack.
        att_func = 'get_attr'
        get_attr = functools.partial(tmpl.functions[att_func], None, att_func)
        for odefn in self._nested_output_defns([k for k, d in definitions],
                                               get_attr):
            tmpl.add_output(odefn)

        return tmpl

    def _assemble_for_rolling_update(self,
                                     total_capacity,
                                     max_updates,
                                     include_all=False,
                                     template_version=('heat_template_version',
                                                       '2015-04-30')):
        """Assemble a template for one rolling-update step.

        At most *max_updates* existing members are replaced while growing
        (or shrinking) the stack to *total_capacity* members.
        """
        names = list(self._resource_names(total_capacity))
        name_blacklist = self._name_blacklist()

        valid_resources = [(n, d)
                           for n, d in grouputils.get_member_definitions(self)
                           if n not in name_blacklist]

        targ_cap = self.get_size()

        def replace_priority(res_item):
            # Lower sort keys are replaced first.
            name, defn = res_item
            try:
                index = names.index(name)
            except ValueError:
                # High priority - delete immediately
                return 0
            else:
                if index < targ_cap:
                    # Update higher indices first
                    return targ_cap - index
                else:
                    # Low priority - don't update
                    return total_capacity

        old_resources = sorted(valid_resources, key=replace_priority)
        existing_names = set(n for n, d in valid_resources)
        new_names = six.moves.filterfalse(lambda n: n in existing_names, names)
        res_def = self.get_resource_def(include_all)
        definitions = scl_template.member_definitions(
            old_resources, res_def, total_capacity, max_updates,
            lambda: next(new_names), self.build_resource_definition)
        return scl_template.make_template(definitions,
                                          version=template_version)

    def _try_rolling_update(self):
        """Return rolling-update checkers, or None if no policy is set."""
        if self.update_policy[self.ROLLING_UPDATE]:
            policy = self.update_policy[self.ROLLING_UPDATE]
            return self._replace(policy[self.MIN_IN_SERVICE],
                                 policy[self.MAX_BATCH_SIZE],
                                 policy[self.PAUSE_TIME])

    def _resolve_attribute(self, name):
        """Resolve attributes the base class cannot (removed_rsrc_list)."""
        if name == self.REMOVED_RSRC_LIST:
            return self._current_blacklist()

    def _update_timeout(self, batch_cnt, pause_sec):
        """Return the per-batch timeout after subtracting total pause time.

        :raises ValueError: if the pauses alone would exceed the stack
            timeout.
        """
        total_pause_time = pause_sec * max(batch_cnt - 1, 0)
        if total_pause_time >= self.stack.timeout_secs():
            msg = _('The current update policy will result in stack update '
                    'timeout.')
            raise ValueError(msg)
        return self.stack.timeout_secs() - total_pause_time

    @staticmethod
    def _get_batches(targ_cap, curr_cap, batch_size, min_in_service):
        """Yield (capacity, updates) pairs for each rolling-update batch."""
        updated = 0

        while rolling_update.needs_update(targ_cap, curr_cap, updated):
            new_cap, total_new = rolling_update.next_batch(
                targ_cap, curr_cap, updated, batch_size, min_in_service)

            yield new_cap, total_new

            # Members created beyond the larger of current/target capacity
            # don't count as updates of existing members.
            updated += total_new - max(new_cap - max(curr_cap, targ_cap), 0)
            curr_cap = new_cap

    def _replace(self, min_in_service, batch_size, pause_sec):
        """Build the TaskRunner list for a batched replace of the group.

        Update batches alternate with pause tasks when pause_sec > 0.
        """
        def pause_between_batch(pause_sec):
            duration = timeutils.Duration(pause_sec)
            while not duration.expired():
                yield

        # blacklist count existing
        num_blacklist = self._count_black_listed()

        # current capacity not including existing blacklisted
        curr_cap = len(self.nested()) - num_blacklist if self.nested() else 0

        batches = list(
            self._get_batches(self.get_size(), curr_cap, batch_size,
                              min_in_service))
        update_timeout = self._update_timeout(len(batches), pause_sec)

        def tasks():
            for index, (curr_cap, max_upd) in enumerate(batches):
                yield scheduler.TaskRunner(self._run_update, curr_cap, max_upd,
                                           update_timeout)

                # No pause after the final batch.
                if index < (len(batches) - 1) and pause_sec > 0:
                    yield scheduler.TaskRunner(pause_between_batch, pause_sec)

        return list(tasks())

    def child_template(self):
        """Return the nested stack template for the current member names."""
        names = self._resource_names()
        return self._assemble_nested(names)

    def child_params(self):
        """The nested stack takes no parameters."""
        return {}

    def handle_adopt(self, resource_data):
        """Adopt existing resources into the group from *resource_data*."""
        names = self._resource_names()
        if names:
            return self.create_with_template(self._assemble_nested(names), {},
                                             adopt_data=resource_data)
Ejemplo n.º 4
0
class CinderVolume(aws_vol.Volume):
    """A Cinder block-storage volume.

    Extends the AWS volume resource with Cinder-specific properties
    (name, description, volume type, image, scheduler hints) and
    attributes, and supports in-place updates: rename/re-describe,
    metadata replacement, retype (API v2+) and size extension, plus
    backup-based snapshot and restore.
    """

    PROPERTIES = (
        AVAILABILITY_ZONE,
        SIZE,
        SNAPSHOT_ID,
        BACKUP_ID,
        NAME,
        DESCRIPTION,
        VOLUME_TYPE,
        METADATA,
        IMAGE_REF,
        IMAGE,
        SOURCE_VOLID,
        CINDER_SCHEDULER_HINTS,
    ) = (
        'availability_zone',
        'size',
        'snapshot_id',
        'backup_id',
        'name',
        'description',
        'volume_type',
        'metadata',
        'imageRef',
        'image',
        'source_volid',
        'scheduler_hints',
    )

    ATTRIBUTES = (
        AVAILABILITY_ZONE_ATTR,
        SIZE_ATTR,
        SNAPSHOT_ID_ATTR,
        DISPLAY_NAME_ATTR,
        DISPLAY_DESCRIPTION_ATTR,
        VOLUME_TYPE_ATTR,
        METADATA_ATTR,
        SOURCE_VOLID_ATTR,
        STATUS,
        CREATED_AT,
        BOOTABLE,
        METADATA_VALUES_ATTR,
        ENCRYPTED_ATTR,
        ATTACHMENTS,
    ) = (
        'availability_zone',
        'size',
        'snapshot_id',
        'display_name',
        'display_description',
        'volume_type',
        'metadata',
        'source_volid',
        'status',
        'created_at',
        'bootable',
        'metadata_values',
        'encrypted',
        'attachments',
    )

    properties_schema = {
        AVAILABILITY_ZONE:
        properties.Schema(
            properties.Schema.STRING,
            _('The availability zone in which the volume will be created.')),
        SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('The size of the volume in GB. '
                            'On update only increase in size is supported.'),
                          update_allowed=True,
                          constraints=[
                              constraints.Range(min=1),
                          ]),
        SNAPSHOT_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('If specified, the snapshot to create the volume from.'),
            constraints=[constraints.CustomConstraint('cinder.snapshot')]),
        BACKUP_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('If specified, the backup to create the volume from.')),
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('A name used to distinguish the volume.'),
            update_allowed=True,
        ),
        DESCRIPTION:
        properties.Schema(
            properties.Schema.STRING,
            _('A description of the volume.'),
            update_allowed=True,
        ),
        VOLUME_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('If specified, the type of volume to use, mapping to a '
              'specific backend.'),
            constraints=[constraints.CustomConstraint('cinder.vtype')],
            update_allowed=True),
        METADATA:
        properties.Schema(
            properties.Schema.MAP,
            _('Key/value pairs to associate with the volume.'),
            update_allowed=True,
        ),
        IMAGE_REF:
        properties.Schema(properties.Schema.STRING,
                          _('The ID of the image to create the volume from.'),
                          support_status=support.SupportStatus(
                              support.DEPRECATED,
                              _('Use property %s.') % IMAGE)),
        IMAGE:
        properties.Schema(
            properties.Schema.STRING,
            _('If specified, the name or ID of the image to create the '
              'volume from.'),
            constraints=[constraints.CustomConstraint('glance.image')]),
        SOURCE_VOLID:
        properties.Schema(
            properties.Schema.STRING,
            _('If specified, the volume to use as source.'),
            constraints=[constraints.CustomConstraint('cinder.volume')]),
        CINDER_SCHEDULER_HINTS:
        properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary key-value pairs specified by the client to help '
              'the Cinder scheduler creating a volume.'),
            support_status=support.SupportStatus(version='2015.1')),
    }

    attributes_schema = {
        AVAILABILITY_ZONE_ATTR:
        attributes.Schema(
            _('The availability zone in which the volume is located.')),
        SIZE_ATTR:
        attributes.Schema(_('The size of the volume in GB.')),
        SNAPSHOT_ID_ATTR:
        attributes.Schema(
            _('The snapshot the volume was created from, if any.')),
        DISPLAY_NAME_ATTR:
        attributes.Schema(_('Name of the volume.')),
        DISPLAY_DESCRIPTION_ATTR:
        attributes.Schema(_('Description of the volume.')),
        VOLUME_TYPE_ATTR:
        attributes.Schema(
            _('The type of the volume mapping to a backend, if any.')),
        METADATA_ATTR:
        attributes.Schema(_('Key/value pairs associated with the volume.')),
        SOURCE_VOLID_ATTR:
        attributes.Schema(_('The volume used as source, if any.')),
        STATUS:
        attributes.Schema(_('The current status of the volume.')),
        CREATED_AT:
        attributes.Schema(_('The timestamp indicating volume creation.')),
        BOOTABLE:
        attributes.Schema(
            _('Boolean indicating if the volume can be booted or not.')),
        METADATA_VALUES_ATTR:
        attributes.Schema(
            _('Key/value pairs associated with the volume in raw dict form.')),
        ENCRYPTED_ATTR:
        attributes.Schema(
            _('Boolean indicating if the volume is encrypted or not.')),
        ATTACHMENTS:
        attributes.Schema(_('The list of attachments of the volume.')),
    }

    # Statuses that mean "creation still in progress" when polling Cinder.
    _volume_creating_status = ['creating', 'restoring-backup', 'downloading']

    default_client_name = 'cinder'

    def _name(self):
        """Return the user-supplied name, or the inherited default name."""
        name = self.properties[self.NAME]
        if name:
            return name
        return super(CinderVolume, self)._name()

    def _description(self):
        """Return the description property (may be None)."""
        return self.properties[self.DESCRIPTION]

    def _create_arguments(self):
        """Build the keyword arguments for the Cinder volume-create call."""
        arguments = {
            'size': self.properties[self.SIZE],
            'availability_zone': self.properties[self.AVAILABILITY_ZONE]
        }
        # IMAGE takes precedence over the deprecated IMAGE_REF property.
        if self.properties.get(self.IMAGE):
            arguments['imageRef'] = self.client_plugin('glance').get_image_id(
                self.properties[self.IMAGE])
        elif self.properties.get(self.IMAGE_REF):
            arguments['imageRef'] = self.properties[self.IMAGE_REF]

        # Only pass optional arguments the user actually set.
        optionals = (self.SNAPSHOT_ID, self.VOLUME_TYPE, self.SOURCE_VOLID,
                     self.METADATA, self.CINDER_SCHEDULER_HINTS)
        arguments.update((prop, self.properties[prop]) for prop in optionals
                         if self.properties[prop])

        return arguments

    def _resolve_attribute(self, name):
        """Fetch the live volume and resolve the requested attribute.

        Metadata attributes are special-cased; display_name/description
        map to the v2 API field names when a v2+ client is in use.
        """
        cinder = self.client()
        vol = cinder.volumes.get(self.resource_id)
        if name == self.METADATA_ATTR:
            return six.text_type(jsonutils.dumps(vol.metadata))
        elif name == self.METADATA_VALUES_ATTR:
            return vol.metadata
        if cinder.volume_api_version >= 2:
            if name == self.DISPLAY_NAME_ATTR:
                return vol.name
            elif name == self.DISPLAY_DESCRIPTION_ATTR:
                return vol.description
        return six.text_type(getattr(vol, name))

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply property changes to the existing volume.

        Handles rename/re-describe, metadata replacement, retype
        (rejected on API v1) and size extension.  Extending an attached
        volume is done as detach -> extend -> re-attach.  Returns the
        list of TaskRunners to be driven by check_update_complete.
        """
        vol = None
        checkers = []
        cinder = self.client()
        # update the name and description for cinder volume
        if self.NAME in prop_diff or self.DESCRIPTION in prop_diff:
            vol = cinder.volumes.get(self.resource_id)
            update_name = (prop_diff.get(self.NAME)
                           or self.properties.get(self.NAME))
            update_description = (prop_diff.get(self.DESCRIPTION)
                                  or self.properties.get(self.DESCRIPTION))
            kwargs = self._fetch_name_and_description(
                cinder.volume_api_version, update_name, update_description)
            cinder.volumes.update(vol, **kwargs)
        # update the metadata for cinder volume
        if self.METADATA in prop_diff:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)
            metadata = prop_diff.get(self.METADATA)
            cinder.volumes.update_all_metadata(vol, metadata)
        # retype
        if self.VOLUME_TYPE in prop_diff:
            if cinder.volume_api_version == 1:
                LOG.info(
                    _LI('Volume type update not supported '
                        'by Cinder API V1.'))
                raise exception.NotSupported(
                    feature=_('Using Cinder API V1, volume_type update'))
            else:
                if not vol:
                    vol = cinder.volumes.get(self.resource_id)
                new_vol_type = prop_diff.get(self.VOLUME_TYPE)
                cinder.volumes.retype(vol, new_vol_type, 'never')
        # extend volume size
        if self.SIZE in prop_diff:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)

            new_size = prop_diff[self.SIZE]
            if new_size < vol.size:
                raise exception.NotSupported(feature=_("Shrinking volume"))

            elif new_size > vol.size:
                if vol.attachments:
                    # NOTE(pshchelo):
                    # this relies on current behavior of cinder attachments,
                    # i.e. volume attachments is a list with len<=1,
                    # so the volume can be attached only to single instance,
                    # and id of attachment is the same as id of the volume
                    # it describes, so detach/attach the same volume
                    # will not change volume attachment id.
                    server_id = vol.attachments[0]['server_id']
                    device = vol.attachments[0]['device']
                    attachment_id = vol.attachments[0]['id']
                    detach_task = vol_task.VolumeDetachTask(
                        self.stack, server_id, attachment_id)
                    checkers.append(scheduler.TaskRunner(detach_task))
                    extend_task = vol_task.VolumeExtendTask(
                        self.stack, vol.id, new_size)
                    checkers.append(scheduler.TaskRunner(extend_task))
                    attach_task = vol_task.VolumeAttachTask(
                        self.stack, server_id, vol.id, device)
                    checkers.append(scheduler.TaskRunner(attach_task))

                else:
                    extend_task = vol_task.VolumeExtendTask(
                        self.stack, vol.id, new_size)
                    checkers.append(scheduler.TaskRunner(extend_task))

        # Kick off the first task; the rest are started sequentially by
        # check_update_complete.
        if checkers:
            checkers[0].start()
        return checkers

    def check_update_complete(self, checkers):
        """Step the update tasks in order; True once all have finished."""
        for checker in checkers:
            if not checker.started():
                checker.start()
            if not checker.step():
                return False
        return True

    def handle_snapshot(self):
        """Start a Cinder backup of the volume; return the backup id."""
        backup = self.client().backups.create(self.resource_id)
        return backup.id

    def check_snapshot_complete(self, backup_id):
        """Poll the backup; True when available, raise on failure states."""
        backup = self.client().backups.get(backup_id)
        if backup.status == 'creating':
            return False
        if backup.status == 'available':
            self.data_set('backup_id', backup_id)
            return True
        raise exception.Error(backup.status)

    def handle_delete_snapshot(self, snapshot):
        """Delete the backup recorded in the snapshot's resource data."""
        backup_id = snapshot['resource_data']['backup_id']

        def delete():
            cinder = self.client()
            try:
                cinder.backups.delete(backup_id)
                # Keep polling until the GET raises NotFound, which is
                # swallowed by ignore_not_found below.
                while True:
                    yield
                    cinder.backups.get(backup_id)
            except Exception as ex:
                self.client_plugin().ignore_not_found(ex)

        delete_task = scheduler.TaskRunner(delete)
        delete_task.start()
        return delete_task

    def check_delete_snapshot_complete(self, delete_task):
        """Step the backup-deletion task; True when it has finished."""
        return delete_task.step()

    def validate(self):
        """Validate provided params."""
        res = super(CinderVolume, self).validate()
        if res is not None:
            return res

        # Scheduler hints are only supported from Cinder API v2
        if (self.properties.get(self.CINDER_SCHEDULER_HINTS)
                and self.client().volume_api_version == 1):
            raise exception.StackValidationFailed(
                message=_('Scheduler hints are not supported by the current '
                          'volume API.'))

    def handle_restore(self, defn, restore_data):
        """Return a frozen definition that recreates the volume from backup.

        Source-selection properties (image, source volume, size) are
        dropped so the restored volume comes solely from the backup.
        """
        backup_id = restore_data['resource_data']['backup_id']
        ignore_props = (self.IMAGE_REF, self.IMAGE, self.SOURCE_VOLID,
                        self.SIZE)
        props = dict((key, value) for (
            key,
            value) in six.iteritems(defn.properties(self.properties_schema))
                     if key not in ignore_props and value is not None)
        props[self.BACKUP_ID] = backup_id
        return defn.freeze(properties=props)
# ---- Example no. 5 ----
class LoadBalancer(resource.Resource):
    """
    A resource to link a neutron pool with servers.

    Each Nova server in the members list is resolved to an IP address
    and registered as a member of the given pool; the member id is
    stored in resource data keyed by the server id.
    """

    PROPERTIES = (
        POOL_ID,
        PROTOCOL_PORT,
        MEMBERS,
    ) = (
        'pool_id',
        'protocol_port',
        'members',
    )

    properties_schema = {
        POOL_ID:
        properties.Schema(properties.Schema.STRING,
                          _('The ID of the load balancing pool.'),
                          required=True,
                          update_allowed=True),
        PROTOCOL_PORT:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Port number on which the servers are running on the members.'),
            required=True,
            constraints=[
                constraints.Range(0, 65535),
            ]),
        MEMBERS:
        properties.Schema(properties.Schema.LIST,
                          _('The list of Nova server IDs load balanced.'),
                          update_allowed=True),
    }

    default_client_name = 'neutron'

    def handle_create(self):
        """Register every listed server as a pool member.

        The created member id is persisted in resource data under the
        server id so it can be found again on update/delete.
        """
        pool = self.properties[self.POOL_ID]
        client = self.neutron()
        protocol_port = self.properties[self.PROTOCOL_PORT]

        for member in self.properties[self.MEMBERS] or []:
            address = self.client_plugin('nova').server_to_ipaddress(member)
            lb_member = client.create_member({
                'member': {
                    'pool_id': pool,
                    'address': address,
                    'protocol_port': protocol_port
                }
            })['member']
            self.data_set(member, lb_member['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Reconcile pool membership with the new members property.

        Members no longer listed are removed (NotFound is ignored) and
        newly listed servers are added.
        """
        new_props = json_snippet.properties(self.properties_schema,
                                            self.context)

        # Valid use cases are:
        # - Membership controlled by members property in template
        # - Empty members property in template; membership controlled by
        #   "updates" triggered from autoscaling group.
        # Mixing the two will lead to undefined behaviour.
        if (self.MEMBERS in prop_diff
                and (self.properties[self.MEMBERS] is not None
                     or new_props[self.MEMBERS] is not None)):
            members = set(new_props[self.MEMBERS] or [])
            rd_members = self.data()
            old_members = set(six.iterkeys(rd_members))
            client = self.neutron()
            for member in old_members - members:
                member_id = rd_members[member]
                try:
                    client.delete_member(member_id)
                except Exception as ex:
                    self.client_plugin().ignore_not_found(ex)
                self.data_delete(member)
            pool = self.properties[self.POOL_ID]
            protocol_port = self.properties[self.PROTOCOL_PORT]
            for member in members - old_members:
                address = self.client_plugin('nova').server_to_ipaddress(
                    member)
                lb_member = client.create_member({
                    'member': {
                        'pool_id': pool,
                        'address': address,
                        'protocol_port': protocol_port
                    }
                })['member']
                self.data_set(member, lb_member['id'])

    def handle_delete(self):
        """Remove all pool members recorded in resource data."""
        client = self.neutron()
        # FIXME(pshchelo): this deletes members in a tight loop,
        # so is prone to OverLimit bug similar to LP 1265937
        for member, member_id in self.data().items():
            try:
                client.delete_member(member_id)
            except Exception as ex:
                self.client_plugin().ignore_not_found(ex)
            self.data_delete(member)
# ---- Example no. 6 ----
class CinderVolume(Volume):
    """A Cinder volume resource (legacy variant).

    Uses the Cinder v1 field names (display_name/display_description)
    directly and supports updating name, description, metadata and
    (via the base class) size.
    """

    PROPERTIES = (
        AVAILABILITY_ZONE, SIZE, SNAPSHOT_ID, BACKUP_ID, NAME,
        DESCRIPTION, VOLUME_TYPE, METADATA, IMAGE_REF, IMAGE,
        SOURCE_VOLID,
    ) = (
        'availability_zone', 'size', 'snapshot_id', 'backup_id', 'name',
        'description', 'volume_type', 'metadata', 'imageRef', 'image',
        'source_volid',
    )

    ATTRIBUTES = (
        AVAILABILITY_ZONE_ATTR, SIZE_ATTR, SNAPSHOT_ID_ATTR, DISPLAY_NAME,
        DISPLAY_DESCRIPTION, VOLUME_TYPE_ATTR, METADATA_ATTR,
        SOURCE_VOLID_ATTR, STATUS, CREATED_AT, BOOTABLE,
    ) = (
        'availability_zone', 'size', 'snapshot_id', 'display_name',
        'display_description', 'volume_type', 'metadata',
        'source_volid', 'status', 'created_at', 'bootable',
    )

    properties_schema = {
        AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _('The availability zone in which the volume will be created.')
        ),
        SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('The size of the volume in GB. '
              'On update only increase in size is supported.'),
            update_allowed=True,
            constraints=[
                constraints.Range(min=1),
            ]
        ),
        SNAPSHOT_ID: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the snapshot to create the volume from.')
        ),
        BACKUP_ID: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the backup to create the volume from.')
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('A name used to distinguish the volume.'),
            update_allowed=True,
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('A description of the volume.'),
            update_allowed=True,
        ),
        VOLUME_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the type of volume to use, mapping to a '
              'specific backend.')
        ),
        METADATA: properties.Schema(
            properties.Schema.MAP,
            _('Key/value pairs to associate with the volume.'),
            update_allowed=True,
        ),
        IMAGE_REF: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the image to create the volume from.'),
            support_status=support.SupportStatus(
                support.DEPRECATED,
                _('Use property %s.') % IMAGE)
        ),
        IMAGE: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the name or ID of the image to create the '
              'volume from.'),
            constraints=[
                constraints.CustomConstraint('glance.image')
            ]
        ),
        SOURCE_VOLID: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the volume to use as source.')
        ),
    }

    attributes_schema = {
        AVAILABILITY_ZONE_ATTR: attributes.Schema(
            _('The availability zone in which the volume is located.')
        ),
        SIZE_ATTR: attributes.Schema(
            _('The size of the volume in GB.')
        ),
        SNAPSHOT_ID_ATTR: attributes.Schema(
            _('The snapshot the volume was created from, if any.')
        ),
        DISPLAY_NAME: attributes.Schema(
            _('Name of the volume.')
        ),
        DISPLAY_DESCRIPTION: attributes.Schema(
            _('Description of the volume.')
        ),
        VOLUME_TYPE_ATTR: attributes.Schema(
            _('The type of the volume mapping to a backend, if any.')
        ),
        METADATA_ATTR: attributes.Schema(
            _('Key/value pairs associated with the volume.')
        ),
        SOURCE_VOLID_ATTR: attributes.Schema(
            _('The volume used as source, if any.')
        ),
        STATUS: attributes.Schema(
            _('The current status of the volume.')
        ),
        CREATED_AT: attributes.Schema(
            _('The timestamp indicating volume creation.')
        ),
        BOOTABLE: attributes.Schema(
            _('Boolean indicating if the volume can be booted or not.')
        ),
    }

    # Statuses that mean "creation still in progress" when polling Cinder.
    _volume_creating_status = ['creating', 'restoring-backup', 'downloading']

    def _display_name(self):
        """Return the user-supplied name, or the inherited default name."""
        name = self.properties[self.NAME]
        if name:
            return name
        return super(CinderVolume, self)._display_name()

    def _display_description(self):
        """Return the description property (may be None)."""
        return self.properties[self.DESCRIPTION]

    def _create_arguments(self):
        """Build the keyword arguments for the Cinder volume-create call."""
        arguments = {
            'size': self.properties[self.SIZE],
            'availability_zone': self.properties[self.AVAILABILITY_ZONE]
        }
        # IMAGE takes precedence over the deprecated IMAGE_REF property.
        if self.properties.get(self.IMAGE):
            arguments['imageRef'] = glance_utils.get_image_id(
                self.glance(), self.properties[self.IMAGE])
        elif self.properties.get(self.IMAGE_REF):
            arguments['imageRef'] = self.properties[self.IMAGE_REF]

        # Only pass optional arguments the user actually set.
        optionals = (self.SNAPSHOT_ID, self.VOLUME_TYPE, self.SOURCE_VOLID,
                     self.METADATA)
        arguments.update((prop, self.properties[prop]) for prop in optionals
                         if self.properties[prop])
        return arguments

    def _resolve_attribute(self, name):
        """Fetch the live volume and resolve the requested attribute.

        NOTE: uses the Python 2 ``unicode`` builtin, consistent with the
        era of this module.
        """
        vol = self.cinder().volumes.get(self.resource_id)
        if name == 'metadata':
            return unicode(json.dumps(vol.metadata))
        return unicode(getattr(vol, name))

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply name/description and metadata changes, then delegate.

        Size changes are handled by the base class handle_update.
        """
        vol = None
        # update the name and description for cinder volume
        if self.NAME in prop_diff or self.DESCRIPTION in prop_diff:
            vol = self.cinder().volumes.get(self.resource_id)
            kwargs = {}
            update_name = (prop_diff.get(self.NAME) or
                           self.properties.get(self.NAME))
            update_description = (prop_diff.get(self.DESCRIPTION) or
                                  self.properties.get(self.DESCRIPTION))
            kwargs['display_name'] = update_name
            kwargs['display_description'] = update_description
            self.cinder().volumes.update(vol, **kwargs)
        # update the metadata for cinder volume
        if self.METADATA in prop_diff:
            if not vol:
                vol = self.cinder().volumes.get(self.resource_id)
            metadata = prop_diff.get(self.METADATA)
            self.cinder().volumes.update_all_metadata(vol, metadata)
        # update the size in super
        return super(CinderVolume, self).handle_update(json_snippet,
                                                       tmpl_diff,
                                                       prop_diff)
# ---- Example no. 7 ----
class ResourceGroup(resource.Resource):
    """A group of identically configured nested resources.

    The group's membership is described by the ``resource_def``
    property; creation and deletion are backed by a senlin cluster
    (see ``default_client_name``) sized to the ``count`` property.
    """

    support_status = support.SupportStatus(
        version='6.0.0',
        status=support.UNSUPPORTED)

    default_client_name = 'senlin'

    PROPERTIES = (
        COUNT, INDEX_VAR, RESOURCE_DEF, REMOVAL_POLICIES,
    ) = (
        'count', 'index_var', 'resource_def', 'removal_policies',
    )

    _RESOURCE_DEF_KEYS = (
        RESOURCE_DEF_TYPE, RESOURCE_DEF_PROPERTIES, RESOURCE_DEF_METADATA,
    ) = (
        'type', 'properties', 'metadata',
    )

    _REMOVAL_POLICIES_KEYS = (
        REMOVAL_RSRC_LIST,
    ) = (
        'resource_list',
    )

    _ROLLING_UPDATES_SCHEMA_KEYS = (
        MIN_IN_SERVICE, MAX_BATCH_SIZE, PAUSE_TIME,
    ) = (
        'min_in_service', 'max_batch_size', 'pause_time',
    )

    # NOTE: MAX_BATCH_SIZE and PAUSE_TIME are deliberately rebound here
    # with the same values as above; both policy schemas share them.
    _BATCH_CREATE_SCHEMA_KEYS = (
        MAX_BATCH_SIZE, PAUSE_TIME,
    ) = (
        'max_batch_size', 'pause_time',
    )

    _UPDATE_POLICY_SCHEMA_KEYS = (
        ROLLING_UPDATE, BATCH_CREATE,
    ) = (
        'rolling_update', 'batch_create',
    )

    ATTRIBUTES = (
        REFS, ATTR_ATTRIBUTES,
    ) = (
        'refs', 'attributes',
    )

    properties_schema = {
        COUNT: properties.Schema(
            properties.Schema.INTEGER,
            _('The number of resources to create.'),
            default=1,
            constraints=[
                constraints.Range(min=0),
            ],
            update_allowed=True
        ),
        INDEX_VAR: properties.Schema(
            properties.Schema.STRING,
            _('A variable that this resource will use to replace with the '
              'current index of a given resource in the group. Can be used, '
              'for example, to customize the name property of grouped '
              'servers in order to differentiate them when listed with '
              'nova client.'),
            default="%index%",
            constraints=[
                constraints.Length(min=3)
            ],
            support_status=support.SupportStatus(version='2014.2')
        ),
        RESOURCE_DEF: properties.Schema(
            properties.Schema.MAP,
            _('Resource definition for the resources in the group. The value '
              'of this property is the definition of a resource just as if '
              'it had been declared in the template itself.'),
            schema={
                RESOURCE_DEF_TYPE: properties.Schema(
                    properties.Schema.STRING,
                    _('The type of the resources in the group'),
                    required=True
                ),
                RESOURCE_DEF_PROPERTIES: properties.Schema(
                    properties.Schema.MAP,
                    _('Property values for the resources in the group')
                ),
                RESOURCE_DEF_METADATA: properties.Schema(
                    properties.Schema.MAP,
                    _('Supplied metadata for the resources in the group'),
                    support_status=support.SupportStatus(version='5.0.0')
                ),

            },
            required=True,
            update_allowed=True
        ),
        REMOVAL_POLICIES: properties.Schema(
            properties.Schema.LIST,
            _('Policies for removal of resources on update'),
            schema=properties.Schema(
                properties.Schema.MAP,
                _('Policy to be processed when doing an update which '
                  'requires removal of specific resources.'),
                schema={
                    REMOVAL_RSRC_LIST: properties.Schema(
                        properties.Schema.LIST,
                        _("List of resources to be removed "
                          "when doing an update which requires removal of "
                          "specific resources. "
                          "The resource may be specified several ways: "
                          "(1) The resource name, as in the nested stack, "
                          "(2) The resource reference returned from "
                          "get_resource in a template, as available via "
                          "the 'refs' attribute "
                          "Note this is destructive on update when specified; "
                          "even if the count is not being reduced, and once "
                          "a resource name is removed, it's name is never "
                          "reused in subsequent updates"
                          ),
                        default=[]
                    ),
                },
            ),
            update_allowed=True,
            default=[],
            support_status=support.SupportStatus(version='2015.1')
        ),
    }

    def handle_create(self):
        """Create the backing cluster sized to the requested count."""
        # TODO(review): profile id is hard-coded; it should come from
        # configuration or a property.
        profile_id = '111'
        params = {
            'name': self.physical_resource_name(),
            'profile_id': profile_id,
            'desired_capacity': self.properties[self.COUNT],
            'min_size': 0,
            'max_size': -1,
            # BUG FIX: the original passed self.properties[self.METADATA],
            # but METADATA is not a property of this resource (it is only a
            # key of the resource_def map), so every create raised an
            # AttributeError.  No group-level metadata is sent.
        }

        cluster = self.client().create(models.Cluster, params)
        self.resource_id_set(cluster['id'])

    def handle_delete(self):
        """Request deletion of the backing cluster, ignoring NotFound.

        Returns the resource id so check_delete_complete can poll it.
        """
        if self.resource_id is not None:
            params = {
                'id': self.resource_id
            }
            try:
                self.client().delete(models.Cluster, params)
            except Exception as ex:
                self.client_plugin().ignore_not_found(ex)

        return self.resource_id

    def check_delete_complete(self, resource_id):
        """Return True once the cluster can no longer be fetched."""
        if not resource_id:
            return True

        params = {
            'id': resource_id
        }
        try:
            self.client().get(models.Cluster, params)
        except Exception as ex:
            # Any error other than NotFound is re-raised by the plugin.
            self.client_plugin().ignore_not_found(ex)
            return True

        return False
# ---- Example no. 8 ----
class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
    """A resource that implements Cinder volumes.

    Cinder volume is a storage in the form of block devices. It can be used,
    for example, for providing storage to instance. Volume supports creation
    from snapshot, backup or image. Also volume can be created only by size.
    """

    # Template property names accepted by this resource.
    PROPERTIES = (
        AVAILABILITY_ZONE, SIZE, SNAPSHOT_ID, BACKUP_ID, NAME,
        DESCRIPTION, VOLUME_TYPE, METADATA, IMAGE_REF, IMAGE,
        SOURCE_VOLID, CINDER_SCHEDULER_HINTS, READ_ONLY, MULTI_ATTACH,
    ) = (
        'availability_zone', 'size', 'snapshot_id', 'backup_id', 'name',
        'description', 'volume_type', 'metadata', 'imageRef', 'image',
        'source_volid', 'scheduler_hints', 'read_only', 'multiattach',
    )

    # Attribute names resolvable via get_attr in templates.
    ATTRIBUTES = (
        AVAILABILITY_ZONE_ATTR, SIZE_ATTR, SNAPSHOT_ID_ATTR, DISPLAY_NAME_ATTR,
        DISPLAY_DESCRIPTION_ATTR, VOLUME_TYPE_ATTR, METADATA_ATTR,
        SOURCE_VOLID_ATTR, STATUS, CREATED_AT, BOOTABLE, METADATA_VALUES_ATTR,
        ENCRYPTED_ATTR, ATTACHMENTS, MULTI_ATTACH_ATTR,
    ) = (
        'availability_zone', 'size', 'snapshot_id', 'display_name',
        'display_description', 'volume_type', 'metadata',
        'source_volid', 'status', 'created_at', 'bootable', 'metadata_values',
        'encrypted', 'attachments', 'multiattach',
    )

    # Validation/typing schema for the template properties above.
    properties_schema = {
        AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _('The availability zone in which the volume will be created.')
        ),
        SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('The size of the volume in GB. '
              'On update only increase in size is supported. This property '
              'is required unless property %(backup)s or %(vol)s or '
              '%(snapshot)s is specified.')
            % dict(backup=BACKUP_ID,
                   vol=SOURCE_VOLID,
                   snapshot=SNAPSHOT_ID),
            update_allowed=True,
            constraints=[
                constraints.Range(min=1),
            ]
        ),
        SNAPSHOT_ID: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the snapshot to create the volume from.'),
            constraints=[
                constraints.CustomConstraint('cinder.snapshot')
            ]
        ),
        BACKUP_ID: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the backup to create the volume from.'),
            update_allowed=True,
            constraints=[
                constraints.CustomConstraint('cinder.backup')
            ]
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('A name used to distinguish the volume.'),
            update_allowed=True,
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('A description of the volume.'),
            update_allowed=True,
        ),
        VOLUME_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the type of volume to use, mapping to a '
              'specific backend.'),
            constraints=[
                constraints.CustomConstraint('cinder.vtype')
            ],
            update_allowed=True
        ),
        METADATA: properties.Schema(
            properties.Schema.MAP,
            _('Key/value pairs to associate with the volume.'),
            update_allowed=True,
        ),
        # IMAGE_REF is hidden in favour of IMAGE (see support_status below).
        IMAGE_REF: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the image to create the volume from.'),
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                message=_('Use property %s.') % IMAGE,
                version='5.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.1'
                )
            )
        ),
        IMAGE: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the name or ID of the image to create the '
              'volume from.'),
            constraints=[
                constraints.CustomConstraint('glance.image')
            ]
        ),
        SOURCE_VOLID: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the volume to use as source.'),
            constraints=[
                constraints.CustomConstraint('cinder.volume')
            ]
        ),
        CINDER_SCHEDULER_HINTS: properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary key-value pairs specified by the client to help '
              'the Cinder scheduler creating a volume.'),
            support_status=support.SupportStatus(version='2015.1')
        ),
        READ_ONLY: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Enables or disables read-only access mode of volume.'),
            support_status=support.SupportStatus(version='5.0.0'),
            update_allowed=True,
        ),
        MULTI_ATTACH: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether allow the volume to be attached more than once.'),
            support_status=support.SupportStatus(version='6.0.0'),
            default=False
        ),
    }

    # Schema for the resolvable attributes declared in ATTRIBUTES.
    attributes_schema = {
        AVAILABILITY_ZONE_ATTR: attributes.Schema(
            _('The availability zone in which the volume is located.'),
            type=attributes.Schema.STRING
        ),
        SIZE_ATTR: attributes.Schema(
            _('The size of the volume in GB.'),
            type=attributes.Schema.STRING
        ),
        SNAPSHOT_ID_ATTR: attributes.Schema(
            _('The snapshot the volume was created from, if any.'),
            type=attributes.Schema.STRING
        ),
        DISPLAY_NAME_ATTR: attributes.Schema(
            _('Name of the volume.'),
            type=attributes.Schema.STRING
        ),
        DISPLAY_DESCRIPTION_ATTR: attributes.Schema(
            _('Description of the volume.'),
            type=attributes.Schema.STRING
        ),
        VOLUME_TYPE_ATTR: attributes.Schema(
            _('The type of the volume mapping to a backend, if any.'),
            type=attributes.Schema.STRING
        ),
        METADATA_ATTR: attributes.Schema(
            _('Key/value pairs associated with the volume.'),
            type=attributes.Schema.STRING
        ),
        SOURCE_VOLID_ATTR: attributes.Schema(
            _('The volume used as source, if any.'),
            type=attributes.Schema.STRING
        ),
        STATUS: attributes.Schema(
            _('The current status of the volume.'),
            type=attributes.Schema.STRING
        ),
        CREATED_AT: attributes.Schema(
            _('The timestamp indicating volume creation.'),
            type=attributes.Schema.STRING
        ),
        BOOTABLE: attributes.Schema(
            _('Boolean indicating if the volume can be booted or not.'),
            type=attributes.Schema.STRING
        ),
        METADATA_VALUES_ATTR: attributes.Schema(
            _('Key/value pairs associated with the volume in raw dict form.'),
            type=attributes.Schema.MAP
        ),
        ENCRYPTED_ATTR: attributes.Schema(
            _('Boolean indicating if the volume is encrypted or not.'),
            type=attributes.Schema.STRING
        ),
        ATTACHMENTS: attributes.Schema(
            _('The list of attachments of the volume.'),
            type=attributes.Schema.STRING
        ),
        MULTI_ATTACH_ATTR: attributes.Schema(
            _('Boolean indicating whether allow the volume to be attached '
              'more than once.'),
            type=attributes.Schema.BOOLEAN,
            support_status=support.SupportStatus(version='6.0.0'),
        ),
    }

    # Transient Cinder statuses that mean creation is still in progress.
    _volume_creating_status = ['creating', 'restoring-backup', 'downloading']

    entity = 'volumes'

    def translation_rules(self, props):
        """Transparently map the hidden IMAGE_REF property onto IMAGE."""
        replace_image_ref = translation.TranslationRule(
            props,
            translation.TranslationRule.REPLACE,
            [self.IMAGE],
            value_path=[self.IMAGE_REF])
        return [replace_image_ref]

    def _name(self):
        """Return the user-supplied name, or fall back to the inherited one."""
        return (self.properties[self.NAME] or
                super(CinderVolume, self)._name())

    def _description(self):
        """Return the volume description from the template properties."""
        return self.properties[self.DESCRIPTION]

    def _create_arguments(self):
        """Assemble the keyword arguments for the Cinder volume-create call."""
        props = self.properties
        arguments = {
            'size': props[self.SIZE],
            'availability_zone': props[self.AVAILABILITY_ZONE],
        }

        hints = self._scheduler_hints(props[self.CINDER_SCHEDULER_HINTS])
        if hints:
            arguments[self.CINDER_SCHEDULER_HINTS] = hints

        # IMAGE (name or id, resolved via glance) wins over legacy IMAGE_REF.
        if props[self.IMAGE]:
            glance = self.client_plugin('glance')
            arguments['imageRef'] = glance.find_image_by_name_or_id(
                props[self.IMAGE])
        elif props[self.IMAGE_REF]:
            arguments['imageRef'] = props[self.IMAGE_REF]

        # Optional pass-through properties, only when actually set.
        for prop in (self.SNAPSHOT_ID, self.VOLUME_TYPE, self.SOURCE_VOLID,
                     self.METADATA, self.MULTI_ATTACH):
            if props[prop] is not None:
                arguments[prop] = props[prop]

        return arguments

    def _resolve_attribute(self, name):
        """Resolve an attribute against the live volume in Cinder."""
        if self.resource_id is None:
            return
        vol = self.client().volumes.get(self.resource_id)
        # Attributes with a representation differing from the raw field.
        if name == self.METADATA_ATTR:
            return six.text_type(jsonutils.dumps(vol.metadata))
        if name == self.METADATA_VALUES_ATTR:
            return vol.metadata
        if name == self.DISPLAY_NAME_ATTR:
            return vol.name
        if name == self.DISPLAY_DESCRIPTION_ATTR:
            return vol.description
        # Everything else maps straight onto a volume field of the same name.
        return six.text_type(getattr(vol, name))

    def check_create_complete(self, vol_id):
        """Return True once the volume is created and post-setup is applied."""
        done = super(CinderVolume, self).check_create_complete(vol_id)
        # Cinder only accepts a read-only flag update for an available
        # volume; doing it in handle_create() could hit the volume while it
        # is still creating and raise.
        if done:
            self._store_config_default_properties()
            self._update_read_only(self.properties[self.READ_ONLY])

        return done

    def _store_config_default_properties(self, attributes=None):
        """Method for storing default values of properties in resource data.

        Some properties have default values, specified in project configuration
        file, so cannot be hardcoded into properties_schema, but should be
        stored for further using. So need to get created resource and take
        required property's value.

        :param attributes: optional dict of volume attributes; when None the
            live resource is fetched via _show_resource().
        """
        # NOTE(review): the parameter name shadows the module-level
        # 'attributes' import used by this class - harmless here, but worth
        # renaming if this method ever needs that module.
        if attributes is None:
            attributes = self._show_resource()

        # Persist (or clear) the effective volume type in resource data.
        if attributes.get('volume_type') is not None:
            self.data_set(self.VOLUME_TYPE, attributes['volume_type'])
        else:
            self.data_delete(self.VOLUME_TYPE)

    def _extend_volume(self, new_size):
        """Ask Cinder to grow the volume to new_size GB."""
        try:
            self.client().volumes.extend(self.resource_id, new_size)
        except Exception as ex:
            # Non-client errors propagate untouched; client errors are
            # wrapped in a Heat error with context.
            if not self.client_plugin().is_client_exception(ex):
                raise
            raise exception.Error(_(
                "Failed to extend volume %(vol)s - %(err)s") % {
                    'vol': self.resource_id, 'err': six.text_type(ex)})
        return True

    def _update_read_only(self, read_only_flag):
        """Apply the read-only flag in Cinder when a value was provided."""
        if read_only_flag is None:
            return True
        self.client().volumes.update_readonly_flag(self.resource_id,
                                                   read_only_flag)
        return True

    def _check_extend_volume_complete(self):
        """Poll Cinder until the resize has finished.

        Returns False while the volume is still 'extending' and True once it
        is back to 'available'; any other status means the resize failed.
        """
        vol = self.client().volumes.get(self.resource_id)
        if vol.status == 'extending':
            # Lazy %-args (not eager interpolation) to match the LOG.info
            # calls below and avoid formatting when debug is disabled.
            LOG.debug("Volume %s is being extended", vol.id)
            return False

        if vol.status != 'available':
            LOG.info(_LI("Resize failed: Volume %(vol)s "
                         "is in %(status)s state."),
                     {'vol': vol.id, 'status': vol.status})
            raise exception.ResourceUnknownStatus(
                resource_status=vol.status,
                result=_('Volume resize failed'))

        LOG.info(_LI('Volume %(id)s resize complete'), {'id': vol.id})
        return True

    def _backup_restore(self, vol_id, backup_id):
        """Kick off restoring backup_id into volume vol_id via Cinder."""
        try:
            self.client().restores.restore(backup_id, vol_id)
        except Exception as ex:
            # Non-client errors propagate untouched; client errors are
            # wrapped with the volume/backup context.
            if not self.client_plugin().is_client_exception(ex):
                raise
            raise exception.Error(_(
                "Failed to restore volume %(vol)s from backup %(backup)s "
                "- %(err)s") % {'vol': vol_id,
                                'backup': backup_id,
                                'err': ex})
        return True

    def _check_backup_restore_complete(self):
        """Poll Cinder until the backup restore has finished.

        Returns False while still 'restoring-backup' and True once the
        volume is 'available'; any other status means the restore failed.
        """
        vol = self.client().volumes.get(self.resource_id)
        if vol.status == 'restoring-backup':
            # Lazy %-args instead of eager interpolation, matching the
            # LOG.info calls below; also fixes the garbled message grammar.
            LOG.debug("Volume %s is being restored from backup", vol.id)
            return False

        if vol.status != 'available':
            LOG.info(_LI("Restore failed: Volume %(vol)s is in %(status)s "
                         "state."), {'vol': vol.id, 'status': vol.status})
            raise exception.ResourceUnknownStatus(
                resource_status=vol.status,
                result=_('Volume backup restore failed'))

        LOG.info(_LI('Volume %(id)s backup restore complete'), {'id': vol.id})
        return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Start the updates implied by prop_diff.

        Returns a tuple of progress trackers
        (restore, detach, resize, access, attach) that
        check_update_complete() drives to completion; entries are None for
        operations that are not needed.
        """
        vol = None
        cinder = self.client()
        prg_resize = None
        prg_attach = None
        prg_detach = None
        prg_restore = None
        prg_access = None

        # update the name and description for cinder volume
        if self.NAME in prop_diff or self.DESCRIPTION in prop_diff:
            vol = cinder.volumes.get(self.resource_id)
            update_name = (prop_diff.get(self.NAME) or
                           self.properties[self.NAME])
            update_description = (prop_diff.get(self.DESCRIPTION) or
                                  self.properties[self.DESCRIPTION])
            kwargs = self._fetch_name_and_description(update_name,
                                                      update_description)
            cinder.volumes.update(vol, **kwargs)
        # update the metadata for cinder volume
        if self.METADATA in prop_diff:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)
            metadata = prop_diff.get(self.METADATA)
            cinder.volumes.update_all_metadata(vol, metadata)
        # retype
        if self.VOLUME_TYPE in prop_diff:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)
            new_vol_type = prop_diff.get(self.VOLUME_TYPE)
            cinder.volumes.retype(vol, new_vol_type, 'never')
        # update read_only access mode
        if self.READ_ONLY in prop_diff:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)
            flag = prop_diff.get(self.READ_ONLY)
            prg_access = progress.VolumeUpdateAccessModeProgress(
                read_only=flag)
            # changing access mode requires detaching the volume first
            prg_detach, prg_attach = self._detach_attach_progress(vol)
        # restore the volume from backup
        if self.BACKUP_ID in prop_diff:
            prg_restore = progress.VolumeBackupRestoreProgress(
                vol_id=self.resource_id,
                backup_id=prop_diff.get(self.BACKUP_ID))
        # extend volume size
        if self.SIZE in prop_diff:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)

            new_size = prop_diff[self.SIZE]
            if new_size < vol.size:
                raise exception.NotSupported(feature=_("Shrinking volume"))

            elif new_size > vol.size:
                prg_resize = progress.VolumeResizeProgress(size=new_size)
                # resizing also requires a detach/reattach cycle
                prg_detach, prg_attach = self._detach_attach_progress(vol)

        return prg_restore, prg_detach, prg_resize, prg_access, prg_attach

    def _detach_attach_progress(self, vol):
        """Build detach/attach progress trackers for vol's attachment.

        Returns a (prg_detach, prg_attach) pair; both are None when the
        volume is not attached anywhere.
        """
        prg_attach = None
        prg_detach = None
        if vol.attachments:
            # NOTE(pshchelo):
            # this relies on current behavior of cinder attachments,
            # i.e. volume attachments is a list with len<=1,
            # so the volume can be attached only to single instance,
            # and id of attachment is the same as id of the volume
            # it describes, so detach/attach the same volume
            # will not change volume attachment id.
            server_id = vol.attachments[0]['server_id']
            device = vol.attachments[0]['device']
            attachment_id = vol.attachments[0]['id']
            prg_detach = progress.VolumeDetachProgress(
                server_id, vol.id, attachment_id)
            prg_attach = progress.VolumeAttachProgress(
                server_id, vol.id, device)

        return prg_detach, prg_attach

    def _detach_volume_to_complete(self, prg_detach):
        """Advance the detach state machine one step per call.

        Steps: issue the nova detach, then wait for cinder to report the
        volume detached, then wait for nova. Returns False while any step is
        outstanding; falls through (returning None) once all flags are set —
        the caller gates on prg_detach.nova_complete, not the return value.
        """
        if not prg_detach.called:
            self.client_plugin('nova').detach_volume(prg_detach.srv_id,
                                                     prg_detach.attach_id)
            prg_detach.called = True
            return False
        if not prg_detach.cinder_complete:
            cinder_complete_res = self.client_plugin(
            ).check_detach_volume_complete(prg_detach.vol_id)
            prg_detach.cinder_complete = cinder_complete_res
            return False
        if not prg_detach.nova_complete:
            prg_detach.nova_complete = self.client_plugin(
                'nova').check_detach_volume_complete(prg_detach.srv_id,
                                                     prg_detach.attach_id)
            return False

    def _attach_volume_to_complete(self, prg_attach):
        """Advance the attach state machine one step per call.

        First call issues the nova attach; subsequent calls poll cinder and
        return True once the attach completed. Falls through (returning
        None) only when prg_attach.complete was already True.
        """
        if not prg_attach.called:
            prg_attach.called = self.client_plugin('nova').attach_volume(
                prg_attach.srv_id, prg_attach.vol_id, prg_attach.device)
            return False
        if not prg_attach.complete:
            prg_attach.complete = self.client_plugin(
            ).check_attach_volume_complete(prg_attach.vol_id)
            return prg_attach.complete

    def check_update_complete(self, checkers):
        """Drive the progress trackers created by handle_update().

        checkers is the (restore, detach, resize, access, attach) tuple;
        operations are sequenced restore -> detach -> resize -> access ->
        re-attach. Returns True only when everything requested is finished.
        """
        prg_restore, prg_detach, prg_resize, prg_access, prg_attach = checkers
        if prg_restore:
            if not prg_restore.called:
                prg_restore.called = self._backup_restore(
                    prg_restore.vol_id,
                    prg_restore.backup_id)
                return False
            if not prg_restore.complete:
                prg_restore.complete = self._check_backup_restore_complete()
                # done only if no resize is queued behind the restore
                return prg_restore.complete and not prg_resize
        if not prg_resize and not prg_access:
            return True
        # detach volume
        if prg_detach:
            if not prg_detach.nova_complete:
                self._detach_volume_to_complete(prg_detach)
                return False
        # resize volume
        if prg_resize:
            if not prg_resize.called:
                prg_resize.called = self._extend_volume(prg_resize.size)
                return False
            if not prg_resize.complete:
                prg_resize.complete = self._check_extend_volume_complete()
                # done only if the volume does not need re-attaching
                return prg_resize.complete and not prg_attach
        # update read_only access mode
        if prg_access:
            if not prg_access.called:
                prg_access.called = self._update_read_only(
                    prg_access.read_only)
                return False
        # reattach volume back
        if prg_attach:
            return self._attach_volume_to_complete(prg_attach)
        return True

    def handle_snapshot(self):
        """Create a Cinder backup of the volume and remember its id."""
        backup_id = self.client().backups.create(self.resource_id).id
        self.data_set('backup_id', backup_id)
        return backup_id

    def check_snapshot_complete(self, backup_id):
        """Poll the backup; True when available, False while creating."""
        backup = self.client().backups.get(backup_id)
        status = backup.status
        if status == 'available':
            return True
        if status == 'creating':
            return False
        # Any other status is a failure.
        raise exception.Error(backup.fail_reason)

    def handle_delete_snapshot(self, snapshot):
        """Delete the backup recorded for the snapshot, if any.

        Returns the backup id to poll, or None when there is nothing to do
        or the backup was already gone.
        """
        backup_id = snapshot['resource_data'].get('backup_id')
        if not backup_id:
            return
        try:
            self.client().backups.delete(backup_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return
        return backup_id

    def check_delete_snapshot_complete(self, backup_id):
        """Return True once the backup can no longer be fetched."""
        if not backup_id:
            return True
        try:
            self.client().backups.get(backup_id)
        except Exception as ex:
            # "Not found" means deletion finished; anything else raises.
            self.client_plugin().ignore_not_found(ex)
            return True
        return False

    def _build_exclusive_options(self):
        """Collect the mutually-exclusive create-source options that are set.

        Returns (exclusive_options, allow_no_size_options); the latter are
        the sources that also make the size property optional.
        """
        exclusive_options = []
        allow_no_size_options = []
        # Snapshot and source volume allow omitting size.
        for option in (self.SNAPSHOT_ID, self.SOURCE_VOLID):
            if self.properties.get(option):
                exclusive_options.append(option)
                allow_no_size_options.append(option)
        # Image sources still require a size.
        for option in (self.IMAGE, self.IMAGE_REF):
            if self.properties.get(option):
                exclusive_options.append(option)
        return exclusive_options, allow_no_size_options

    def _validate_create_sources(self):
        """Check that the combination of size and source options is legal.

        Raises StackValidationFailed when no size is given and the create
        source is not exactly one of snapshot/source-volume, or when a size
        is given together with more than one exclusive source option.
        """
        exclusive_options, allow_no_size_ops = self._build_exclusive_options()
        size = self.properties.get(self.SIZE)
        if (size is None and
                (len(allow_no_size_ops) != 1 or len(exclusive_options) != 1)):
            msg = (_('If neither "%(backup_id)s" nor "%(size)s" is '
                     'provided, one and only one of "%(source_vol)s", '
                     '"%(snapshot_id)s" must be specified, but currently '
                     'specified options: %(exclusive_options)s.')
                   % {'backup_id': self.BACKUP_ID,
                      'size': self.SIZE,
                      'source_vol': self.SOURCE_VOLID,
                      'snapshot_id': self.SNAPSHOT_ID,
                      'exclusive_options': exclusive_options})
            raise exception.StackValidationFailed(message=msg)
        elif size and len(exclusive_options) > 1:
            msg = (_('If "%(size)s" is provided, only one of '
                     '"%(image)s", "%(image_ref)s", "%(source_vol)s", '
                     '"%(snapshot_id)s" can be specified, but currently '
                     'specified options: %(exclusive_options)s.')
                   % {'size': self.SIZE,
                      'image': self.IMAGE,
                      'image_ref': self.IMAGE_REF,
                      'source_vol': self.SOURCE_VOLID,
                      'snapshot_id': self.SNAPSHOT_ID,
                      'exclusive_options': exclusive_options})
            raise exception.StackValidationFailed(message=msg)

    def validate(self):
        """Validate provided params."""
        res = super(CinderVolume, self).validate()
        if res is not None:
            return res

        # IMAGE and the legacy IMAGE_REF are mutually exclusive.
        if (self.properties.get(self.IMAGE) and
                self.properties.get(self.IMAGE_REF)):
            raise exception.ResourcePropertyConflict(self.IMAGE,
                                                     self.IMAGE_REF)
        # A backup needs no further source checks; otherwise validate the
        # size/source combination.
        if not self.properties.get(self.BACKUP_ID):
            self._validate_create_sources()

    def handle_restore(self, defn, restore_data):
        """Build a definition that recreates the volume from its backup."""
        backup_id = restore_data['resource_data']['backup_id']
        # 'size' is deliberately kept: if the user grew the volume after the
        # snapshot was taken, the restore must shrink it back.
        ignore_props = (
            self.IMAGE_REF, self.IMAGE, self.SOURCE_VOLID)
        props = {key: value
                 for key, value in
                 six.iteritems(defn.properties(self.properties_schema))
                 if key not in ignore_props and value is not None}
        props[self.BACKUP_ID] = backup_id
        return defn.freeze(properties=props)

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Translate observed volume data back into template properties."""
        volume_reality = {}

        # read_only is surfaced through the volume metadata.
        metadata = resource_data.get(self.METADATA)
        if metadata and metadata.get(self.READ_ONLY) is not None:
            volume_reality[self.READ_ONLY] = metadata.pop(self.READ_ONLY)

        # Record a volume-type change and refresh the stored default.
        new_vt = resource_data.get(self.VOLUME_TYPE)
        if self.data().get(self.VOLUME_TYPE) != new_vt:
            volume_reality[self.VOLUME_TYPE] = new_vt
            self._store_config_default_properties(dict(volume_type=new_vt))

        # Straight copies for the remaining updatable properties.
        for key in (self.SIZE, self.NAME, self.DESCRIPTION,
                    self.METADATA, self.BACKUP_ID):
            volume_reality[key] = resource_data.get(key)

        return volume_reality
class Cluster(resource.Resource):
    """A resource that creates a Senlin Cluster.

    Cluster resource in senlin can create and manage objects of
    the same nature, e.g. Nova servers, Heat stacks, Cinder volumes, etc.
    The collection of these objects is referred to as a cluster.
    """

    support_status = support.SupportStatus(version='6.0.0')

    default_client_name = 'senlin'

    # Template property names accepted by this resource.
    PROPERTIES = (NAME, PROFILE, DESIRED_CAPACITY, MIN_SIZE, MAX_SIZE,
                  METADATA, TIMEOUT) = ('name', 'profile', 'desired_capacity',
                                        'min_size', 'max_size', 'metadata',
                                        'timeout')

    # Attribute names resolvable via get_attr.
    ATTRIBUTES = (
        ATTR_NAME,
        ATTR_METADATA,
        ATTR_NODES,
        ATTR_DESIRED_CAPACITY,
        ATTR_MIN_SIZE,
        ATTR_MAX_SIZE,
    ) = ("name", 'metadata', 'nodes', 'desired_capacity', 'min_size',
         'max_size')

    # Cluster statuses reported by Senlin.
    _CLUSTER_STATUS = (CLUSTER_INIT, CLUSTER_ACTIVE, CLUSTER_ERROR,
                       CLUSTER_WARNING, CLUSTER_CREATING, CLUSTER_DELETING,
                       CLUSTER_UPDATING) = ('INIT', 'ACTIVE', 'ERROR',
                                            'WARNING', 'CREATING', 'DELETING',
                                            'UPDATING')

    properties_schema = {
        PROFILE:
        properties.Schema(
            properties.Schema.STRING,
            _('The name or id of the Senlin profile.'),
            required=True,
            constraints=[constraints.CustomConstraint('senlin.profile')]),
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Name of the cluster. By default, physical resource name '
              'is used.'),
        ),
        DESIRED_CAPACITY:
        properties.Schema(properties.Schema.INTEGER,
                          _('Desired initial number of resources in cluster.'),
                          default=0),
        MIN_SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Minimum number of resources in the cluster.'),
                          default=0,
                          constraints=[constraints.Range(min=0)]),
        MAX_SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Maximum number of resources in the cluster. '
                            '-1 means unlimited.'),
                          default=-1,
                          constraints=[constraints.Range(min=-1)]),
        METADATA:
        properties.Schema(
            properties.Schema.MAP,
            _('Metadata key-values defined for cluster.'),
        ),
        TIMEOUT:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The number of seconds to wait for the cluster actions.'),
            constraints=[constraints.Range(min=0)]),
    }

    attributes_schema = {
        ATTR_NAME:
        attributes.Schema(_("Cluster name."), type=attributes.Schema.STRING),
        ATTR_METADATA:
        attributes.Schema(_("Cluster metadata."), type=attributes.Schema.MAP),
        ATTR_DESIRED_CAPACITY:
        attributes.Schema(_("Desired capacity of the cluster."),
                          type=attributes.Schema.INTEGER),
        ATTR_NODES:
        attributes.Schema(_("Nodes list in the cluster."),
                          type=attributes.Schema.LIST),
        ATTR_MIN_SIZE:
        attributes.Schema(_("Min size of the cluster."),
                          type=attributes.Schema.INTEGER),
        ATTR_MAX_SIZE:
        attributes.Schema(_("Max size of the cluster."),
                          type=attributes.Schema.INTEGER),
    }

    def handle_create(self):
        """Create the Senlin cluster and record its id."""
        params = {
            'name': (self.properties[self.NAME]
                     or self.physical_resource_name()),
            'profile_id': self.properties[self.PROFILE],
            'desired_capacity': self.properties[self.DESIRED_CAPACITY],
            'min_size': self.properties[self.MIN_SIZE],
            'max_size': self.properties[self.MAX_SIZE],
            'metadata': self.properties[self.METADATA],
            'timeout': self.properties[self.TIMEOUT]
        }
        cluster = self.client().create_cluster(**params)
        self.resource_id_set(cluster.id)
        return cluster.id

    def check_create_complete(self, resource_id):
        """Poll the cluster status until creation settles."""
        cluster = self.client().get_cluster(resource_id)
        # WARNING is treated as (degraded) success.
        if cluster.status in [self.CLUSTER_ACTIVE, self.CLUSTER_WARNING]:
            return True
        elif cluster.status in [self.CLUSTER_INIT, self.CLUSTER_CREATING]:
            return False
        else:
            raise exception.ResourceInError(
                status_reason=cluster.status_reason,
                resource_status=cluster.status)

    def handle_delete(self):
        """Request cluster deletion, tolerating an already-gone cluster."""
        if self.resource_id is not None:
            with self.client_plugin().ignore_not_found:
                self.client().delete_cluster(self.resource_id)
        return self.resource_id

    def check_delete_complete(self, resource_id):
        """Return True once the cluster can no longer be fetched."""
        if not resource_id:
            return True

        try:
            # Poll the id handed over by handle_delete(); the original
            # mistakenly queried self.resource_id here, which is
            # inconsistent with the guard above.
            self.client().get_cluster(resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return True
        return False

    def validate(self):
        """Check min_size <= desired_capacity <= max_size (-1 = unlimited)."""
        min_size = self.properties[self.MIN_SIZE]
        max_size = self.properties[self.MAX_SIZE]
        desired_capacity = self.properties[self.DESIRED_CAPACITY]

        if max_size != -1 and max_size < min_size:
            msg = _("%(min_size)s can not be greater than %(max_size)s") % {
                'min_size': self.MIN_SIZE,
                'max_size': self.MAX_SIZE,
            }
            raise exception.StackValidationFailed(message=msg)

        if (desired_capacity < min_size
                or (max_size != -1 and desired_capacity > max_size)):
            msg = _("%(desired_capacity)s must be between %(min_size)s "
                    "and %(max_size)s") % {
                        'desired_capacity': self.DESIRED_CAPACITY,
                        'min_size': self.MIN_SIZE,
                        'max_size': self.MAX_SIZE,
                    }
            raise exception.StackValidationFailed(message=msg)

    def _resolve_attribute(self, name):
        """Fetch an attribute from the live cluster; None when unset."""
        # Guard against an unset resource id (consistent with the other
        # resources in this file).
        if self.resource_id is None:
            return None
        cluster = self.client().get_cluster(self.resource_id)
        return getattr(cluster, name, None)

    def _show_resource(self):
        """Return the live cluster as a plain dict."""
        cluster = self.client().get_cluster(self.resource_id)
        return cluster.to_dict()
# Ejemplo n.º 10
# 0
class TroveCluster(resource.Resource):
    """A resource for managing Trove clusters.

    A Cluster is an opaque cluster used to store Database clusters.
    """

    support_status = support.SupportStatus(version='2015.1')

    # Instance statuses this resource reacts to while polling.
    TROVE_STATUS = (
        ERROR, FAILED, ACTIVE,
    ) = (
        'ERROR', 'FAILED', 'ACTIVE',
    )

    # Cluster task names under which issuing (another) delete is pointless.
    DELETE_STATUSES = (
        DELETING, NONE
    ) = (
        'DELETING', 'NONE'
    )

    TROVE_STATUS_REASON = {
        FAILED: _('The database instance was created, but heat failed to set '
                  'up the datastore. If a database instance is in the FAILED '
                  'state, it should be deleted and a new one should be '
                  'created.'),
        ERROR: _('The last operation for the database instance failed due to '
                 'an error.'),
    }

    BAD_STATUSES = (ERROR, FAILED)

    PROPERTIES = (
        NAME, DATASTORE_TYPE, DATASTORE_VERSION, INSTANCES,
    ) = (
        'name', 'datastore_type', 'datastore_version', 'instances',
    )

    _INSTANCE_KEYS = (
        FLAVOR, VOLUME_SIZE,
    ) = (
        'flavor', 'volume_size',
    )

    ATTRIBUTES = (
        INSTANCES_ATTR, IP
    ) = (
        'instances', 'ip'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the cluster to create.'),
            constraints=[
                constraints.Length(max=255),
            ]
        ),
        DATASTORE_TYPE: properties.Schema(
            properties.Schema.STRING,
            _("Name of registered datastore type."),
            required=True,
            constraints=[
                constraints.Length(max=255)
            ]
        ),
        DATASTORE_VERSION: properties.Schema(
            properties.Schema.STRING,
            _("Name of the registered datastore version. "
              "It must exist for provided datastore type. "
              "Defaults to using single active version. "
              "If several active versions exist for provided datastore type, "
              "explicit value for this parameter must be specified."),
            required=True,
            constraints=[constraints.Length(max=255)]
        ),
        INSTANCES: properties.Schema(
            properties.Schema.LIST,
            _("List of database instances."),
            required=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    FLAVOR: properties.Schema(
                        properties.Schema.STRING,
                        _('Flavor of the instance.'),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('trove.flavor')
                        ]
                    ),
                    VOLUME_SIZE: properties.Schema(
                        properties.Schema.INTEGER,
                        _('Size of the instance disk volume in GB.'),
                        required=True,
                        constraints=[
                            constraints.Range(1, 150),
                        ]
                    )
                }
            )
        )
    }

    attributes_schema = {
        # NOTE: keyed on the ATTRIBUTES constant (INSTANCES_ATTR), not the
        # PROPERTIES constant INSTANCES. Today both resolve to 'instances',
        # but keying attributes on a property constant would silently break
        # if either constant were ever renamed.
        INSTANCES_ATTR: attributes.Schema(
            _("A list of instances ids."),
            type=attributes.Schema.LIST
        ),
        IP: attributes.Schema(
            _("A list of cluster instance IPs."),
            type=attributes.Schema.LIST
        )
    }

    default_client_name = 'trove'

    entity = 'clusters'

    def _cluster_name(self):
        """Return the user-supplied name, or a generated physical name."""
        return self.properties[self.NAME] or self.physical_resource_name()

    def handle_create(self):
        """Create the Trove cluster and record its id.

        Returns the cluster id for check_create_complete() to poll.
        """
        datastore_type = self.properties[self.DATASTORE_TYPE]
        datastore_version = self.properties[self.DATASTORE_VERSION]

        # Convert instances to the format required by troveclient:
        # each entry needs a resolved flavor id and a volume size dict.
        instances = []
        for instance in self.properties[self.INSTANCES]:
            instances.append({
                'flavorRef': self.client_plugin().find_flavor_by_name_or_id(
                    instance[self.FLAVOR]),
                'volume': {'size': instance[self.VOLUME_SIZE]}
            })

        args = {
            'name': self._cluster_name(),
            'datastore': datastore_type,
            'datastore_version': datastore_version,
            'instances': instances
        }
        cluster = self.client().clusters.create(**args)
        self.resource_id_set(cluster.id)
        return cluster.id

    def _refresh_cluster(self, cluster_id):
        """Fetch the cluster, returning None on an over-limit response.

        Over-limit is transient (API rate limiting), so the caller should
        simply retry on the next poll; any other error propagates.
        """
        try:
            cluster = self.client().clusters.get(cluster_id)
            return cluster
        except Exception as exc:
            if self.client_plugin().is_over_limit(exc):
                LOG.warning(_LW("Stack %(name)s (%(id)s) received an "
                                "OverLimit response during clusters.get():"
                                " %(exception)s"),
                            {'name': self.stack.name,
                             'id': self.stack.id,
                             'exception': exc})
                return None
            else:
                raise

    def check_create_complete(self, cluster_id):
        """Return True when every cluster instance has become ACTIVE.

        Raises ResourceInError as soon as any instance enters a bad status.
        """
        cluster = self._refresh_cluster(cluster_id)

        if cluster is None:
            # Transient over-limit; retry on the next poll.
            return False

        for instance in cluster.instances:
            if instance['status'] in self.BAD_STATUSES:
                raise exception.ResourceInError(
                    resource_status=instance['status'],
                    status_reason=self.TROVE_STATUS_REASON.get(
                        instance['status'], _("Unknown")))

            if instance['status'] != self.ACTIVE:
                return False

        LOG.info(_LI("Cluster '%s' has been created"), cluster.name)
        return True

    def cluster_delete(self, cluster_id):
        """Ensure deletion has been requested for the cluster.

        Returns False while the cluster's task is outside DELETE_STATUSES
        (i.e. delete cannot proceed yet); True once a delete has been
        issued, is already running, or the cluster is gone.
        """
        try:
            cluster = self.client().clusters.get(cluster_id)
            cluster_status = cluster.task['name']
            if cluster_status not in self.DELETE_STATUSES:
                return False
            if cluster_status != self.DELETING:
                # If cluster already started to delete, don't send another one
                # request for deleting.
                cluster.delete()
        except Exception as ex:
            # A vanished cluster counts as successfully deleted.
            self.client_plugin().ignore_not_found(ex)
        return True

    def handle_delete(self):
        """Start deletion; return the cluster id to poll, or None."""
        if not self.resource_id:
            return

        try:
            cluster = self.client().clusters.get(self.resource_id)
        except Exception as ex:
            # Already gone: nothing to poll.
            self.client_plugin().ignore_not_found(ex)
        else:
            return cluster.id

    def check_delete_complete(self, cluster_id):
        """Return True once the cluster has been fully deleted."""
        if not cluster_id:
            return True

        if not self.cluster_delete(cluster_id):
            return False

        try:
            # For some time trove cluster may continue to live
            self._refresh_cluster(cluster_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return True

        return False

    def validate(self):
        """Validate datastore type/version against the Trove service."""
        res = super(TroveCluster, self).validate()
        if res:
            return res

        datastore_type = self.properties[self.DATASTORE_TYPE]
        datastore_version = self.properties[self.DATASTORE_VERSION]

        self.client_plugin().validate_datastore(
            datastore_type, datastore_version,
            self.DATASTORE_TYPE, self.DATASTORE_VERSION)

    def _resolve_attribute(self, name):
        """Resolve 'instances' / 'ip' attributes from the live cluster."""
        if self.resource_id is None:
            return
        if name == self.INSTANCES_ATTR:
            instances = []
            cluster = self.client().clusters.get(self.resource_id)
            for instance in cluster.instances:
                instances.append(instance['id'])
            return instances
        elif name == self.IP:
            cluster = self.client().clusters.get(self.resource_id)
            return cluster.ip
Ejemplo n.º 11
0
class DesignateZone(resource.Resource):
    """Heat Template Resource for Designate Zone.

    Designate provides DNS-as-a-Service services for OpenStack. So, zone, part
    of domain is a realm with an identification string, unique in DNS.
    """

    support_status = support.SupportStatus(version='8.0.0')

    PROPERTIES = (NAME, TTL, DESCRIPTION, EMAIL, TYPE,
                  MASTERS) = ('name', 'ttl', 'description', 'email', 'type',
                              'masters')

    ATTRIBUTES = (SERIAL, ) = ('serial', )

    TYPES = (PRIMARY, SECONDARY) = ('PRIMARY', 'SECONDARY')

    properties_schema = {
        # Based on RFC 1035, length of name is set to max of 255
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('DNS Name for the zone.'),
                          required=True,
                          constraints=[constraints.Length(max=255)]),
        # Based on RFC 1035, range for ttl is set to 1 to signed 32 bit number
        TTL:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Time To Live (Seconds) for the zone.'),
            update_allowed=True,
            constraints=[constraints.Range(min=1, max=2147483647)]),
        # designate mandates to the max length of 160 for description
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description of zone.'),
                          update_allowed=True,
                          constraints=[constraints.Length(max=160)]),
        EMAIL:
        properties.Schema(
            properties.Schema.STRING,
            _('E-mail for the zone. Used in SOA records for the zone. '
              'It is required for PRIMARY Type, otherwise ignored.'),
            update_allowed=True,
        ),
        TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('Type of zone. PRIMARY is controlled by Designate, SECONDARY '
              'zones are slaved from another DNS Server.'),
            default=PRIMARY,
            constraints=[constraints.AllowedValues(allowed=TYPES)]),
        MASTERS:
        properties.Schema(
            properties.Schema.LIST,
            _('The servers to slave from to get DNS information and is '
              'mandatory for zone type SECONDARY, otherwise ignored.'),
            update_allowed=True)
    }

    attributes_schema = {
        SERIAL:
        attributes.Schema(_("DNS zone serial number."),
                          type=attributes.Schema.STRING),
    }

    default_client_name = 'designate'

    entity = 'zones'

    def validate(self):
        """Check zone-type-specific requirements.

        PRIMARY zones need an email; SECONDARY zones need masters.
        """
        super(DesignateZone, self).validate()

        def raise_invalid_exception(zone_type, prp):
            # Only enforce the property for the matching zone type.
            if self.properties.get(self.TYPE) == zone_type:
                if not self.properties.get(prp):
                    msg = _('Property %(prp)s is required for zone type '
                            '%(zone_type)s') % {
                                "prp": prp,
                                "zone_type": zone_type
                            }
                    raise exception.StackValidationFailed(message=msg)

        raise_invalid_exception(self.PRIMARY, self.EMAIL)
        raise_invalid_exception(self.SECONDARY, self.MASTERS)

    def handle_create(self):
        """Create the zone and record its id."""
        # Drop unset properties; designateclient's keyword for the zone
        # type is 'type_' (to avoid shadowing the builtin).
        args = {k: v for k, v in self.properties.items() if v}
        args['type_'] = args.pop(self.TYPE)

        zone = self.client().zones.create(**args)

        self.resource_id_set(zone['id'])

    def _check_status_complete(self):
        """Return True when the zone has left PENDING; raise on ERROR."""
        zone = self.client().zones.get(self.resource_id)

        if zone['status'] == 'ERROR':
            raise exception.ResourceInError(resource_status=zone['status'],
                                            status_reason=_('Error in zone'))

        return zone['status'] != 'PENDING'

    def check_create_complete(self, handler_data=None):
        return self._check_status_complete()

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed updatable properties to Designate."""
        args = dict()

        for prp in (self.EMAIL, self.TTL, self.DESCRIPTION, self.MASTERS):
            if prop_diff.get(prp):
                args[prp] = prop_diff.get(prp)

        if args:
            self.client().zones.update(self.resource_id, args)

    def check_update_complete(self, handler_data=None):
        return self._check_status_complete()

    def _resolve_attribute(self, name):
        """Resolve the 'serial' attribute from the live zone."""
        if self.resource_id is None:
            return
        if name == self.SERIAL:
            zone = self.client().zones.get(self.resource_id)
            return zone[name]

    def check_delete_complete(self, handler_data=None):
        """Return True once the zone deletion has finished."""
        if handler_data:
            with self.client_plugin().ignore_not_found:
                return self._check_status_complete()

        return True
Ejemplo n.º 12
0
class RandomString(resource.Resource):
    """A resource which generates a random string.

    This is useful for configuring passwords and secrets on services.
    """
    PROPERTIES = (
        LENGTH,
        SEQUENCE,
        CHARACTER_CLASSES,
        CHARACTER_SEQUENCES,
        SALT,
    ) = (
        'length',
        'sequence',
        'character_classes',
        'character_sequences',
        'salt',
    )

    _CHARACTER_CLASSES_KEYS = (
        CHARACTER_CLASSES_CLASS,
        CHARACTER_CLASSES_MIN,
    ) = (
        'class',
        'min',
    )

    _CHARACTER_SEQUENCES = (
        CHARACTER_SEQUENCES_SEQUENCE,
        CHARACTER_SEQUENCES_MIN,
    ) = (
        'sequence',
        'min',
    )

    ATTRIBUTES = (VALUE, ) = ('value', )

    properties_schema = {
        LENGTH:
        properties.Schema(properties.Schema.INTEGER,
                          _('Length of the string to generate.'),
                          default=32,
                          constraints=[
                              constraints.Range(1, 512),
                          ]),
        SEQUENCE:
        properties.Schema(
            properties.Schema.STRING,
            _('Sequence of characters to build the random string from.'),
            constraints=[
                constraints.AllowedValues([
                    'lettersdigits', 'letters', 'lowercase', 'uppercase',
                    'digits', 'hexdigits', 'octdigits'
                ]),
            ],
            support_status=support.SupportStatus(
                support.DEPRECATED,
                _('Use property %s.') % CHARACTER_CLASSES)),
        CHARACTER_CLASSES:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of character class and their constraints to generate '
              'the random string from.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CHARACTER_CLASSES_CLASS:
                    properties.Schema(
                        properties.Schema.STRING,
                        (_('A character class and its corresponding %(min)s '
                           'constraint to generate the random string from.') %
                         {
                             'min': CHARACTER_CLASSES_MIN
                         }),
                        constraints=[
                            constraints.AllowedValues([
                                'lettersdigits', 'letters', 'lowercase',
                                'uppercase', 'digits', 'hexdigits', 'octdigits'
                            ]),
                        ],
                        default='lettersdigits'),
                    CHARACTER_CLASSES_MIN:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('The minimum number of characters from this '
                          'character class that will be in the generated '
                          'string.'),
                        default=1,
                        constraints=[
                            constraints.Range(1, 512),
                        ])
                })),
        CHARACTER_SEQUENCES:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of character sequences and their constraints to '
              'generate the random string from.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CHARACTER_SEQUENCES_SEQUENCE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('A character sequence and its corresponding %(min)s '
                          'constraint to generate the random string '
                          'from.') % {'min': CHARACTER_SEQUENCES_MIN},
                        required=True),
                    CHARACTER_SEQUENCES_MIN:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('The minimum number of characters from this '
                          'sequence that will be in the generated '
                          'string.'),
                        default=1,
                        constraints=[
                            constraints.Range(1, 512),
                        ])
                })),
        SALT:
        properties.Schema(
            properties.Schema.STRING,
            _('Value which can be set or changed on stack update to trigger '
              'the resource for replacement with a new random string . The '
              'salt value itself is ignored by the random generator.')),
    }

    attributes_schema = {
        VALUE:
        attributes.Schema(_(
            'The random string generated by this resource. This value is '
            'also available by referencing the resource.'),
                          cache_mode=attributes.Schema.CACHE_NONE),
    }

    # Named character classes mapped to their candidate characters.
    _sequences = {
        'lettersdigits': string.ascii_letters + string.digits,
        'letters': string.ascii_letters,
        'lowercase': string.ascii_lowercase,
        'uppercase': string.ascii_uppercase,
        'digits': string.digits,
        'hexdigits': string.digits + 'ABCDEF',
        'octdigits': string.octdigits
    }

    @staticmethod
    def _deprecated_random_string(sequence, length):
        """Generate ``length`` random chars drawn from ``sequence``.

        Kept to serve the deprecated ``sequence`` property.
        """
        rand = random.SystemRandom()
        return ''.join(rand.choice(sequence) for x in range(length))

    def _generate_random_string(self, char_sequences, char_classes, length):
        """Build a random string satisfying the per-class/sequence minimums.

        First the minimum counts from each character sequence and class are
        added, then the remainder is filled from the pool of all provided
        sources, and finally the result is shuffled so the mandatory
        characters are not clustered at the front.
        """
        # This resource is documented as suitable for passwords and
        # secrets, so use the OS CSPRNG (SystemRandom) throughout — the
        # module-level ``random`` functions are a seedable Mersenne
        # Twister and must not generate secret material.
        rand = random.SystemRandom()
        random_string = ""

        # Add the minimum number of chars from each char sequence & char class
        if char_sequences:
            for char_seq in char_sequences:
                seq = char_seq[self.CHARACTER_SEQUENCES_SEQUENCE]
                seq_min = char_seq[self.CHARACTER_SEQUENCES_MIN]
                # NOTE: the loop variable is not named ``_`` to avoid
                # shadowing the module-level gettext alias.
                for _dummy in range(seq_min):
                    random_string += rand.choice(seq)

        if char_classes:
            for char_class in char_classes:
                cclass_class = char_class[self.CHARACTER_CLASSES_CLASS]
                cclass_seq = self._sequences[cclass_class]
                cclass_min = char_class[self.CHARACTER_CLASSES_MIN]
                for _dummy in range(cclass_min):
                    random_string += rand.choice(cclass_seq)

        def random_class_char():
            # One random char from a randomly chosen character class.
            cclass_dict = rand.choice(char_classes)
            cclass_class = cclass_dict[self.CHARACTER_CLASSES_CLASS]
            cclass_seq = self._sequences[cclass_class]
            return rand.choice(cclass_seq)

        def random_seq_char():
            # One random char from a randomly chosen character sequence.
            seq_dict = rand.choice(char_sequences)
            seq = seq_dict[self.CHARACTER_SEQUENCES_SEQUENCE]
            return rand.choice(seq)

        # Fill up rest with random chars from provided sequences & classes
        if char_sequences and char_classes:
            # Weight the source choice by the number of classes vs
            # sequences supplied.
            weighted_choices = ([True] * len(char_classes) +
                                [False] * len(char_sequences))
            while len(random_string) < length:
                if rand.choice(weighted_choices):
                    random_string += random_class_char()
                else:
                    random_string += random_seq_char()

        elif char_sequences:
            while len(random_string) < length:
                random_string += random_seq_char()

        else:
            while len(random_string) < length:
                random_string += random_class_char()

        # Randomize string so mandatory minimum chars are interleaved.
        random_string = ''.join(
            rand.sample(random_string, len(random_string)))
        return random_string

    def validate(self):
        """Reject conflicting properties and impossible length constraints."""
        sequence = self.properties.get(self.SEQUENCE)
        char_sequences = self.properties.get(self.CHARACTER_SEQUENCES)
        char_classes = self.properties.get(self.CHARACTER_CLASSES)

        if sequence and (char_sequences or char_classes):
            msg = (_("Cannot use deprecated '%(seq)s' property along with "
                     "'%(char_seqs)s' or '%(char_classes)s' properties") % {
                         'seq': self.SEQUENCE,
                         'char_seqs': self.CHARACTER_SEQUENCES,
                         'char_classes': self.CHARACTER_CLASSES
                     })
            raise exception.StackValidationFailed(message=msg)

        def char_min(char_dicts, min_prop):
            # Sum of the per-entry minimums; 0 when the property is unset.
            if char_dicts:
                return sum(char_dict[min_prop] for char_dict in char_dicts)
            return 0

        length = self.properties.get(self.LENGTH)
        min_length = (char_min(char_sequences, self.CHARACTER_SEQUENCES_MIN) +
                      char_min(char_classes, self.CHARACTER_CLASSES_MIN))
        if min_length > length:
            msg = _("Length property cannot be smaller than combined "
                    "character class and character sequence minimums")
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Generate the string, store it redacted, and use it as the id."""
        char_sequences = self.properties.get(self.CHARACTER_SEQUENCES)
        char_classes = self.properties.get(self.CHARACTER_CLASSES)
        length = self.properties.get(self.LENGTH)

        if char_sequences or char_classes:
            random_string = self._generate_random_string(
                char_sequences, char_classes, length)
        else:
            sequence = self.properties.get(self.SEQUENCE)
            if not sequence:  # Deprecated property not provided, use a default
                sequence = "lettersdigits"

            char_seq = self._sequences[sequence]
            random_string = self._deprecated_random_string(char_seq, length)

        # redact=True keeps the secret out of plain-text resource data.
        self.data_set('value', random_string, redact=True)
        self.resource_id_set(random_string)

    def _resolve_attribute(self, name):
        """Return the stored random value for the 'value' attribute."""
        if name == self.VALUE:
            return self.data().get(self.VALUE)
Ejemplo n.º 13
0
class PoolMember(neutron.NeutronResource):
    """A resource for managing LBaaS v2 Pool Members.

    A pool member represents a single backend node.
    """

    support_status = support.SupportStatus(version='6.0.0')

    required_service_extension = 'lbaasv2'

    entity = 'lbaas_member'

    res_info_key = 'member'

    PROPERTIES = (
        POOL, ADDRESS, PROTOCOL_PORT, WEIGHT, ADMIN_STATE_UP,
        SUBNET,
    ) = (
        'pool', 'address', 'protocol_port', 'weight', 'admin_state_up',
        'subnet'
    )

    ATTRIBUTES = (
        ADDRESS_ATTR, POOL_ID_ATTR
    ) = (
        'address', 'pool_id'
    )

    properties_schema = {
        POOL: properties.Schema(
            properties.Schema.STRING,
            _('Name or ID of the load balancing pool.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('neutron.lbaas.pool')
            ]
        ),
        ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address of the pool member on the pool network.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('ip_addr')
            ]
        ),
        PROTOCOL_PORT: properties.Schema(
            properties.Schema.INTEGER,
            _('Port on which the pool member listens for requests or '
              'connections.'),
            required=True,
            constraints=[
                constraints.Range(1, 65535),
            ]
        ),
        WEIGHT: properties.Schema(
            properties.Schema.INTEGER,
            _('Weight of pool member in the pool (default to 1).'),
            default=1,
            constraints=[
                constraints.Range(0, 256),
            ],
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the pool member.'),
            default=True,
            update_allowed=True
        ),
        SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('Subnet name or ID of this member.'),
            constraints=[
                constraints.CustomConstraint('neutron.subnet')
            ],
            # Make this required until bug #1585100 is resolved.
            required=True
        ),
    }

    attributes_schema = {
        ADDRESS_ATTR: attributes.Schema(
            _('The IP address of the pool member.'),
            type=attributes.Schema.STRING
        ),
        POOL_ID_ATTR: attributes.Schema(
            _('The ID of the pool to which the pool member belongs.'),
            type=attributes.Schema.STRING
        )
    }

    def translation_rules(self, props):
        """Resolve the subnet name/ID property to a subnet ID up front."""
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.SUBNET],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='subnet'
            ),
        ]

    def __init__(self, name, definition, stack):
        super(PoolMember, self).__init__(name, definition, stack)
        # Lazily resolved and cached by the pool_id / lb_id properties.
        self._pool_id = None
        self._lb_id = None

    @property
    def pool_id(self):
        """ID of the parent pool, resolved once from the POOL property."""
        if self._pool_id is None:
            self._pool_id = self.client_plugin().find_resourceid_by_name_or_id(
                self.POOL,
                self.properties[self.POOL])
        return self._pool_id

    @property
    def lb_id(self):
        """ID of the load balancer owning this member's pool.

        Resolved by walking pool -> first listener -> first load balancer,
        and cached for subsequent status polls.
        """
        if self._lb_id is None:
            pool = self.client().show_lbaas_pool(self.pool_id)['pool']

            listener_id = pool['listeners'][0]['id']
            listener = self.client().show_listener(listener_id)['listener']

            self._lb_id = listener['loadbalancers'][0]['id']
        return self._lb_id

    def _check_lb_status(self):
        # The LB serializes configuration changes; every operation below
        # waits on its status before reporting completion.
        return self.client_plugin().check_lb_status(self.lb_id)

    def handle_create(self):
        """Prepare the member body; actual creation happens in the checker.

        Returns the request properties dict passed to
        check_create_complete().
        """
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name())

        self.client_plugin().resolve_pool(
            properties, self.POOL, 'pool_id')
        # pool_id is passed positionally to create_lbaas_member, not in
        # the request body; the API expects 'subnet_id' rather than
        # the property name 'subnet'.
        properties.pop('pool_id')
        properties['subnet_id'] = properties.pop(self.SUBNET)
        return properties

    def check_create_complete(self, properties):
        """Create the member once the LB accepts it, then poll LB status.

        An 'invalid' error means the LB is busy with another change, so the
        create is retried on the next poll.
        """
        if self.resource_id is None:
            try:
                member = self.client().create_lbaas_member(
                    self.pool_id, {'member': properties})['member']
                self.resource_id_set(member['id'])
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def _res_get_args(self):
        # lbaas_member show calls need both the member and pool IDs.
        return [self.resource_id, self.pool_id]

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Defer the update to the checker; returns the property diff."""
        self._update_called = False
        return prop_diff

    def check_update_complete(self, prop_diff):
        """Issue the update once the LB allows it, then poll LB status."""
        if not prop_diff:
            return True

        if not self._update_called:
            try:
                self.client().update_lbaas_member(self.resource_id,
                                                  self.pool_id,
                                                  {'member': prop_diff})
                # Remember the request was accepted so it is not re-sent
                # on subsequent polls.
                self._update_called = True
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def handle_delete(self):
        """Defer the delete to the checker."""
        self._delete_called = False

    def check_delete_complete(self, data):
        """Issue the delete once the LB allows it, then poll LB status.

        Not-found means the member is already gone and counts as success.
        """
        if self.resource_id is None:
            return True

        if not self._delete_called:
            try:
                self.client().delete_lbaas_member(self.resource_id,
                                                  self.pool_id)
                self._delete_called = True
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                elif self.client_plugin().is_not_found(ex):
                    return True
                raise

        return self._check_lb_status()
Ejemplo n.º 14
0
class DesignateDomain(resource.Resource):
    """Heat Template Resource for Designate Domain.

    Designate provides DNS-as-a-Service services for OpenStack. So, domain
    is a realm with an identification string, unique in DNS.
    """

    support_status = support.SupportStatus(
        status=support.HIDDEN,
        version='10.0.0',
        message=_('Use OS::Designate::Zone instead.'),
        previous_status=support.SupportStatus(
            status=support.DEPRECATED,
            version='8.0.0',
            previous_status=support.SupportStatus(version='5.0.0')))

    entity = 'domains'

    default_client_name = 'designate'

    PROPERTIES = (
        NAME, TTL, DESCRIPTION, EMAIL
    ) = (
        'name', 'ttl', 'description', 'email'
    )

    ATTRIBUTES = (
        SERIAL,
    ) = (
        'serial',
    )

    properties_schema = {
        # Based on RFC 1035, length of name is set to max of 255
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Domain name.'),
            required=True,
            constraints=[constraints.Length(max=255)]
        ),
        # Based on RFC 1035, range for ttl is set to 1 to signed 32 bit number
        TTL: properties.Schema(
            properties.Schema.INTEGER,
            _('Time To Live (Seconds).'),
            update_allowed=True,
            constraints=[constraints.Range(min=1,
                                           max=2147483647)]
        ),
        # designate mandates to the max length of 160 for description
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of domain.'),
            update_allowed=True,
            constraints=[constraints.Length(max=160)]
        ),
        EMAIL: properties.Schema(
            properties.Schema.STRING,
            _('Domain email.'),
            update_allowed=True,
            required=True
        )
    }

    attributes_schema = {
        SERIAL: attributes.Schema(
            _("DNS domain serial."),
            type=attributes.Schema.STRING
        ),
    }

    def handle_create(self):
        """Create the domain from the set properties and record its id."""
        # Drop unset properties before passing them to the client plugin.
        args = {k: v for k, v in self.properties.items() if v}
        domain = self.client_plugin().domain_create(**args)

        self.resource_id_set(domain.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed updatable properties to Designate.

        Loops over the updatable properties the same way
        DesignateZone.handle_update does, instead of repeating the
        per-property boilerplate.
        """
        args = dict()

        for prp in (self.EMAIL, self.TTL, self.DESCRIPTION):
            if prop_diff.get(prp):
                args[prp] = prop_diff.get(prp)

        if args:
            args['id'] = self.resource_id
            self.client_plugin().domain_update(**args)

    def _resolve_attribute(self, name):
        """Resolve the 'serial' attribute from the live domain."""
        if self.resource_id is None:
            return
        if name == self.SERIAL:
            domain = self.client().domains.get(self.resource_id)
            return domain.serial

    # FIXME(kanagaraj-manickam) Remove this method once designate defect
    # 1485552 is fixed.
    def _show_resource(self):
        return dict(self.client().domains.get(self.resource_id).items())

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Map live designate data onto this resource's property names."""
        return {key: resource_data.get(key) for key in self.PROPERTIES}
Ejemplo n.º 15
0
class RandomString(resource.Resource):
    """A resource which generates a random string.

    This is useful for configuring passwords and secrets on services.
    The generated value is stored redacted in resource data and exposed
    through the ``value`` attribute.
    """
    PROPERTIES = (
        LENGTH,
        SEQUENCE,
        SALT,
    ) = (
        'length',
        'sequence',
        'salt',
    )

    ATTRIBUTES = (VALUE,) = ('value',)

    properties_schema = {
        LENGTH:
        properties.Schema(properties.Schema.INTEGER,
                          _('Length of the string to generate.'),
                          default=32,
                          constraints=[
                              constraints.Range(1, 512),
                          ]),
        SEQUENCE:
        properties.Schema(
            properties.Schema.STRING,
            _('Sequence of characters to build the random string from.'),
            default='lettersdigits',
            constraints=[
                constraints.AllowedValues([
                    'lettersdigits', 'letters', 'lowercase', 'uppercase',
                    'digits', 'hexdigits', 'octdigits'
                ]),
            ]),
        SALT:
        properties.Schema(
            properties.Schema.STRING,
            _('Value which can be set or changed on stack update to trigger '
              'the resource for replacement with a new random string . The '
              'salt value itself is ignored by the random generator.')),
    }

    attributes_schema = {
        VALUE:
        attributes.Schema(
            _('The random string generated by this resource. This value is '
              'also available by referencing the resource.')),
    }

    # Character pools selectable through the ``sequence`` property.
    _sequences = {
        'lettersdigits': string.ascii_letters + string.digits,
        'letters': string.ascii_letters,
        'lowercase': string.ascii_lowercase,
        'uppercase': string.ascii_uppercase,
        'digits': string.digits,
        'hexdigits': string.digits + 'ABCDEF',
        'octdigits': string.octdigits
    }

    @staticmethod
    def _generate_random_string(sequence, length):
        """Return ``length`` characters drawn uniformly from ``sequence``.

        random.SystemRandom is backed by os.urandom, so the result is
        suitable for secrets.
        """
        rand = random.SystemRandom()
        # BUG FIX: use range() instead of the Python-2-only xrange() so
        # this also runs on Python 3.
        return ''.join(rand.choice(sequence) for _ in range(length))

    def handle_create(self):
        """Generate the string, persist it (redacted) and set it as id."""
        length = self.properties.get(self.LENGTH)
        sequence = self._sequences[self.properties.get(self.SEQUENCE)]
        random_string = self._generate_random_string(sequence, length)
        self.data_set('value', random_string, redact=True)
        self.resource_id_set(random_string)

    def _resolve_attribute(self, name):
        """Return the stored random value for the ``value`` attribute."""
        if name == self.VALUE:
            return self.data().get(self.VALUE)
Ejemplo n.º 16
0
class BayModel(resource.Resource):
    """A resource for the BayModel in Magnum.

    A baymodel describes how a bay should be built: base image, node
    flavors, networking and the container orchestration engine.
    """

    support_status = support.SupportStatus(version='5.0.0')

    PROPERTIES = (
        NAME, IMAGE, FLAVOR, MASTER_FLAVOR, KEYPAIR, EXTERNAL_NETWORK,
        FIXED_NETWORK, DNS_NAMESERVER, DOCKER_VOLUME_SIZE,
        SSH_AUTHORIZED_KEY, COE,
    ) = (
        'name', 'image', 'flavor', 'master_flavor', 'keypair',
        'external_network', 'fixed_network', 'dns_nameserver',
        'docker_volume_size', 'ssh_authorized_key', 'coe',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The bay model name.'),
        ),
        IMAGE: properties.Schema(
            properties.Schema.STRING,
            _('The image name or UUID to use as a base image for this '
              'baymodel.'),
            constraints=[constraints.CustomConstraint('glance.image')],
            required=True,
        ),
        FLAVOR: properties.Schema(
            properties.Schema.STRING,
            _('The flavor of this bay model.'),
            constraints=[constraints.CustomConstraint('nova.flavor')],
        ),
        MASTER_FLAVOR: properties.Schema(
            properties.Schema.STRING,
            _('The flavor of the master node for this bay model.'),
            constraints=[constraints.CustomConstraint('nova.flavor')],
        ),
        KEYPAIR: properties.Schema(
            properties.Schema.STRING,
            _('The name or id of the nova ssh keypair.'),
            constraints=[constraints.CustomConstraint('nova.keypair')],
            required=True,
        ),
        EXTERNAL_NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('The external network to attach the Bay.'),
            constraints=[constraints.CustomConstraint('neutron.network')],
            required=True,
        ),
        FIXED_NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('The fixed network to attach the Bay.'),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        DNS_NAMESERVER: properties.Schema(
            properties.Schema.STRING,
            _('The DNS nameserver address.'),
            constraints=[constraints.CustomConstraint('ip_addr')],
        ),
        DOCKER_VOLUME_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('The size in GB of the docker volume.'),
            constraints=[constraints.Range(min=1)],
        ),
        SSH_AUTHORIZED_KEY: properties.Schema(
            properties.Schema.STRING,
            _('The SSH Authorized Key.'),
        ),
        COE: properties.Schema(
            properties.Schema.STRING,
            _('The Container Orchestration Engine for this bay model.'),
            constraints=[constraints.AllowedValues(['kubernetes', 'swarm'])],
            required=True,
        ),
    }

    default_client_name = 'magnum'

    def handle_create(self):
        """Create the baymodel and store its UUID as the resource id."""
        # Map each heat property onto the magnum client keyword it feeds.
        prop_to_arg = (
            (self.NAME, 'name'),
            (self.IMAGE, 'image_id'),
            (self.FLAVOR, 'flavor_id'),
            (self.MASTER_FLAVOR, 'master_flavor_id'),
            (self.KEYPAIR, 'keypair_id'),
            (self.EXTERNAL_NETWORK, 'external_network_id'),
            (self.FIXED_NETWORK, 'fixed_network'),
            (self.DNS_NAMESERVER, 'dns_nameserver'),
            (self.DOCKER_VOLUME_SIZE, 'docker_volume_size'),
            (self.SSH_AUTHORIZED_KEY, 'ssh_authorized_key'),
            (self.COE, 'coe'),
        )
        args = {arg: self.properties[prop] for prop, arg in prop_to_arg}
        baymodel = self.client().baymodels.create(**args)
        self.resource_id_set(baymodel.uuid)

    def handle_delete(self):
        """Delete the baymodel; an already-gone baymodel is not an error."""
        if not self.resource_id:
            return
        try:
            self.client().baymodels.delete(self.resource_id)
        except Exception as ex:
            # Swallow "not found"; anything else is re-raised.
            self.client_plugin().ignore_not_found(ex)
Ejemplo n.º 17
0
class Volume(resource.Resource):
    """Cinder-backed volume resource with CloudFormation-style properties.

    Creates a cinder volume (optionally restored from a backup), allows
    the size to be increased on update, and can back the volume up
    before deletion when the cinder backups extension is available
    (``volume_backups`` importable at module scope).
    """

    PROPERTIES = (
        AVAILABILITY_ZONE, SIZE, BACKUP_ID, TAGS,
    ) = (
        'AvailabilityZone', 'Size', 'SnapshotId', 'Tags',
    )

    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )

    properties_schema = {
        AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _('The availability zone in which the volume will be created.'),
            required=True
        ),
        SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('The size of the volume in GB. '
              'On update only increase in size is supported.'),
            update_allowed=True,
            constraints=[
                constraints.Range(min=1),
            ]
        ),
        BACKUP_ID: properties.Schema(
            properties.Schema.STRING,
            _('If specified, the backup used as the source to create the '
              'volume.')
        ),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            _('The list of tags to associate with the volume.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    TAG_KEY: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                    TAG_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                },
            )
        ),
    }

    # Volume states that mean creation is still in progress.
    _volume_creating_status = ['creating', 'restoring-backup']

    def _display_name(self):
        # Volume display name defaults to the heat physical resource name.
        return self.physical_resource_name()

    def _display_description(self):
        # Description mirrors the physical resource name as well.
        return self.physical_resource_name()

    def _create_arguments(self):
        """Build keyword arguments for cinder volumes.create().

        Tags are converted from the CFN-style ``[{Key: .., Value: ..}]``
        list into a flat dict passed to cinder as volume metadata.
        """
        if self.properties[self.TAGS]:
            tags = dict((tm[self.TAG_KEY], tm[self.TAG_VALUE])
                        for tm in self.properties[self.TAGS])
        else:
            tags = None

        return {
            'size': self.properties[self.SIZE],
            'availability_zone': (self.properties[self.AVAILABILITY_ZONE] or
                                  None),
            'metadata': tags
        }

    def handle_create(self):
        """Create the volume, either fresh or restored from a backup.

        Returns the volume object, which is handed to
        check_create_complete() for polling.
        """
        backup_id = self.properties.get(self.BACKUP_ID)
        cinder = self.cinder()
        if backup_id is not None:
            if volume_backups is None:
                raise exception.Error(_('Backups not supported.'))
            # The restore call creates the volume; rename it afterwards
            # so it matches this heat resource.
            vol_id = cinder.restores.restore(backup_id).volume_id

            vol = cinder.volumes.get(vol_id)
            vol.update(
                display_name=self._display_name(),
                display_description=self._display_description())
        else:
            vol = cinder.volumes.create(
                display_name=self._display_name(),
                display_description=self._display_description(),
                **self._create_arguments())
        self.resource_id_set(vol.id)

        return vol

    def check_create_complete(self, vol):
        """Poll until the volume leaves the creating states."""
        vol.get()

        if vol.status == 'available':
            return True
        elif vol.status in self._volume_creating_status:
            return False
        else:
            # Any other status is treated as a creation failure.
            raise exception.Error(vol.status)

    def _backup(self):
        """Task: back the volume up and wait for the backup to finish."""
        backup = self.cinder().backups.create(self.resource_id)
        while backup.status == 'creating':
            yield
            backup.get()
        if backup.status != 'available':
            raise exception.Error(backup.status)

    @scheduler.wrappertask
    def _delete(self, backup=False):
        """Task: optionally back up, then delete the volume.

        Completes when cinder reports the volume gone (NotFound).
        """
        if self.resource_id is not None:
            try:
                vol = self.cinder().volumes.get(self.resource_id)

                if backup:
                    yield self._backup()
                    vol.get()

                if vol.status == 'in-use':
                    LOG.warn(_('can not delete volume when in-use'))
                    raise exception.Error(_('Volume in use'))

                vol.delete()
                while True:
                    yield
                    vol.get()
            except clients.cinderclient.exceptions.NotFound:
                # Volume is gone - clear the resource id and finish.
                self.resource_id_set(None)

    # handle_snapshot_delete is only defined when the cinder backups
    # extension is importable.
    if volume_backups is not None:
        def handle_snapshot_delete(self, state):
            # Only back up volumes of successfully created/updated
            # resources; failed ones are deleted without a backup.
            backup = state not in ((self.CREATE, self.FAILED),
                                   (self.UPDATE, self.FAILED))

            delete_task = scheduler.TaskRunner(self._delete, backup=backup)
            delete_task.start()
            return delete_task

    def handle_delete(self):
        """Start the asynchronous delete task."""
        delete_task = scheduler.TaskRunner(self._delete)
        delete_task.start()
        return delete_task

    def check_delete_complete(self, delete_task):
        """Step the delete task; True once it has finished."""
        return delete_task.step()

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Handle size updates; only growing the volume is supported.

        An attached volume must be detached before extending and then
        re-attached, so up to three sequential tasks are queued.
        """
        checkers = []
        if self.SIZE in prop_diff:
            new_size = prop_diff[self.SIZE]
            vol = self.cinder().volumes.get(self.resource_id)

            if new_size < vol.size:
                raise exception.NotSupported(feature=_("Shrinking volume"))

            elif new_size > vol.size:
                if vol.attachments:
                    #NOTE(pshchelo):
                    # this relies on current behaviour of cinder attachments,
                    # i.e. volume attachments is a list with len<=1,
                    # so the volume can be attached only to single instance,
                    # and id of attachment is the same as id of the volume
                    # it describes, so detach/attach the same volume
                    # will not change volume attachment id.
                    server_id = vol.attachments[0]['server_id']
                    device = vol.attachments[0]['device']
                    attachment_id = vol.attachments[0]['id']
                    detach_task = VolumeDetachTask(self.stack, server_id,
                                                   attachment_id)
                    checkers.append(scheduler.TaskRunner(detach_task))
                    extend_task = VolumeExtendTask(self.stack, vol.id,
                                                   new_size)
                    checkers.append(scheduler.TaskRunner(extend_task))
                    attach_task = VolumeAttachTask(self.stack, server_id,
                                                   vol.id, device)
                    checkers.append(scheduler.TaskRunner(attach_task))

                else:
                    extend_task = VolumeExtendTask(self.stack, vol.id,
                                                   new_size)
                    checkers.append(scheduler.TaskRunner(extend_task))

        # Kick off the first task; the rest are chained in
        # check_update_complete().
        if checkers:
            checkers[0].start()
        return checkers

    def check_update_complete(self, checkers):
        """Run the queued tasks in order; True once all are done."""
        for checker in checkers:
            if not checker.started():
                checker.start()
            if not checker.step():
                return False
        return True
Ejemplo n.º 18
0
class GlanceWebImage(resource.Resource):
    """A resource managing images in Glance using web-download import.

    This provides image support for recent Glance installation.
    The image record is created first and its data is then imported
    asynchronously from ``location`` via the web-download import method.
    """

    support_status = support.SupportStatus(version='12.0.0')

    PROPERTIES = (NAME, IMAGE_ID, MIN_DISK, MIN_RAM, PROTECTED, DISK_FORMAT,
                  CONTAINER_FORMAT, LOCATION, TAGS, ARCHITECTURE, KERNEL_ID,
                  OS_DISTRO, OS_VERSION, OWNER, VISIBILITY,
                  RAMDISK_ID) = ('name', 'id', 'min_disk', 'min_ram',
                                 'protected', 'disk_format',
                                 'container_format', 'location', 'tags',
                                 'architecture', 'kernel_id', 'os_distro',
                                 'os_version', 'owner', 'visibility',
                                 'ramdisk_id')

    # UUID pattern used to validate kernel/ramdisk image references.
    glance_id_pattern = ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
                         '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$')

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Name for the image. The name of an image is not '
              'unique to a Image Service node.')),
        IMAGE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The image ID. Glance will generate a UUID if not specified.')),
        MIN_DISK:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Amount of disk space (in GB) required to boot image. '
              'Default value is 0 if not specified '
              'and means no limit on the disk size.'),
            constraints=[
                constraints.Range(min=0),
            ],
            default=0),
        MIN_RAM:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Amount of ram (in MB) required to boot image. Default value '
              'is 0 if not specified and means no limit on the ram size.'),
            constraints=[
                constraints.Range(min=0),
            ],
            default=0),
        PROTECTED:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether the image can be deleted. If the value is True, '
              'the image is protected and cannot be deleted.'),
            default=False),
        DISK_FORMAT:
        properties.Schema(properties.Schema.STRING,
                          _('Disk format of image.'),
                          required=True,
                          constraints=[
                              constraints.AllowedValues([
                                  'ami', 'ari', 'aki', 'vhd', 'vhdx', 'vmdk',
                                  'raw', 'qcow2', 'vdi', 'iso', 'ploop'
                              ])
                          ]),
        CONTAINER_FORMAT:
        properties.Schema(properties.Schema.STRING,
                          _('Container format of image.'),
                          required=True,
                          constraints=[
                              constraints.AllowedValues([
                                  'ami', 'ari', 'aki', 'bare', 'ovf', 'ova',
                                  'docker'
                              ])
                          ]),
        LOCATION:
        properties.Schema(
            properties.Schema.STRING,
            _('URL where the data for this image already resides. For '
              'example, if the image data is stored in swift, you could '
              'specify "swift://example.com/container/obj".'),
            required=True,
        ),
        TAGS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of image tags.'),
            update_allowed=True,
        ),
        ARCHITECTURE:
        properties.Schema(
            properties.Schema.STRING,
            _('Operating system architecture.'),
            update_allowed=True,
        ),
        KERNEL_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of image stored in Glance that should be used as '
              'the kernel when booting an AMI-style image.'),
            update_allowed=True,
            constraints=[constraints.AllowedPattern(glance_id_pattern)]),
        OS_DISTRO:
        properties.Schema(
            properties.Schema.STRING,
            _('The common name of the operating system distribution '
              'in lowercase.'),
            update_allowed=True,
        ),
        OS_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _('Operating system version as specified by the distributor.'),
            update_allowed=True,
        ),
        OWNER:
        properties.Schema(
            properties.Schema.STRING,
            _('Owner of the image.'),
            update_allowed=True,
        ),
        VISIBILITY:
        properties.Schema(properties.Schema.STRING,
                          _('Scope of image accessibility.'),
                          update_allowed=True,
                          default='private',
                          constraints=[
                              constraints.AllowedValues(
                                  ['public', 'private', 'community', 'shared'])
                          ]),
        RAMDISK_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of image stored in Glance that should be used as '
              'the ramdisk when booting an AMI-style image.'),
            update_allowed=True,
            constraints=[constraints.AllowedPattern(glance_id_pattern)])
    }

    default_client_name = 'glance'

    entity = 'images'

    def handle_create(self):
        """Create the image record, then start the web-download import."""
        # Pass only properties the user actually set; glance applies its
        # own defaults for the rest.
        args = dict(
            (k, v) for k, v in self.properties.items() if v is not None)

        # LOCATION is not a create() argument; it feeds the import call.
        location = args.pop(self.LOCATION)
        images = self.client().images
        image_id = images.create(**args).id
        self.resource_id_set(image_id)

        images.image_import(image_id, method='web-download', uri=location)

        return image_id

    def check_create_complete(self, image_id):
        """Creation completes once the imported image goes active."""
        image = self.client().images.get(image_id)
        return image.status == 'active'

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply changed properties; tags go through the image-tags API.

        BUG FIX: images.update() is now only called when property
        changes remain. Previously a tags-only diff still issued an
        empty update call, and a None prop_diff raised TypeError on the
        ``**`` expansion.
        """
        prop_diff = prop_diff or {}
        if self.TAGS in prop_diff:
            existing_tags = self.properties.get(self.TAGS) or []
            diff_tags = prop_diff.pop(self.TAGS) or []

            new_tags = set(diff_tags) - set(existing_tags)
            for tag in new_tags:
                self.client().image_tags.update(self.resource_id, tag)

            removed_tags = set(existing_tags) - set(diff_tags)
            for tag in removed_tags:
                # Removing a tag glance no longer knows about is fine.
                with self.client_plugin().ignore_not_found:
                    self.client().image_tags.delete(self.resource_id, tag)

        if prop_diff:
            self.client().images.update(self.resource_id, **prop_diff)

    def validate(self):
        """Reject disk/container format mixes glance would refuse."""
        super(GlanceWebImage, self).validate()
        container_format = self.properties[self.CONTAINER_FORMAT]
        if (container_format in ['ami', 'ari', 'aki']
                and self.properties[self.DISK_FORMAT] != container_format):
            msg = _("Invalid mix of disk and container formats. When "
                    "setting a disk or container format to one of 'aki', "
                    "'ari', or 'ami', the container and disk formats must "
                    "match.")
            raise exception.StackValidationFailed(message=msg)

    def get_live_resource_data(self):
        """Fetch live image data; deleted/killed images count as gone."""
        image_data = super(GlanceWebImage, self).get_live_resource_data()
        if image_data.get('status') in ('deleted', 'killed'):
            raise exception.EntityNotFound(entity='Resource', name=self.name)
        return image_data

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Map live image data onto this resource's properties.

        LOCATION is skipped (not reported by glance); the image id is
        only reported when it differs from what heat already tracks.
        """
        image_reality = {}

        for key in self.PROPERTIES:
            if key == self.LOCATION:
                continue
            if key == self.IMAGE_ID:
                if (resource_properties.get(self.IMAGE_ID) is not None or
                        resource_data.get(self.IMAGE_ID) != self.resource_id):
                    image_reality.update(
                        {self.IMAGE_ID: resource_data.get(self.IMAGE_ID)})
                else:
                    image_reality.update({self.IMAGE_ID: None})
            else:
                image_reality.update({key: resource_data.get(key)})

        return image_reality
Ejemplo n.º 19
0
class Instance(resource.Resource):
    """OpenStack cloud database instance resource.

    Trove is Database as a Service for OpenStack. It's designed to run entirely
    on OpenStack, with the goal of allowing users to quickly and easily utilize
    the features of a relational or non-relational database without the burden
    of handling complex administrative tasks.
    """

    support_status = support.SupportStatus(version='2014.1')

    TROVE_STATUS = (
        ERROR,
        FAILED,
        ACTIVE,
    ) = (
        'ERROR',
        'FAILED',
        'ACTIVE',
    )

    TROVE_STATUS_REASON = {
        FAILED:
        _('The database instance was created, but heat failed to set '
          'up the datastore. If a database instance is in the FAILED '
          'state, it should be deleted and a new one should be '
          'created.'),
        ERROR:
        _('The last operation for the database instance failed due to '
          'an error.'),
    }

    BAD_STATUSES = (ERROR, FAILED)
    PROPERTIES = (
        NAME,
        FLAVOR,
        SIZE,
        DATABASES,
        USERS,
        AVAILABILITY_ZONE,
        RESTORE_POINT,
        DATASTORE_TYPE,
        DATASTORE_VERSION,
        NICS,
        REPLICA_OF,
        REPLICA_COUNT,
    ) = ('name', 'flavor', 'size', 'databases', 'users', 'availability_zone',
         'restore_point', 'datastore_type', 'datastore_version', 'networks',
         'replica_of', 'replica_count')

    _DATABASE_KEYS = (
        DATABASE_CHARACTER_SET,
        DATABASE_COLLATE,
        DATABASE_NAME,
    ) = (
        'character_set',
        'collate',
        'name',
    )

    _USER_KEYS = (
        USER_NAME,
        USER_PASSWORD,
        USER_HOST,
        USER_DATABASES,
    ) = (
        'name',
        'password',
        'host',
        'databases',
    )

    _NICS_KEYS = (NET, PORT, V4_FIXED_IP) = ('network', 'port', 'fixed_ip')

    ATTRIBUTES = (
        HOSTNAME,
        HREF,
    ) = (
        'hostname',
        'href',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the DB instance to create.'),
                          update_allowed=True,
                          constraints=[
                              constraints.Length(max=255),
                          ]),
        FLAVOR:
        properties.Schema(
            properties.Schema.STRING,
            _('Reference to a flavor for creating DB instance.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.CustomConstraint('trove.flavor')]),
        DATASTORE_TYPE:
        properties.Schema(properties.Schema.STRING,
                          _("Name of registered datastore type."),
                          constraints=[constraints.Length(max=255)]),
        DATASTORE_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _("Name of the registered datastore version. "
              "It must exist for provided datastore type. "
              "Defaults to using single active version. "
              "If several active versions exist for provided datastore type, "
              "explicit value for this parameter must be specified."),
            constraints=[constraints.Length(max=255)]),
        SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Database volume size in GB.'),
                          required=True,
                          update_allowed=True,
                          constraints=[
                              constraints.Range(1, 150),
                          ]),
        NICS:
        properties.Schema(
            properties.Schema.LIST,
            _("List of network interfaces to create on instance."),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NET:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of the network to attach this NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT,
                              'net': NET
                          },
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ]),
                    PORT:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of Neutron port to attach this '
                          'NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT,
                              'net': NET
                          },
                        constraints=[
                            constraints.CustomConstraint('neutron.port')
                        ],
                    ),
                    V4_FIXED_IP:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Fixed IPv4 address for this NIC.'),
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                },
            ),
        ),
        DATABASES:
        properties.Schema(
            properties.Schema.LIST,
            _('List of databases to be created on DB instance creation.'),
            default=[],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    DATABASE_CHARACTER_SET:
                    properties.Schema(properties.Schema.STRING,
                                      _('Set of symbols and encodings.'),
                                      default='utf8'),
                    DATABASE_COLLATE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Set of rules for comparing characters in a '
                          'character set.'),
                        default='utf8_general_ci'),
                    DATABASE_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Specifies database names for creating '
                          'databases on instance creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=64),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_\-]+'
                                                       r'[a-zA-Z0-9_@?#\s\-]*'
                                                       r'[a-zA-Z0-9_\-]+'),
                        ]),
                },
            )),
        USERS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of users to be created on DB instance creation.'),
            default=[],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    USER_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('User name to create a user on instance '
                          'creation.'),
                        required=True,
                        update_allowed=True,
                        constraints=[
                            constraints.Length(max=16),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                    USER_PASSWORD:
                    properties.Schema(properties.Schema.STRING,
                                      _('Password for those users on instance '
                                        'creation.'),
                                      required=True,
                                      update_allowed=True,
                                      constraints=[
                                          constraints.AllowedPattern(
                                              r'[a-zA-Z0-9_]+'
                                              r'[a-zA-Z0-9_@?#\s]*'
                                              r'[a-zA-Z0-9_]+'),
                                      ]),
                    USER_HOST:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The host from which a user is allowed to '
                          'connect to the database.'),
                        default='%',
                        update_allowed=True),
                    USER_DATABASES:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('Names of databases that those users can '
                          'access on instance creation.'),
                        schema=properties.Schema(properties.Schema.STRING, ),
                        required=True,
                        update_allowed=True,
                        constraints=[
                            constraints.Length(min=1),
                        ]),
                },
            )),
        AVAILABILITY_ZONE:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the availability zone for DB instance.')),
        RESTORE_POINT:
        properties.Schema(properties.Schema.STRING,
                          _('DB instance restore point.')),
        REPLICA_OF:
        properties.Schema(
            properties.Schema.STRING,
            _('Identifier of the source instance to replicate.'),
            support_status=support.SupportStatus(version='5.0.0')),
        REPLICA_COUNT:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The number of replicas to be created.'),
            support_status=support.SupportStatus(version='5.0.0')),
    }

    # Attributes templates can read back via get_attr.
    attributes_schema = {
        HOSTNAME:
        attributes.Schema(_("Hostname of the instance."),
                          type=attributes.Schema.STRING),
        HREF:
        attributes.Schema(_("Api endpoint reference of the instance."),
                          type=attributes.Schema.STRING),
    }

    # Heat client plugin used for API calls made by this resource.
    default_client_name = 'trove'

    # troveclient manager name used by the default entity lookup machinery.
    entity = 'instances'

    def translation_rules(self, properties):
        """Resolve a flavor name or ID into a concrete flavor ID."""
        resolve_flavor = translation.TranslationRule(
            properties,
            translation.TranslationRule.RESOLVE,
            [self.FLAVOR],
            client_plugin=self.client_plugin(),
            finder='find_flavor_by_name_or_id')
        return [resolve_flavor]

    def __init__(self, name, json_snippet, stack):
        """Initialize the resource and its lazily-populated caches."""
        super(Instance, self).__init__(name, json_snippet, stack)
        # Both are fetched on demand and memoized.
        self._dbinstance = None
        self._href = None

    @property
    def dbinstance(self):
        """Return the trove dbinstance, fetching it on first access."""
        if self._dbinstance or not self.resource_id:
            return self._dbinstance
        self._dbinstance = self.client().instances.get(self.resource_id)
        return self._dbinstance

    def _dbinstance_name(self):
        """Return the configured name, falling back to a generated one."""
        return self.properties[self.NAME] or self.physical_resource_name()

    def handle_create(self):
        """Create cloud database instance."""
        props = self.properties
        self.flavor = props[self.FLAVOR]
        self.volume = {'size': props[self.SIZE]}
        self.databases = props[self.DATABASES]
        self.users = props[self.USERS]
        self.datastore_type = props[self.DATASTORE_TYPE]
        self.datastore_version = props[self.DATASTORE_VERSION]

        restore_point = props[self.RESTORE_POINT]
        if restore_point:
            restore_point = {"backupRef": restore_point}

        # troveclient expects each user's databases as a list of dicts.
        for user in self.users:
            user[self.USER_DATABASES] = [
                {'name': db} for db in user.get(self.USER_DATABASES, [])
            ]

        # Translate NIC properties into troveclient's format, resolving
        # network/port names through neutron where one was supplied.
        nics = []
        for nic in props[self.NICS]:
            nic_dict = {}
            net = nic.get(self.NET)
            port = nic.get(self.PORT)
            if net or port:
                neutron = self.client_plugin('neutron')
            if net:
                nic_dict['net-id'] = neutron.find_resourceid_by_name_or_id(
                    neutron.RES_TYPE_NETWORK, net)
            if port:
                nic_dict['port-id'] = neutron.find_resourceid_by_name_or_id(
                    neutron.RES_TYPE_PORT, port)
            ip = nic.get(self.V4_FIXED_IP)
            if ip:
                nic_dict['v4-fixed-ip'] = ip
            nics.append(nic_dict)

        # create DB instance
        instance = self.client().instances.create(
            self._dbinstance_name(),
            self.flavor,
            volume=self.volume,
            databases=self.databases,
            users=self.users,
            restorePoint=restore_point,
            availability_zone=props[self.AVAILABILITY_ZONE],
            datastore=self.datastore_type,
            datastore_version=self.datastore_version,
            nics=nics,
            replica_of=props[self.REPLICA_OF],
            replica_count=props[self.REPLICA_COUNT])
        self.resource_id_set(instance.id)
        return instance.id

    def _refresh_instance(self, instance_id):
        """Fetch the instance, tolerating API over-limit responses.

        Returns None when trove replies with an over-limit error so the
        caller can simply retry later; any other exception propagates.
        """
        try:
            return self.client().instances.get(instance_id)
        except Exception as exc:
            if not self.client_plugin().is_over_limit(exc):
                raise
            LOG.warning(
                "Stack %(name)s (%(id)s) received an "
                "OverLimit response during instance.get():"
                " %(exception)s", {
                    'name': self.stack.name,
                    'id': self.stack.id,
                    'exception': exc
                })
            return None

    def check_create_complete(self, instance_id):
        """Check if cloud DB instance creation is complete."""
        instance = self._refresh_instance(instance_id)  # refresh attributes
        if instance is None:
            # Over-limit response; poll again later.
            return False
        status = instance.status
        if status in self.BAD_STATUSES:
            raise exception.ResourceInError(
                resource_status=status,
                status_reason=self.TROVE_STATUS_REASON.get(
                    status, _("Unknown")))
        if status != self.ACTIVE:
            return False
        LOG.info(
            "Database instance %(database)s created "
            "(flavor:%(flavor)s, volume:%(volume)s, "
            "datastore:%(datastore_type)s, "
            "datastore_version:%(datastore_version)s)", {
                'database': self._dbinstance_name(),
                'flavor': self.flavor,
                'volume': self.volume,
                'datastore_type': self.datastore_type,
                'datastore_version': self.datastore_version
            })
        return True

    def handle_check(self):
        """Verify the instance is currently in the ACTIVE state."""
        instance = self.client().instances.get(self.resource_id)
        self._verify_check_conditions([{
            'attr': 'status',
            'expected': self.ACTIVE,
            'current': instance.status,
        }])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Compute the set of pending changes for check_update_complete.

        Builds an ``updates`` dict keyed by property name. For DATABASES
        and USERS the entries in ``prop_diff`` are annotated in place with
        an 'ACTION' key (CREATE for entries missing on the live instance,
        DELETE for live entries absent from the template); note that the
        lists stored in ``updates`` are the same objects as the ones in
        ``prop_diff``.
        """
        updates = {}
        if prop_diff:
            instance = self.client().instances.get(self.resource_id)
            if self.NAME in prop_diff:
                updates.update({self.NAME: prop_diff[self.NAME]})
            if self.FLAVOR in prop_diff:
                flv = prop_diff[self.FLAVOR]
                updates.update({self.FLAVOR: flv})
            if self.SIZE in prop_diff:
                updates.update({self.SIZE: prop_diff[self.SIZE]})
            if self.DATABASES in prop_diff:
                # Diff live database names against the template's list.
                current = [
                    d.name for d in self.client().databases.list(instance)
                ]
                desired = [
                    d[self.DATABASE_NAME] for d in prop_diff[self.DATABASES]
                ]
                for db in prop_diff[self.DATABASES]:
                    dbname = db[self.DATABASE_NAME]
                    if dbname not in current:
                        db['ACTION'] = self.CREATE
                # Live databases no longer in the template get a synthetic
                # delete marker appended to the same list.
                for dbname in current:
                    if dbname not in desired:
                        deleted = {
                            self.DATABASE_NAME: dbname,
                            'ACTION': self.DELETE
                        }
                        prop_diff[self.DATABASES].append(deleted)
                updates.update({self.DATABASES: prop_diff[self.DATABASES]})
            if self.USERS in prop_diff:
                # Same create/delete marking scheme for users.
                current = [u.name for u in self.client().users.list(instance)]
                desired = [u[self.USER_NAME] for u in prop_diff[self.USERS]]
                for usr in prop_diff[self.USERS]:
                    if usr[self.USER_NAME] not in current:
                        usr['ACTION'] = self.CREATE
                for usr in current:
                    if usr not in desired:
                        prop_diff[self.USERS].append({
                            self.USER_NAME: usr,
                            'ACTION': self.DELETE
                        })
                updates.update({self.USERS: prop_diff[self.USERS]})
        return updates

    def check_update_complete(self, updates):
        """Apply pending updates and report whether all have finished."""
        instance = self.client().instances.get(self.resource_id)
        if instance.status in self.BAD_STATUSES:
            raise exception.ResourceInError(
                resource_status=instance.status,
                status_reason=self.TROVE_STATUS_REASON.get(
                    instance.status, _("Unknown")))
        if not updates:
            return True
        if instance.status != self.ACTIVE:
            dmsg = ("Instance is in status %(now)s. Waiting on status"
                    " %(stat)s")
            LOG.debug(dmsg % {"now": instance.status, "stat": self.ACTIVE})
            return False
        # Each step returns False once it has kicked off work that is still
        # in progress; stop at the first such step and poll again later.
        steps = (
            (self._update_name, self.NAME),
            (self._update_flavor, self.FLAVOR),
            (self._update_size, self.SIZE),
            (self._update_databases, self.DATABASES),
            (self._update_users, self.USERS),
        )
        try:
            return all(
                step(instance, updates.get(key)) for step, key in steps)
        except Exception as exc:
            if self.client_plugin().is_client_exception(exc):
                # the instance could have updated between the time
                # we retrieve it and try to update it so check again
                if self.client_plugin().is_over_limit(exc):
                    LOG.debug("API rate limit: %(ex)s. Retrying.",
                              {'ex': str(exc)})
                    return False
                if "No change was requested" in str(exc):
                    LOG.warning("Unexpected instance state change "
                                "during update. Retrying.")
                    return False
            raise

    def _update_name(self, instance, name):
        """Rename the instance when a different name was requested."""
        if not name or instance.name == name:
            return True
        self.client().instances.edit(instance, name=name)
        return False

    def _update_flavor(self, instance, new_flavor):
        """Resize the instance flavor when it differs from the target."""
        if not new_flavor:
            return True
        current_flav = str(instance.flavor['id'])
        new_flav = str(new_flavor)
        if new_flav == current_flav:
            return True
        LOG.debug("Resizing instance flavor from %(old)s to %(new)s"
                  % {"old": current_flav, "new": new_flav})
        self.client().instances.resize_instance(instance, new_flavor)
        return False

    def _update_size(self, instance, new_size):
        """Resize the instance volume when a new size was requested."""
        if not new_size or instance.volume['size'] == new_size:
            return True
        LOG.debug("Resizing instance storage from %(old)s to %(new)s"
                  % {"old": instance.volume['size'], "new": new_size})
        self.client().instances.resize_volume(instance, new_size)
        return False

    def _update_databases(self, instance, databases):
        """Create and delete databases marked by handle_update."""
        for db in databases or []:
            action = db.get("ACTION")
            if action == self.CREATE:
                # Strip the marker before handing the dict to troveclient.
                db.pop("ACTION", None)
                LOG.debug("Adding new database %(db)s to instance"
                          % {"db": db})
                self.client().databases.create(instance, [db])
            elif action == self.DELETE:
                LOG.debug("Deleting existing database %(db)s from "
                          "instance" % {"db": db['name']})
                self.client().databases.delete(instance, db['name'])
        return True

    def _update_users(self, instance, users):
        """Create, delete, or reconcile users marked by handle_update.

        Entries flagged CREATE/DELETE are added or removed; unmarked
        entries have their host/password attributes updated and their
        database grants reconciled against the live instance.
        """
        if users:
            for usr in users:
                # troveclient expects the user's databases as dicts.
                dbs = [{'name': db} for db in usr.get(self.USER_DATABASES, [])]
                usr[self.USER_DATABASES] = dbs
                if usr.get("ACTION") == self.CREATE:
                    # Strip the marker before handing the dict to troveclient.
                    usr.pop("ACTION", None)
                    dmsg = "Adding new user %(u)s to instance"
                    LOG.debug(dmsg % {"u": usr})
                    self.client().users.create(instance, [usr])
                elif usr.get("ACTION") == self.DELETE:
                    dmsg = ("Deleting existing user %(u)s from " "instance")
                    LOG.debug(dmsg % {"u": usr['name']})
                    self.client().users.delete(instance, usr['name'])
                else:
                    # Existing user: push changed host/password attributes.
                    newattrs = {}
                    if usr.get(self.USER_HOST):
                        newattrs[self.USER_HOST] = usr[self.USER_HOST]
                    if usr.get(self.USER_PASSWORD):
                        newattrs[self.USER_PASSWORD] = usr[self.USER_PASSWORD]
                    if newattrs:
                        # NOTE(review): hostname is taken from the instance,
                        # not from the user's configured host — presumably
                        # intentional; confirm against troveclient semantics.
                        self.client().users.update_attributes(
                            instance,
                            usr['name'],
                            newuserattr=newattrs,
                            hostname=instance.hostname)
                    # Reconcile database access: grant what the template
                    # adds, revoke what it no longer lists.
                    current = self.client().users.get(instance,
                                                      usr[self.USER_NAME])
                    dbs = [db['name'] for db in current.databases]
                    desired = [
                        db['name'] for db in usr.get(self.USER_DATABASES, [])
                    ]
                    grants = [db for db in desired if db not in dbs]
                    revokes = [db for db in dbs if db not in desired]
                    if grants:
                        self.client().users.grant(instance,
                                                  usr[self.USER_NAME], grants)
                    if revokes:
                        self.client().users.revoke(instance,
                                                   usr[self.USER_NAME],
                                                   revokes)
        return True

    def parse_live_resource_data(self, resource_properties, resource_data):
        """A method to parse live resource data to update current resource.

        NOTE: cannot update users from live resource data in case of
        impossibility to get required user password.
        """
        live_dbs = [
            d.name for d in self.client().databases.list(self.resource_id)
        ]
        # Keep template database entries that still exist live, then add
        # bare name-only entries for databases created outside of Heat.
        dbs_reality = []
        for resource_db in resource_properties[self.DATABASES]:
            if resource_db[self.DATABASE_NAME] in live_dbs:
                dbs_reality.append(resource_db)
                live_dbs.remove(resource_db[self.DATABASE_NAME])
        # cannot get any property for databases except for name, so update
        # resource with name
        dbs_reality.extend({self.DATABASE_NAME: db} for db in live_dbs)
        result = {
            self.NAME: resource_data.get('name'),
            self.DATABASES: dbs_reality,
        }
        flavor = resource_data.get('flavor')
        if flavor is not None:
            result[self.FLAVOR] = flavor.get('id')
        volume = resource_data.get('volume')
        if volume is not None:
            result[self.SIZE] = volume['size']
        return result

    def handle_delete(self):
        """Delete a cloud database instance."""
        if not self.resource_id:
            return
        try:
            instance = self.client().instances.get(self.resource_id)
        except Exception as ex:
            # Already gone; nothing left to delete.
            self.client_plugin().ignore_not_found(ex)
            return
        instance.delete()
        return instance.id

    def check_delete_complete(self, instance_id):
        """Check for completion of cloud DB instance deletion."""
        if not instance_id:
            return True
        try:
            # For some time trove instance may continue to live
            self._refresh_instance(instance_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return False
        return True

    def validate(self):
        """Validate any of the provided params."""
        res = super(Instance, self).validate()
        if res:
            return res

        self.client_plugin().validate_datastore(
            self.properties[self.DATASTORE_TYPE],
            self.properties[self.DATASTORE_VERSION],
            self.DATASTORE_TYPE,
            self.DATASTORE_VERSION)

        # check validity of user and databases
        users = self.properties[self.USERS]
        if users:
            databases = self.properties[self.DATABASES]
            if not databases:
                msg = _('Databases property is required if users property '
                        'is provided for resource %s.') % self.name
                raise exception.StackValidationFailed(message=msg)

            db_names = {db[self.DATABASE_NAME] for db in databases}
            for user in users:
                missing_db = [
                    db_name for db_name in user[self.USER_DATABASES]
                    if db_name not in db_names
                ]
                if missing_db:
                    msg = (_('Database %(dbs)s specified for user does '
                             'not exist in databases for resource %(name)s.') %
                           {
                               'dbs': missing_db,
                               'name': self.name
                           })
                    raise exception.StackValidationFailed(message=msg)

        # check validity of NICS
        is_neutron = self.is_using_neutron()
        for nic in self.properties[self.NICS]:
            if nic.get(self.PORT) and not is_neutron:
                msg = _("Can not use %s property on Nova-network.") % self.PORT
                raise exception.StackValidationFailed(message=msg)
            # Exactly one of net/port must be given.
            if bool(nic.get(self.NET)) == bool(nic.get(self.PORT)):
                msg = _("Either %(net)s or %(port)s must be provided.") % {
                    'net': self.NET,
                    'port': self.PORT
                }
                raise exception.StackValidationFailed(message=msg)

    def href(self):
        """Return the instance's API 'self' link, caching the lookup."""
        if not self._href and self.dbinstance:
            if not self.dbinstance.links:
                self._href = None
            else:
                self._href = next(
                    (link[self.HREF] for link in self.dbinstance.links
                     if link['rel'] == 'self'), self._href)
        return self._href

    def _resolve_attribute(self, name):
        """Resolve the hostname/href attributes from the live instance."""
        if self.resource_id is None:
            return None
        if name == self.HOSTNAME:
            return self.dbinstance.hostname
        if name == self.HREF:
            return self.href()
Ejemplo n.º 20
0
class GlanceImage(resource.Resource):
    """A resource managing images in Glance.

    A resource provides managing images that are meant to be used with other
    services.
    """

    support_status = support.SupportStatus(
        status=support.DEPRECATED,
        version='8.0.0',
        message=_('Creating a Glance Image based on an existing URL location '
                  'requires the Glance v1 API, which is deprecated.'),
        previous_status=support.SupportStatus(version='2014.2'))

    PROPERTIES = (NAME, IMAGE_ID, IS_PUBLIC, MIN_DISK, MIN_RAM, PROTECTED,
                  DISK_FORMAT, CONTAINER_FORMAT, LOCATION, TAGS,
                  EXTRA_PROPERTIES, ARCHITECTURE, KERNEL_ID, OS_DISTRO, OWNER,
                  RAMDISK_ID) = ('name', 'id', 'is_public', 'min_disk',
                                 'min_ram', 'protected', 'disk_format',
                                 'container_format', 'location', 'tags',
                                 'extra_properties', 'architecture',
                                 'kernel_id', 'os_distro', 'owner',
                                 'ramdisk_id')

    # Anchored regex for a canonical 8-4-4-4-12 hex UUID.
    glance_id_pattern = ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
                         '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$')

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Name for the image. The name of an image is not '
              'unique to a Image Service node.')),
        IMAGE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The image ID. Glance will generate a UUID if not specified.')),
        IS_PUBLIC:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Scope of image accessibility. Public or private. '
              'Default value is False means private. Note: The policy '
              'setting of glance allows only users with admin roles to create '
              'public image by default.'),
            default=False,
        ),
        MIN_DISK:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Amount of disk space (in GB) required to boot image. '
              'Default value is 0 if not specified '
              'and means no limit on the disk size.'),
            constraints=[
                constraints.Range(min=0),
            ],
            default=0),
        MIN_RAM:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Amount of ram (in MB) required to boot image. Default value '
              'is 0 if not specified and means no limit on the ram size.'),
            constraints=[
                constraints.Range(min=0),
            ],
            default=0),
        PROTECTED:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether the image can be deleted. If the value is True, '
              'the image is protected and cannot be deleted.'),
            default=False),
        DISK_FORMAT:
        properties.Schema(properties.Schema.STRING,
                          _('Disk format of image.'),
                          required=True,
                          constraints=[
                              constraints.AllowedValues([
                                  'ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw',
                                  'qcow2', 'vdi', 'iso'
                              ])
                          ]),
        CONTAINER_FORMAT:
        properties.Schema(properties.Schema.STRING,
                          _('Container format of image.'),
                          required=True,
                          constraints=[
                              constraints.AllowedValues(
                                  ['ami', 'ari', 'aki', 'bare', 'ova', 'ovf'])
                          ]),
        LOCATION:
        properties.Schema(
            properties.Schema.STRING,
            _('URL where the data for this image already resides. For '
              'example, if the image data is stored in swift, you could '
              'specify "swift://example.com/container/obj".'),
            required=True,
        ),
        TAGS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of image tags.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='7.0.0')),
        EXTRA_PROPERTIES:
        properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary properties to associate with the image.'),
            update_allowed=True,
            default={},
            support_status=support.SupportStatus(version='7.0.0')),
        ARCHITECTURE:
        properties.Schema(
            properties.Schema.STRING,
            _('Operating system architecture.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='7.0.0')),
        KERNEL_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of image stored in Glance that should be used as '
              'the kernel when booting an AMI-style image.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='7.0.0'),
            constraints=[constraints.AllowedPattern(glance_id_pattern)]),
        OS_DISTRO:
        properties.Schema(
            properties.Schema.STRING,
            _('The common name of the operating system distribution '
              'in lowercase.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='7.0.0')),
        OWNER:
        properties.Schema(
            properties.Schema.STRING,
            _('Owner of the image.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='7.0.0')),
        RAMDISK_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of image stored in Glance that should be used as '
              'the ramdisk when booting an AMI-style image.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='7.0.0'),
            constraints=[constraints.AllowedPattern(glance_id_pattern)])
    }

    default_client_name = 'glance'

    entity = 'images'

    def handle_create(self):
        """Create the image via the Glance v1 API, then apply extras.

        The image is created through the v1 client (see the class's
        deprecation message re: location-based creation). Tags and the
        newer metadata fields are stripped from the create arguments and
        applied afterwards through the default client.
        """
        args = dict(
            (k, v) for k, v in self.properties.items() if v is not None)

        # Pulled out of the v1 create call and applied below.
        tags = args.pop(self.TAGS, [])
        args['properties'] = args.pop(self.EXTRA_PROPERTIES, {})
        architecture = args.pop(self.ARCHITECTURE, None)
        kernel_id = args.pop(self.KERNEL_ID, None)
        os_distro = args.pop(self.OS_DISTRO, None)
        ramdisk_id = args.pop(self.RAMDISK_ID, None)

        image_id = self.client(version=self.client_plugin().V1).images.create(
            **args).id
        self.resource_id_set(image_id)

        # Apply the remaining attributes through the default client.
        images = self.client().images
        if architecture is not None:
            images.update(image_id, architecture=architecture)
        if kernel_id is not None:
            images.update(image_id, kernel_id=kernel_id)
        if os_distro is not None:
            images.update(image_id, os_distro=os_distro)
        if ramdisk_id is not None:
            images.update(image_id, ramdisk_id=ramdisk_id)

        for tag in tags:
            self.client().image_tags.update(image_id, tag)

        return image_id

    def check_create_complete(self, image_id):
        """Creation is complete once the image reaches 'active'."""
        image = self.client().images.get(image_id)
        return image.status == 'active'

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply tag, extra-property, and plain attribute updates."""
        if prop_diff and self.TAGS in prop_diff:
            existing_tags = self.properties.get(self.TAGS) or []
            diff_tags = prop_diff.pop(self.TAGS) or []

            new_tags = set(diff_tags) - set(existing_tags)
            for tag in new_tags:
                self.client().image_tags.update(self.resource_id, tag)

            removed_tags = set(existing_tags) - set(diff_tags)
            for tag in removed_tags:
                # Tag may already be gone; treat not-found as success.
                with self.client_plugin().ignore_not_found:
                    self.client().image_tags.delete(self.resource_id, tag)

        images = self.client().images

        if self.EXTRA_PROPERTIES in prop_diff:
            # Flatten the new extra properties into the update call and
            # explicitly remove the ones dropped from the template.
            old_properties = self.properties.get(self.EXTRA_PROPERTIES) or {}
            new_properties = prop_diff.pop(self.EXTRA_PROPERTIES)
            prop_diff.update(new_properties)
            remove_props = list(set(old_properties) - set(new_properties))

            # Though remove_props defaults to None within the glanceclient,
            # setting it to a list (possibly []) every time ensures only one
            # calling format to images.update
            images.update(self.resource_id, remove_props, **prop_diff)
        else:
            images.update(self.resource_id, **prop_diff)

    def validate(self):
        """Reject AMI-style container formats with mismatched disk format."""
        super(GlanceImage, self).validate()
        container_format = self.properties[self.CONTAINER_FORMAT]
        if (container_format in ['ami', 'ari', 'aki']
                and self.properties[self.DISK_FORMAT] != container_format):
            msg = _("Invalid mix of disk and container formats. When "
                    "setting a disk or container format to one of 'aki', "
                    "'ari', or 'ami', the container and disk formats must "
                    "match.")
            raise exception.StackValidationFailed(message=msg)

    def get_live_resource_data(self):
        """Treat deleted/killed images as no longer existing."""
        image_data = super(GlanceImage, self).get_live_resource_data()
        if image_data.get('status') in ('deleted', 'killed'):
            raise exception.EntityNotFound(entity='Resource', name=self.name)
        return image_data

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Map live image data back onto this resource's properties."""
        image_reality = {}

        # NOTE(prazumovsky): At first, there's no way to get location from
        # glance; at second, location property is doubtful, because glance
        # client v2 doesn't use location, it uses locations. So, we should
        # get location property from resource properties.
        if self.client().version == 1.0:
            image_reality.update(
                {self.LOCATION: resource_properties[self.LOCATION]})

        for key in self.PROPERTIES:
            if key == self.LOCATION:
                continue
            if key == self.IMAGE_ID:
                # NOTE(review): 'or' here looks like it may have been meant
                # as 'and' (only clear an ID Heat itself generated) — confirm
                # against the resource's update semantics before changing.
                if (resource_properties.get(self.IMAGE_ID) is not None or
                        resource_data.get(self.IMAGE_ID) != self.resource_id):
                    image_reality.update(
                        {self.IMAGE_ID: resource_data.get(self.IMAGE_ID)})
                else:
                    image_reality.update({self.IMAGE_ID: None})
            else:
                image_reality.update({key: resource_data.get(key)})

        return image_reality
Ejemplo n.º 21
0
class LBNode(resource.Resource):
    """Represents a single node of a Rackspace Cloud Load Balancer"""

    default_client_name = 'cloud_lb'

    # Valid values for the node's 'condition' property.
    _CONDITIONS = (
        ENABLED,
        DISABLED,
        DRAINING,
    ) = (
        'ENABLED',
        'DISABLED',
        'DRAINING',
    )

    # Keys passed straight through to the client's Node constructor.
    _NODE_KEYS = (ADDRESS, PORT, CONDITION, TYPE,
                  WEIGHT) = ('address', 'port', 'condition', 'type', 'weight')

    # Keys consumed by this resource itself rather than by the node.
    _OTHER_KEYS = (LOAD_BALANCER, DRAINING_TIMEOUT) = ('load_balancer',
                                                       'draining_timeout')

    PROPERTIES = _NODE_KEYS + _OTHER_KEYS

    properties_schema = {
        LOAD_BALANCER:
        properties.Schema(
            properties.Schema.STRING,
            _("The ID of the load balancer to associate the node with."),
            required=True),
        DRAINING_TIMEOUT:
        properties.Schema(
            properties.Schema.INTEGER,
            _("The time to wait, in seconds, for the node to drain before it "
              "is deleted."),
            default=0,
            constraints=[constraints.Range(min=0)],
            update_allowed=True),
        ADDRESS:
        properties.Schema(properties.Schema.STRING,
                          _("IP address for the node."),
                          required=True),
        PORT:
        properties.Schema(properties.Schema.INTEGER, required=True),
        CONDITION:
        properties.Schema(properties.Schema.STRING,
                          default=ENABLED,
                          constraints=[
                              constraints.AllowedValues(_CONDITIONS),
                          ],
                          update_allowed=True),
        TYPE:
        properties.Schema(properties.Schema.STRING,
                          constraints=[
                              constraints.AllowedValues(
                                  ['PRIMARY', 'SECONDARY']),
                          ],
                          update_allowed=True),
        WEIGHT:
        properties.Schema(properties.Schema.NUMBER,
                          constraints=[
                              constraints.Range(1, 100),
                          ],
                          update_allowed=True),
    }

    def lb(self):
        """Fetch the load balancer this node belongs to.

        Raises LoadbalancerDeleted if the balancer has been deleted or is
        pending deletion.
        """
        lb_id = self.properties.get(self.LOAD_BALANCER)
        lb = self.client().get(lb_id)

        if lb.status in ('DELETED', 'PENDING_DELETE'):
            raise LoadbalancerDeleted(lb_id=lb.id)

        return lb

    def node(self, lb):
        """Return this resource's node object from the given load balancer.

        Raises NodeNotFound if no node on the balancer matches our
        resource id.
        """
        for node in getattr(lb, 'nodes', []):
            if node.id == self.resource_id:
                return node
        raise NodeNotFound(node_id=self.resource_id, lb_id=lb.id)

    def handle_create(self):
        # Creation is deferred to check_create_complete() so it can be
        # retried while the load balancer is temporarily immutable.
        pass

    def check_create_complete(self, *args):
        """Attempt to add the node to the load balancer.

        Returns False (poll again) while the balancer is immutable; True
        once the node is created and its id has been recorded.
        """
        node_args = {k: self.properties.get(k) for k in self._NODE_KEYS}
        node = self.client().Node(**node_args)

        try:
            resp, body = self.lb().add_nodes([node])
        except Exception as exc:
            # Only swallow "load balancer immutable" errors; re-raise the rest.
            if lb_immutable(exc):
                return False
            raise

        new_node = body['nodes'][0]
        node_id = new_node['id']

        self.resource_id_set(node_id)
        return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        # Defer the actual update to check_update_complete() so it can be
        # retried while the load balancer is immutable.
        return prop_diff

    def check_update_complete(self, prop_diff):
        """Apply changed node properties, polling until they take effect."""
        node = self.node(self.lb())
        is_complete = True

        # Push only the properties that differ from the live node state.
        for key in self._NODE_KEYS:
            if key in prop_diff and getattr(node, key, None) != prop_diff[key]:
                setattr(node, key, prop_diff[key])
                is_complete = False

        if is_complete:
            return True

        try:
            node.update()
        except Exception as exc:
            if lb_immutable(exc):
                return False
            raise

        return False

    def handle_delete(self):
        # Record when deletion started; check_delete_complete() compares it
        # against the draining timeout before removing the node.
        return timeutils.utcnow()

    def check_delete_complete(self, deleted_at):
        """Delete the node once the draining timeout has elapsed.

        Until the timeout expires the node is set to DRAINING so existing
        connections can finish; afterwards it is deleted. Returns True once
        the node (or its load balancer) no longer exists.
        """
        if self.resource_id is None:
            return True

        try:
            node = self.node(self.lb())
        except (NotFound, LoadbalancerDeleted, NodeNotFound):
            return True

        # deleted_at may round-trip through JSON as a string between polls.
        if isinstance(deleted_at, six.string_types):
            deleted_at = timeutils.parse_isotime(deleted_at)

        deleted_at = timeutils.normalize_time(deleted_at)
        waited = timeutils.utcnow() - deleted_at
        timeout_secs = self.properties[self.DRAINING_TIMEOUT]
        timeout_secs = datetime.timedelta(seconds=timeout_secs)

        if waited > timeout_secs:
            try:
                node.delete()
            except NotFound:
                return True
            except Exception as exc:
                if lb_immutable(exc):
                    return False
                raise
        elif node.condition != self.DRAINING:
            node.condition = self.DRAINING
            try:
                node.update()
            except Exception as exc:
                if lb_immutable(exc):
                    return False
                raise

        return False
Ejemplo n.º 22
0
class CloudBigData(resource.Resource):
    """Represents a Rackspace Cloud Big Data (Lava) cluster resource."""
    support_status = support.SupportStatus(version='2015.8')

    PROPERTIES = (
        CLUSTER_NAME,
        STACK_ID,
        FLAVOR,
        NUM_SLAVES,
        CLUSTER_LOGIN,
        PUB_KEY_NAME,
        PUB_KEY,
    ) = (
        'clusterName',
        'stackId',
        'flavor',
        'numSlaveNodes',
        'clusterLogin',
        'publicKeyName',
        'publicKey',
    )

    properties_schema = {
        CLUSTER_NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Rackspace Cloud Big Data Cluster Name.'),
                          constraints=[
                              constraints.Length(
                                  max=50,
                                  description="Cluster name is too long.")
                          ],
                          required=True),
        STACK_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('Rackspace Cloud Big Data Stack ID.'),
            constraints=[constraints.CustomConstraint('cbd.stack')],
            required=True),
        FLAVOR:
        properties.Schema(
            properties.Schema.STRING,
            _('Rackspace Cloud Big Data Flavor ID to be used for cluster '
              'slave nodes.'),
            constraints=[constraints.CustomConstraint('cbd.flavor')],
            required=True),
        CLUSTER_LOGIN:
        properties.Schema(properties.Schema.STRING,
                          _('Cluster SSH login.'),
                          constraints=[
                              constraints.Length(
                                  max=50,
                                  description="Cluster SSH login is too "
                                              "long.")
                          ],
                          required=True),
        NUM_SLAVES:
        properties.Schema(properties.Schema.INTEGER,
                          _('How many slave nodes to create in the cluster.'),
                          default=3,
                          constraints=[
                              constraints.Range(
                                  1, 10, "Number of slave nodes must be "
                                  "1-10."),
                          ]),
        PUB_KEY_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Cluster public key name. This key name will be used along with '
              'the publicKey by the Cloud Big Data system to install SSH keys '
              'on to CBD clusters for user access. If the key name already '
              'exists, it will not be overwritten and the existing key will '
              'be used instead.'),
            constraints=[
                constraints.Length(max=50,
                                   description="Public key name is too long.")
            ],
            required=True),
        PUB_KEY:
        properties.Schema(
            properties.Schema.STRING,
            _('Cluster public key used to SSH into cluster nodes.'),
            constraints=[
                constraints.Length(max=1000,
                                   description="Public key is too long.")
            ],
            required=True)
    }

    # NOTE: previously written as "(CBD_VERSION) = ('cbdVersion')", which
    # binds both names to the bare string 'cbdVersion' instead of a
    # one-element tuple. The trailing commas make ATTRIBUTES a real tuple,
    # consistent with the other resources in this file.
    ATTRIBUTES = (
        CBD_VERSION,
    ) = (
        'cbdVersion',
    )

    attributes_schema = {
        CBD_VERSION:
        attributes.Schema(_("Rackspace Cloud Big Data version"),
                          type=attributes.Schema.STRING)
    }

    default_client_name = "cloud_big_data"

    def handle_create(self):
        """Create a Rackspace Cloud Big Data Instance.

        Registers the cluster SSH key (best-effort) and then submits the
        cluster-create request, recording the new cluster id.
        """
        LOG.debug("Cloud Big Data handle_create called.")
        args = dict(self.properties.items())
        # Create the cluster SSH key.
        try:
            self.client().credentials.create_ssh_key(args[self.PUB_KEY_NAME],
                                                     args[self.PUB_KEY])
        except LavaError:
            pass  # A key may already exist

        # Create the cluster.
        flavor_id = self.client_plugin().get_flavor_id(args[self.FLAVOR])
        num_slave_nodes = args[self.NUM_SLAVES]

        node_group_list = [{
            'flavor_id': flavor_id,
            'count': num_slave_nodes,
            'id': 'slave'
        }]
        try:
            cluster = self.client().clusters.create(
                name=args[self.CLUSTER_NAME],
                stack_id=args[self.STACK_ID],
                username=args[self.CLUSTER_LOGIN],
                ssh_keys=[args[self.PUB_KEY_NAME]],
                user_scripts=[],
                node_groups=node_group_list,
                connectors=[])
        except LavaError as exc:
            LOG.warning("Unable to create CBD cluster", exc_info=exc)
            raise
        self.resource_id_set(str(cluster.id))

    def _show_resource(self):
        """Show cluster resource details."""
        return self.client().clusters.get(self.resource_id)

    def check_create_complete(self, ignored):
        """Check the cluster creation status.

        Returns True when the cluster is ACTIVE, False to keep polling,
        and raises LavaError when the cluster entered an error state.
        """
        try:
            cluster = self._show_resource()
        except RequestError as exc:
            # RequestError is the only exception that should be retried and
            # only a 503 HTTP status code should be retried. Only 4xx-5xx
            # codes are returned by this exception.
            if exc.code == 503:
                return False
            raise
        # If any other LavaError-based exception is raised, it is a failed
        # cluster create. Let the exception bubble up to Heat.

        if cluster.status == 'ACTIVE':
            return True
        if cluster.status == 'ERROR':
            raise LavaError("Cluster {} entered an error state".format(
                self.resource_id))
        return False

    def handle_delete(self):
        """Delete a Rackspace Cloud Big Data Instance."""
        LOG.debug("Cloud Big Data handle_delete called.")
        if self.resource_id:
            try:
                self.client().clusters.delete(self.resource_id)
            except LavaError as exc:
                self.client_plugin().ignore_not_found(exc)

    def check_delete_complete(self, ignored):
        """Return True once the cluster can no longer be retrieved.

        The argument is the (unused) return value of handle_delete.
        """
        if self.resource_id is None:
            return True
        try:
            self.client().clusters.get(self.resource_id)
        except LavaError as exc:
            self.client_plugin().ignore_not_found(exc)
            return True
        return False

    def _resolve_attribute(self, name):
        """Return the requested cluster attribute (currently cbdVersion)."""
        # Nothing to resolve before the cluster has been created.
        if self.resource_id is None:
            return None
        try:
            cluster = self.client().clusters.get(self.resource_id)
        except LavaError as exc:
            LOG.debug("Unable to find CBD cluster", exc_info=exc)
            return None

        if name == self.CBD_VERSION:
            return cluster.cbd_version
Ejemplo n.º 23
0
class PoolMember(neutron.NeutronResource):
    """
    A resource to handle load balancer members.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        POOL_ID,
        ADDRESS,
        PROTOCOL_PORT,
        WEIGHT,
        ADMIN_STATE_UP,
    ) = (
        'pool_id',
        'address',
        'protocol_port',
        'weight',
        'admin_state_up',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR,
        TENANT_ID,
        WEIGHT_ATTR,
        ADDRESS_ATTR,
        POOL_ID_ATTR,
        PROTOCOL_PORT_ATTR,
        SHOW,
    ) = (
        'admin_state_up',
        'tenant_id',
        'weight',
        'address',
        'pool_id',
        'protocol_port',
        'show',
    )

    properties_schema = {
        POOL_ID:
        properties.Schema(properties.Schema.STRING,
                          _('The ID of the load balancing pool.'),
                          required=True,
                          update_allowed=True),
        ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('IP address of the pool member on the pool network.'),
            required=True,
            constraints=[constraints.CustomConstraint('ip_addr')]),
        PROTOCOL_PORT:
        properties.Schema(
            properties.Schema.INTEGER,
            _('TCP port on which the pool member listens for requests or '
              'connections.'),
            required=True,
            constraints=[
                constraints.Range(0, 65535),
            ]),
        WEIGHT:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Weight of pool member in the pool (default to 1).'),
            constraints=[
                constraints.Range(0, 256),
            ],
            update_allowed=True),
        ADMIN_STATE_UP:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('The administrative state of the pool member.'),
                          default=True),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR:
        attributes.Schema(_('The administrative state of this pool member.'),
                          type=attributes.Schema.STRING),
        TENANT_ID:
        attributes.Schema(_('Tenant owning the pool member.'),
                          type=attributes.Schema.STRING),
        WEIGHT_ATTR:
        attributes.Schema(_('Weight of the pool member in the pool.'),
                          type=attributes.Schema.STRING),
        ADDRESS_ATTR:
        attributes.Schema(_('IP address of the pool member.'),
                          type=attributes.Schema.STRING),
        POOL_ID_ATTR:
        attributes.Schema(_('The ID of the load balancing pool.'),
                          type=attributes.Schema.STRING),
        PROTOCOL_PORT_ATTR:
        attributes.Schema(_(
            'TCP port on which the pool member listens for requests or '
            'connections.'),
                          type=attributes.Schema.STRING),
        SHOW:
        attributes.Schema(_('All attributes.'), type=attributes.Schema.MAP),
    }

    def handle_create(self):
        """Create the pool member in Neutron and record its id."""
        pool = self.properties[self.POOL_ID]
        client = self.neutron()
        protocol_port = self.properties[self.PROTOCOL_PORT]
        address = self.properties[self.ADDRESS]
        admin_state_up = self.properties[self.ADMIN_STATE_UP]
        weight = self.properties[self.WEIGHT]

        params = {
            'pool_id': pool,
            'address': address,
            'protocol_port': protocol_port,
            'admin_state_up': admin_state_up
        }

        # WEIGHT is optional; only send it when the template provided one so
        # Neutron applies its own default otherwise.
        if weight is not None:
            params['weight'] = weight

        member = client.create_member({'member': params})['member']
        self.resource_id_set(member['id'])

    def _show_resource(self):
        """Return the live member data from Neutron (backs the attributes)."""
        return self.neutron().show_member(self.resource_id)['member']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push any changed, update-allowed properties to Neutron."""
        if prop_diff:
            self.neutron().update_member(self.resource_id,
                                         {'member': prop_diff})

    def handle_delete(self):
        """Delete the pool member, ignoring the case where it is gone."""
        client = self.neutron()
        try:
            client.delete_member(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
Ejemplo n.º 24
0
                    properties.Schema.STRING,
                    _("Start time for the time constraint. "
                      "A CRON expression property."),
                    constraints=[
                        constraints.CustomConstraint('cron_expression')
                    ],
                    required=True),
                TIME_CONSTRAINT_DESCRIPTION:
                properties.Schema(
                    properties.Schema.STRING,
                    _("Description for the time constraint."),
                ),
                DURATION:
                properties.Schema(properties.Schema.INTEGER,
                                  _("Duration for the time constraint."),
                                  constraints=[constraints.Range(min=0)],
                                  required=True),
                TIMEZONE:
                properties.Schema(
                    properties.Schema.STRING,
                    _("Timezone for the time constraint "
                      "(eg. 'Taiwan/Taipei', 'Europe/Amsterdam')"),
                    constraints=[constraints.CustomConstraint('timezone')],
                )
            }),
        support_status=support.SupportStatus(version='5.0.0'),
        default=[],
    )
}

NOVA_METERS = [
Ejemplo n.º 25
0
class FlowClassifier(neutron.NeutronResource):
    """Heat Template Resource for networking-sfc flow-classifier.

    A flow classifier selects the traffic that may enter a service chain:
    any traffic matching one of the classifiers is steered to the first
    port of the chain.
    """

    support_status = support.SupportStatus(version='8.0.0',
                                           status=support.UNSUPPORTED)

    PROPERTIES = (
        NAME, DESCRIPTION, PROTOCOL, ETHERTYPE,
        SOURCE_IP_PREFIX, DESTINATION_IP_PREFIX,
        SOURCE_PORT_RANGE_MIN, SOURCE_PORT_RANGE_MAX,
        DESTINATION_PORT_RANGE_MIN, DESTINATION_PORT_RANGE_MAX,
        LOGICAL_SOURCE_PORT, LOGICAL_DESTINATION_PORT,
        L7_PARAMETERS,
    ) = (
        'name', 'description', 'protocol', 'ethertype',
        'source_ip_prefix', 'destination_ip_prefix',
        'source_port_range_min', 'source_port_range_max',
        'destination_port_range_min', 'destination_port_range_max',
        'logical_source_port', 'logical_destination_port',
        'l7_parameters',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the Flow Classifier.'),
            update_allowed=True),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the Flow Classifier.'),
            update_allowed=True),
        PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            _('IP Protocol for the Flow Classifier.'),
            constraints=[constraints.AllowedValues(['tcp', 'udp', 'icmp'])]),
        ETHERTYPE: properties.Schema(
            properties.Schema.STRING,
            _('L2 ethertype.'),
            default='IPv4',
            constraints=[constraints.AllowedValues(['IPv4', 'IPv6'])]),
        SOURCE_IP_PREFIX: properties.Schema(
            properties.Schema.STRING,
            _('Source IP prefix or subnet.'),
            constraints=[constraints.CustomConstraint('net_cidr')]),
        DESTINATION_IP_PREFIX: properties.Schema(
            properties.Schema.STRING,
            _('Destination IP prefix or subnet.'),
            constraints=[constraints.CustomConstraint('net_cidr')]),
        SOURCE_PORT_RANGE_MIN: properties.Schema(
            properties.Schema.INTEGER,
            _('Source protocol port Minimum.'),
            constraints=[constraints.Range(1, 65535)]),
        SOURCE_PORT_RANGE_MAX: properties.Schema(
            properties.Schema.INTEGER,
            _('Source protocol port Maximum.'),
            constraints=[constraints.Range(1, 65535)]),
        DESTINATION_PORT_RANGE_MIN: properties.Schema(
            properties.Schema.INTEGER,
            _('Destination protocol port minimum.'),
            constraints=[constraints.Range(1, 65535)]),
        DESTINATION_PORT_RANGE_MAX: properties.Schema(
            properties.Schema.INTEGER,
            _('Destination protocol port maximum.'),
            constraints=[constraints.Range(1, 65535)]),
        LOGICAL_SOURCE_PORT: properties.Schema(
            properties.Schema.STRING,
            _('ID or name of the neutron source port.'),
            constraints=[constraints.CustomConstraint('neutron.port')]),
        LOGICAL_DESTINATION_PORT: properties.Schema(
            properties.Schema.STRING,
            _('ID or name of the neutron destination port.'),
            constraints=[constraints.CustomConstraint('neutron.port')]),
        L7_PARAMETERS: properties.Schema(
            properties.Schema.MAP,
            _('Dictionary of L7-parameters.'),
            support_status=support.SupportStatus(
                status=support.UNSUPPORTED,
                message=_('Currently, no value is supported for this '
                          'option.'))),
    }

    def translation_rules(self, props):
        """Resolve the logical source/destination ports to Neutron port ids."""
        plugin = self.client_plugin()
        resolve = translation.TranslationRule.RESOLVE
        rules = []
        for port_prop in (self.LOGICAL_SOURCE_PORT,
                          self.LOGICAL_DESTINATION_PORT):
            rules.append(translation.TranslationRule(
                props, resolve, [port_prop],
                client_plugin=plugin,
                finder='find_resourceid_by_name_or_id',
                entity=plugin.RES_TYPE_PORT))
        return rules

    def _show_resource(self):
        """Fetch the live flow classifier from the SFC extension API."""
        return self.client_plugin().show_ext_resource(
            'flow_classifier', self.resource_id)

    def handle_create(self):
        """Create the flow classifier and record its id."""
        props = self.prepare_properties(
            self.properties, self.physical_resource_name())
        created = self.client_plugin().create_ext_resource(
            'flow_classifier', props)
        self.resource_id_set(created['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push any changed, update-allowed properties to the extension."""
        if not prop_diff:
            return
        self.prepare_update_properties(prop_diff)
        self.client_plugin().update_ext_resource(
            'flow_classifier', prop_diff, self.resource_id)

    def handle_delete(self):
        """Delete the flow classifier, tolerating an already-gone resource."""
        if self.resource_id is None:
            return
        with self.client_plugin().ignore_not_found:
            self.client_plugin().delete_ext_resource(
                'flow_classifier', self.resource_id)
Ejemplo n.º 26
0
class Secret(resource.Resource):
    """The resource provides access to the secret/keying stored material.

    A secret is a singular item that stored within Barbican. A secret is
    anything you want it to be; however, the formal use case is a key that you
    wish to store away from prying eyes. Secret may include private keys,
    passwords and so on.
    """

    support_status = support.SupportStatus(version='2014.2')

    default_client_name = 'barbican'

    entity = 'secrets'

    PROPERTIES = (
        NAME,
        PAYLOAD,
        PAYLOAD_CONTENT_TYPE,
        PAYLOAD_CONTENT_ENCODING,
        MODE,
        EXPIRATION,
        ALGORITHM,
        BIT_LENGTH,
        SECRET_TYPE,
    ) = ('name', 'payload', 'payload_content_type', 'payload_content_encoding',
         'mode', 'expiration', 'algorithm', 'bit_length', 'secret_type')

    ATTRIBUTES = (
        STATUS,
        DECRYPTED_PAYLOAD,
    ) = (
        'status',
        'decrypted_payload',
    )

    # Secret types accepted by Barbican for the 'secret_type' property.
    _SECRET_TYPES = (SYMMETRIC, PUBLIC, PRIVATE, CERTIFICATE, PASSPHRASE,
                     OPAQUE) = ('symmetric', 'public', 'private',
                                'certificate', 'passphrase', 'opaque')

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Human readable name for the secret.'),
        ),
        PAYLOAD:
        properties.Schema(
            properties.Schema.STRING,
            _('The unencrypted plain text of the secret.'),
        ),
        SECRET_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('The type of the secret.'),
            constraints=[
                constraints.AllowedValues(_SECRET_TYPES),
            ],
            support_status=support.SupportStatus(version='5.0.0'),
            default=OPAQUE),
        PAYLOAD_CONTENT_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('The type/format the secret data is provided in.'),
            constraints=[
                constraints.AllowedValues([
                    'text/plain',
                    'application/octet-stream',
                ]),
            ],
        ),
        PAYLOAD_CONTENT_ENCODING:
        properties.Schema(
            properties.Schema.STRING,
            _('The encoding format used to provide the payload data.'),
            constraints=[
                constraints.AllowedValues([
                    'base64',
                ]),
            ],
        ),
        EXPIRATION:
        properties.Schema(
            properties.Schema.STRING,
            _('The expiration date for the secret in ISO-8601 format.'),
            constraints=[
                constraints.CustomConstraint('expiration'),
            ],
        ),
        ALGORITHM:
        properties.Schema(
            properties.Schema.STRING,
            _('The algorithm type used to generate the secret.'),
        ),
        BIT_LENGTH:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The bit-length of the secret.'),
            constraints=[
                constraints.Range(min=0, ),
            ],
        ),
        MODE:
        properties.Schema(
            properties.Schema.STRING,
            _('The type/mode of the algorithm associated with the secret '
              'information.'),
        ),
    }

    attributes_schema = {
        STATUS:
        attributes.Schema(_('The status of the secret.'),
                          type=attributes.Schema.STRING),
        DECRYPTED_PAYLOAD:
        attributes.Schema(_('The decrypted secret payload.'),
                          type=attributes.Schema.STRING),
    }

    def handle_create(self):
        """Store the secret in Barbican and record its reference."""
        info = dict(self.properties)
        secret = self.client().secrets.create(**info)
        # store() pushes the secret to Barbican and returns its href, which
        # doubles as this resource's id.
        secret_ref = secret.store()
        self.resource_id_set(secret_ref)
        return secret_ref

    def validate(self):
        """Check the content-type / payload / encoding interdependencies.

        - PAYLOAD_CONTENT_TYPE requires PAYLOAD.
        - 'application/octet-stream' additionally requires
          PAYLOAD_CONTENT_ENCODING and a payload decodable under it.
        - PAYLOAD_CONTENT_ENCODING is only valid with an
          'application/octet-stream' content type.
        """
        super(Secret, self).validate()

        if self.properties[self.PAYLOAD_CONTENT_TYPE]:
            if not self.properties[self.PAYLOAD]:
                raise exception.ResourcePropertyDependency(
                    prop1=self.PAYLOAD_CONTENT_TYPE, prop2=self.PAYLOAD)

            if (self.properties[self.PAYLOAD_CONTENT_TYPE] ==
                    'application/octet-stream'):
                if not self.properties[self.PAYLOAD_CONTENT_ENCODING]:
                    msg = _("Property unspecified. For '%(value)s' value "
                            "of '%(prop1)s' property, '%(prop2)s' property "
                            "must be specified.") % {
                                'value':
                                self.properties[self.PAYLOAD_CONTENT_TYPE],
                                'prop1': self.PAYLOAD_CONTENT_TYPE,
                                'prop2': self.PAYLOAD_CONTENT_ENCODING
                            }
                    raise exception.StackValidationFailed(message=msg)
                # Verify the payload actually decodes under the declared
                # encoding (base64 is the only allowed value).
                try:
                    base64.b64decode(self.properties[self.PAYLOAD])
                except Exception:
                    msg = _("Invalid %(prop1)s for specified '%(value)s' "
                            "value of '%(prop2)s' property.") % {
                                'prop1': self.PAYLOAD,
                                'value':
                                self.properties[self.PAYLOAD_CONTENT_ENCODING],
                                'prop2': self.PAYLOAD_CONTENT_ENCODING
                            }
                    raise exception.StackValidationFailed(message=msg)

        # An encoding without an octet-stream content type is meaningless.
        if (self.properties[self.PAYLOAD_CONTENT_ENCODING] and
            (not self.properties[self.PAYLOAD_CONTENT_TYPE]
             or self.properties[self.PAYLOAD_CONTENT_TYPE] == 'text/plain')):
            raise exception.ResourcePropertyValueDependency(
                prop1=self.PAYLOAD_CONTENT_ENCODING,
                prop2=self.PAYLOAD_CONTENT_TYPE,
                value='application/octet-stream')

    def _resolve_attribute(self, name):
        """Resolve the 'status' and 'decrypted_payload' attributes."""
        secret = self.client().secrets.get(self.resource_id)

        if name == self.DECRYPTED_PAYLOAD:
            return secret.payload

        if name == self.STATUS:
            return secret.status
Ejemplo n.º 27
0
class NetworkGateway(neutron.NeutronResource):
    """A resource for the Network Gateway resource in Neutron Network Gateway.

    Manages a Neutron network gateway (a set of gateway devices) and its
    connections to internal networks.
    """

    PROPERTIES = (
        NAME,
        DEVICES,
        CONNECTIONS,
    ) = (
        'name',
        'devices',
        'connections',
    )

    ATTRIBUTES = (
        DEFAULT,
        SHOW,
    ) = (
        'default',
        'show',
    )

    _DEVICES_KEYS = (
        ID,
        INTERFACE_NAME,
    ) = (
        'id',
        'interface_name',
    )

    _CONNECTIONS_KEYS = (
        NETWORK_ID,
        NETWORK,
        SEGMENTATION_TYPE,
        SEGMENTATION_ID,
    ) = (
        'network_id',
        'network',
        'segmentation_type',
        'segmentation_id',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          description=_('The name of the network gateway.'),
                          update_allowed=True),
        DEVICES:
        properties.Schema(
            properties.Schema.LIST,
            description=_('Device info for this network gateway.'),
            required=True,
            constraints=[constraints.Length(min=1)],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ID:
                    properties.Schema(properties.Schema.STRING,
                                      description=_(
                                          'The device id for the network '
                                          'gateway.'),
                                      required=True),
                    INTERFACE_NAME:
                    properties.Schema(properties.Schema.STRING,
                                      description=_(
                                          'The interface name for the '
                                          'network gateway.'),
                                      required=True)
                })),
        CONNECTIONS:
        properties.Schema(
            properties.Schema.LIST,
            description=_('Connection info for this network gateway.'),
            # Fixed: the default for a LIST property is an empty list, not {}.
            default=[],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NETWORK_ID:
                    properties.Schema(properties.Schema.STRING,
                                      support_status=support.SupportStatus(
                                          support.DEPRECATED,
                                          _('Use property %s.') % NETWORK),
                                      required=False),
                    NETWORK:
                    properties.Schema(properties.Schema.STRING,
                                      description=_(
                                          'The internal network to connect on '
                                          'the network gateway.'),
                                      required=False),
                    SEGMENTATION_TYPE:
                    properties.Schema(
                        properties.Schema.STRING,
                        description=_(
                            'L2 segmentation strategy on the external '
                            'side of the network gateway.'),
                        default='flat',
                        constraints=[
                            constraints.AllowedValues(('flat', 'vlan'))
                        ]),
                    SEGMENTATION_ID:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        description=_(
                            'The id for L2 segment on the external side '
                            'of the network gateway. Must be specified '
                            'when using vlan.'),
                        constraints=[constraints.Range(0, 4094)])
                }))
    }

    attributes_schema = {
        DEFAULT: attributes.Schema(_("A boolean value of default flag.")),
        SHOW: attributes.Schema(_("All attributes.")),
    }

    def _show_resource(self):
        """Return the gateway's current state as reported by Neutron."""
        return self.neutron().show_network_gateway(
            self.resource_id)['network_gateway']

    def _prepare_connection(self, connection):
        """Resolve the network reference of *connection* in place.

        Translates the (possibly named) network into 'network_id' and drops
        the deprecated NETWORK key so the dict matches the Neutron API shape.
        """
        neutron_utils.resolve_network(self.neutron(), connection,
                                      self.NETWORK, 'network_id')
        connection.pop(self.NETWORK, None)

    def validate(self):
        """Validate any of the provided params.

        Raises StackValidationFailed when a vlan connection is missing a
        segmentation_id or a flat connection specifies a non-zero one.
        """
        super(NetworkGateway, self).validate()

        for connection in self.properties[self.CONNECTIONS]:
            self._validate_depr_property_required(connection, self.NETWORK,
                                                  self.NETWORK_ID)
            segmentation_type = connection[self.SEGMENTATION_TYPE]
            segmentation_id = connection.get(self.SEGMENTATION_ID)

            if segmentation_type == 'vlan' and segmentation_id is None:
                msg = _("segmentation_id must be specified for using vlan")
                raise exception.StackValidationFailed(message=msg)

            if segmentation_type == 'flat' and segmentation_id:
                msg = _("segmentation_id cannot be specified except 0 for "
                        "using flat")
                raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the network gateway, then attach each connection."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())

        connections = props.pop(self.CONNECTIONS)
        ret = self.neutron().create_network_gateway(
            {'network_gateway': props})['network_gateway']

        for connection in connections:
            self._prepare_connection(connection)
            self.neutron().connect_network_gateway(ret['id'], connection)

        self.resource_id_set(ret['id'])

    def handle_delete(self):
        """Detach every connection, then delete the gateway itself."""
        if not self.resource_id:
            return
        client = self.neutron()

        for connection in self.properties[self.CONNECTIONS]:
            try:
                self._prepare_connection(connection)
                client.disconnect_network_gateway(self.resource_id, connection)
            except NeutronClientException as ex:
                # A vanished connection is fine during delete.
                self._handle_not_found_exception(ex)

        try:
            client.delete_network_gateway(self.resource_id)
        except NeutronClientException as ex:
            self._handle_not_found_exception(ex)
        else:
            return self._delete_task()

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply updated properties.

        A change to DEVICES cannot be updated in place, so the gateway is
        deleted and recreated; name and connection changes are applied
        directly against the existing gateway.
        """
        props = self.prepare_update_properties(json_snippet)
        connections = props.pop(self.CONNECTIONS)

        if self.DEVICES in prop_diff:
            # Devices are immutable on an existing gateway: replace it.
            self.handle_delete()
            self.properties.data.update(props)
            self.handle_create()
            return
        else:
            props.pop(self.DEVICES, None)

        if self.NAME in prop_diff:
            self.neutron().update_network_gateway(self.resource_id,
                                                  {'network_gateway': props})

        if self.CONNECTIONS in prop_diff:
            # Drop the old connections, then attach the new set.
            for connection in self.properties[self.CONNECTIONS]:
                try:
                    self._prepare_connection(connection)
                    self.neutron().disconnect_network_gateway(
                        self.resource_id, connection)
                except NeutronClientException as ex:
                    self._handle_not_found_exception(ex)
            for connection in connections:
                self._prepare_connection(connection)
                self.neutron().connect_network_gateway(self.resource_id,
                                                       connection)
# Example #28
# 0
                      "A CRON expression property."),
                    constraints=[
                        constraints.CustomConstraint(
                            'cron_expression')
                    ],
                    required=True
                ),
                TIME_CONSTRAINT_DESCRIPTION: properties.Schema(
                    properties.Schema.STRING,
                    _("Description for the time constraint."),
                ),
                DURATION: properties.Schema(
                    properties.Schema.INTEGER,
                    _("Duration for the time constraint."),
                    constraints=[
                        constraints.Range(min=0)
                    ],
                    required=True
                ),
                TIMEZONE: properties.Schema(
                    properties.Schema.STRING,
                    _("Timezone for the time constraint "
                      "(eg. 'Asia/Taipei', 'Europe/Amsterdam')."),
                    constraints=[
                        constraints.CustomConstraint('timezone')
                    ],
                )
            }

        ),
        support_status=support.SupportStatus(version='5.0.0'),
# Example #29
# 0
class HealthMonitor(neutron.NeutronResource):
    """A resource to handle load balancer health monitors.

    This resource creates and manages Neutron LBaaS v2 healthmonitors,
    which watches status of the load balanced servers.
    """

    support_status = support.SupportStatus(version='6.0.0')

    required_service_extension = 'lbaasv2'

    entity = 'lbaas_healthmonitor'

    res_info_key = 'healthmonitor'

    # Properties inputs for the resources create/update.
    PROPERTIES = (
        ADMIN_STATE_UP, DELAY, EXPECTED_CODES, HTTP_METHOD,
        MAX_RETRIES, POOL, TIMEOUT, TYPE, URL_PATH, TENANT_ID
    ) = (
        'admin_state_up', 'delay', 'expected_codes', 'http_method',
        'max_retries', 'pool', 'timeout', 'type', 'url_path', 'tenant_id'
    )

    # Supported HTTP methods.
    # Fixed: the constant for 'HEAD' was misspelled HEAT.
    HTTP_METHODS = (
        GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS,
        CONNECT, PATCH
    ) = (
        'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'OPTIONS',
        'CONNECT', 'PATCH'
    )
    # Backward-compatible alias for code that referenced the old typo.
    HEAT = HEAD

    # Supported output attributes of the resources.
    # Trailing commas make these real one-element tuples; without them
    # ATTRIBUTES was the bare string 'pools' (iterating it yields characters).
    ATTRIBUTES = (
        POOLS_ATTR,
    ) = (
        'pools',
    )

    properties_schema = {
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the health monitor.'),
            default=True,
            update_allowed=True
        ),
        DELAY: properties.Schema(
            properties.Schema.INTEGER,
            _('The minimum time in milliseconds between regular connections '
              'of the member.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.Range(min=0)]
        ),
        EXPECTED_CODES: properties.Schema(
            properties.Schema.STRING,
            _('The HTTP status codes expected in response from the '
              'member to declare it healthy. Specify one of the following '
              'values: a single value, such as 200. a list, such as 200, 202. '
              'a range, such as 200-204.'),
            update_allowed=True,
            default='200'
        ),
        HTTP_METHOD: properties.Schema(
            properties.Schema.STRING,
            _('The HTTP method used for requests by the monitor of type '
              'HTTP.'),
            update_allowed=True,
            default=GET,
            constraints=[constraints.AllowedValues(HTTP_METHODS)]
        ),
        MAX_RETRIES: properties.Schema(
            properties.Schema.INTEGER,
            _('Number of permissible connection failures before changing the '
              'member status to INACTIVE.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.Range(min=1, max=10)],
        ),
        POOL: properties.Schema(
            properties.Schema.STRING,
            _('ID or name of the load balancing pool.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('neutron.lbaas.pool')
            ]
        ),
        TIMEOUT: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of milliseconds for a monitor to wait for a '
              'connection to be established before it times out.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.Range(min=0)]
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('One of predefined health monitor types.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['PING', 'TCP', 'HTTP', 'HTTPS']),
            ]
        ),
        URL_PATH: properties.Schema(
            properties.Schema.STRING,
            _('The HTTP path used in the HTTP request used by the monitor to '
              'test a member health. A valid value is a string the begins '
              'with a forward slash (/).'),
            update_allowed=True,
            default='/'
        ),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of the tenant who owns the health monitor.')
        )
    }

    attributes_schema = {
        POOLS_ATTR: attributes.Schema(
            _('The list of Pools related to this monitor.'),
            type=attributes.Schema.LIST
        )
    }

    def __init__(self, name, definition, stack):
        super(HealthMonitor, self).__init__(name, definition, stack)
        # Cached load balancer id (resolved lazily from the pool).
        self._lb_id = None
        # Initialized here so the check_*_complete methods never observe an
        # undefined attribute if called out of the usual order.
        self._update_called = False
        self._delete_called = False

    @property
    def lb_id(self):
        """Lazily resolve and cache the load balancer id for this monitor.

        Walks pool -> first listener -> first load balancer via Neutron.
        """
        if self._lb_id is None:
            client_plugin = self.client_plugin()
            pool_id = client_plugin.find_resourceid_by_name_or_id(
                client_plugin.RES_TYPE_LB_POOL,
                self.properties[self.POOL])
            pool = self.client().show_lbaas_pool(pool_id)['pool']

            listener_id = pool['listeners'][0]['id']
            listener = self.client().show_listener(listener_id)['listener']

            self._lb_id = listener['loadbalancers'][0]['id']
        return self._lb_id

    def _check_lb_status(self):
        """True once the owning load balancer has settled."""
        return self.client_plugin().check_lb_status(self.lb_id)

    def handle_create(self):
        """Prepare the create request body; creation happens in the checker."""
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name())

        self.client_plugin().resolve_pool(
            properties, self.POOL, 'pool_id')

        return properties

    def check_create_complete(self, properties):
        """Create the healthmonitor once the LB is free, then await ACTIVE."""
        if self.resource_id is None:
            try:
                healthmonitor = self.client().create_lbaas_healthmonitor(
                    {'healthmonitor': properties})['healthmonitor']
                self.resource_id_set(healthmonitor['id'])
            except Exception as ex:
                # LB busy (immutable state): retry on the next poll.
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Defer the update to check_update_complete; pass the diff through."""
        self._update_called = False
        return prop_diff

    def check_update_complete(self, prop_diff):
        """Issue the update once the LB is free, then await ACTIVE."""
        if not prop_diff:
            return True

        if not self._update_called:
            try:
                self.client().update_lbaas_healthmonitor(
                    self.resource_id, {'healthmonitor': prop_diff})
                self._update_called = True
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def handle_delete(self):
        """Defer the delete to check_delete_complete."""
        self._delete_called = False

    def check_delete_complete(self, data):
        """Issue the delete once the LB is free, then await completion."""
        if self.resource_id is None:
            return True

        if not self._delete_called:
            try:
                self.client().delete_lbaas_healthmonitor(self.resource_id)
                self._delete_called = True
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                elif self.client_plugin().is_not_found(ex):
                    return True
                raise

        return self._check_lb_status()
# Example #30
# 0
 def test_range_max_fail(self):
     """A value above the Range maximum must raise ValueError."""
     rng = constraints.Range(max=5, description='a range')
     self.assertRaises(ValueError, rng.validate, 6)