# Example 1
class HealthMonitor(neutron.NeutronResource):
    """A resource for managing health monitors for load balancers in Neutron."""

    PROPERTIES = (
        DELAY, TYPE, MAX_RETRIES, TIMEOUT, ADMIN_STATE_UP,
        HTTP_METHOD, EXPECTED_CODES, URL_PATH,
    ) = (
        'delay', 'type', 'max_retries', 'timeout', 'admin_state_up',
        'http_method', 'expected_codes', 'url_path',
    )

    properties_schema = {
        DELAY: properties.Schema(
            properties.Schema.INTEGER,
            _('The minimum time in seconds between regular connections of '
              'the member.'),
            required=True,
            update_allowed=True
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('One of predefined health monitor types.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['PING', 'TCP', 'HTTP', 'HTTPS']),
            ]
        ),
        MAX_RETRIES: properties.Schema(
            properties.Schema.INTEGER,
            _('Number of permissible connection failures before changing the '
              'member status to INACTIVE.'),
            required=True,
            update_allowed=True
        ),
        TIMEOUT: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of seconds for a monitor to wait for a '
              'connection to be established before it times out.'),
            required=True,
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the health monitor.'),
            default=True,
            update_allowed=True
        ),
        HTTP_METHOD: properties.Schema(
            properties.Schema.STRING,
            _('The HTTP method used for requests by the monitor of type '
              'HTTP.'),
            update_allowed=True
        ),
        EXPECTED_CODES: properties.Schema(
            properties.Schema.STRING,
            _('The list of HTTP status codes expected in response from the '
              'member to declare it healthy.'),
            update_allowed=True
        ),
        URL_PATH: properties.Schema(
            properties.Schema.STRING,
            _('The HTTP path used in the HTTP request used by the monitor to '
              'test a member health.'),
            update_allowed=True
        ),
    }

    update_allowed_keys = ('Properties',)

    attributes_schema = {
        'admin_state_up': _('The administrative state of this health '
                            'monitor.'),
        'delay': _('The minimum time in seconds between regular connections '
                   'of the member.'),
        'expected_codes': _('The list of HTTP status codes expected in '
                            'response from the member to declare it healthy.'),
        'http_method': _('The HTTP method used for requests by the monitor of '
                         'type HTTP.'),
        'max_retries': _('Number of permissible connection failures before '
                         'changing the member status to INACTIVE.'),
        'timeout': _('Maximum number of seconds for a monitor to wait for a '
                     'connection to be established before it times out.'),
        'type': _('One of predefined health monitor types.'),
        'url_path': _('The HTTP path used in the HTTP request used by the '
                      'monitor to test a member health.'),
        'tenant_id': _('Tenant owning the health monitor.'),
        'show': _('All attributes.'),
    }

    def handle_create(self):
        """Create the health monitor in Neutron and record its ID."""
        props = self.prepare_properties(
            self.properties, self.physical_resource_name())
        monitor = self.neutron().create_health_monitor(
            {'health_monitor': props})['health_monitor']
        self.resource_id_set(monitor['id'])

    def _show_resource(self):
        """Fetch this health monitor's current data from Neutron."""
        monitor = self.neutron().show_health_monitor(self.resource_id)
        return monitor['health_monitor']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push any changed properties to the existing health monitor."""
        if not prop_diff:
            return
        self.neutron().update_health_monitor(
            self.resource_id, {'health_monitor': prop_diff})

    def handle_delete(self):
        """Delete the health monitor, tolerating an already-gone resource."""
        try:
            self.neutron().delete_health_monitor(self.resource_id)
        except NeutronClientException as ex:
            # A "not found" error means the monitor is already gone; any
            # other client error is re-raised by the handler.
            self._handle_not_found_exception(ex)
        else:
            return self._delete_task()
# Example 2
class MultipartMime(software_config.SoftwareConfig):
    """Assembles a collection of software configurations as a multi-part mime.

    Parts in the message can be populated with inline configuration or
    references to other config resources. If the referenced resource is itself
    a valid multi-part mime message, that will be broken into parts and
    those parts appended to this message.

    The resulting multi-part mime message will be stored by the configs API
    and can be referenced in properties such as OS::Nova::Server user_data.

    This resource is generally used to build a list of cloud-init
    configuration elements including scripts and cloud-config. Since
    cloud-init is boot-only configuration, any changes to the definition
    will result in the replacement of all servers which reference it.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        PARTS, CONFIG, FILENAME, TYPE, SUBTYPE
    ) = (
        'parts', 'config', 'filename', 'type', 'subtype'
    )

    TYPES = (
        TEXT, MULTIPART
    ) = (
        'text', 'multipart'
    )

    properties_schema = {
        PARTS: properties.Schema(
            properties.Schema.LIST,
            _('Parts belonging to this message.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CONFIG: properties.Schema(
                        properties.Schema.STRING,
                        _('Content of part to attach, either inline or by '
                          'referencing the ID of another software config '
                          'resource.'),
                        required=True
                    ),
                    FILENAME: properties.Schema(
                        properties.Schema.STRING,
                        _('Optional filename to associate with part.')
                    ),
                    TYPE: properties.Schema(
                        properties.Schema.STRING,
                        _('Whether the part content is text or multipart.'),
                        default=TEXT,
                        constraints=[constraints.AllowedValues(TYPES)]
                    ),
                    SUBTYPE: properties.Schema(
                        properties.Schema.STRING,
                        _('Optional subtype to specify with the type.')
                    ),
                }
            )
        )
    }

    # Cached assembled message; built lazily by get_message().
    message = None

    def handle_create(self):
        """Store the assembled multi-part message via the configs API."""
        config_props = {
            rpc_api.SOFTWARE_CONFIG_NAME: self.physical_resource_name(),
            rpc_api.SOFTWARE_CONFIG_CONFIG: self.get_message(),
            rpc_api.SOFTWARE_CONFIG_GROUP: 'Heat::Ungrouped'
        }
        stored = self.rpc_client().create_software_config(
            self.context, **config_props)
        self.resource_id_set(stored[rpc_api.SOFTWARE_CONFIG_ID])

    def get_message(self):
        """Return the multi-part mime message string, building it once."""
        if self.message:
            return self.message

        collected = []
        for entry in self.properties[self.PARTS]:
            content = entry.get(self.CONFIG)
            body = content

            # A UUID-shaped config value is treated as a reference to
            # another software config; fall back to the literal value if
            # that config cannot be found.
            if uuidutils.is_uuid_like(content):
                try:
                    sc = self.rpc_client().show_software_config(
                        self.context, content)
                except Exception as ex:
                    self.rpc_client().ignore_error_named(ex, 'NotFound')
                else:
                    body = sc[rpc_api.SOFTWARE_CONFIG_CONFIG]

            if entry.get(self.TYPE, self.TEXT) == self.MULTIPART:
                self._append_multiparts(collected, body)
            else:
                self._append_part(collected, body,
                                  entry.get(self.SUBTYPE, ''),
                                  entry.get(self.FILENAME, ''))

        self.message = multipart.MIMEMultipart(_subparts=collected).as_string()
        return self.message

    @staticmethod
    def _append_multiparts(subparts, multi_part):
        """Explode a multi-part message string and append its parts."""
        parsed = email.message_from_string(multi_part)
        if not (parsed and parsed.is_multipart()):
            return

        for sub in parsed.get_payload():
            MultipartMime._append_part(subparts,
                                       sub.get_payload(),
                                       sub.get_content_subtype(),
                                       sub.get_filename())

    @staticmethod
    def _append_part(subparts, part, subtype, filename):
        """Wrap *part* in a MIME message and append it to *subparts*."""
        if filename and not subtype:
            # NOTE(review): derives the subtype from the filename *root*
            # (splitext()[0]), not its extension — preserved as-is since it
            # matches the original behaviour.
            subtype = os.path.splitext(filename)[0]

        subparts.append(MultipartMime._create_message(part, subtype, filename))

    @staticmethod
    def _create_message(part, subtype, filename):
        """Build a MIMEText for *part*, choosing the narrowest charset."""
        charset = 'us-ascii'
        try:
            part.encode(charset)
        except UnicodeEncodeError:
            charset = 'utf-8'

        if subtype:
            msg = text.MIMEText(part, _subtype=subtype, _charset=charset)
        else:
            msg = text.MIMEText(part, _charset=charset)

        if filename:
            msg.add_header('Content-Disposition', 'attachment',
                           filename=filename)
        return msg
# Example 3 — file: user.py, project: zzjeric/heat
class User(stack_user.StackUser):
    """An AWS::IAM::User-style resource backed by a Heat stack user."""

    PROPERTIES = (
        PATH,
        GROUPS,
        LOGIN_PROFILE,
        POLICIES,
    ) = (
        'Path',
        'Groups',
        'LoginProfile',
        'Policies',
    )

    _LOGIN_PROFILE_KEYS = (LOGIN_PROFILE_PASSWORD, ) = ('Password', )

    properties_schema = {
        PATH: properties.Schema(
            properties.Schema.STRING,
            _('Not Implemented.')
        ),
        GROUPS: properties.Schema(
            properties.Schema.LIST,
            _('Not Implemented.')
        ),
        LOGIN_PROFILE: properties.Schema(
            properties.Schema.MAP,
            _('A login profile for the user.'),
            schema={
                LOGIN_PROFILE_PASSWORD: properties.Schema(
                    properties.Schema.STRING
                ),
            }
        ),
        POLICIES: properties.Schema(
            properties.Schema.LIST,
            _('Access policies to apply to the user.')
        ),
    }

    def _validate_policies(self, policies):
        """Return True if every string policy maps to an AccessPolicy.

        Non-string entries are skipped rather than rejected; a string that
        names a missing or non-AccessPolicy stack resource fails validation.
        """
        for policy in policies or []:
            # When we support AWS IAM style policies, we will have to accept
            # either a ref to an AWS::IAM::Policy defined in the stack, or
            # an embedded dict describing the policy directly. For now we
            # only expect strings, which must map to an
            # OS::Heat::AccessPolicy in this stack. A non-string (e.g. an
            # embedded IAM dict policy) is ignored instead of rejected so
            # templates that previously worked are not broken.
            if not isinstance(policy, six.string_types):
                LOG.debug(
                    "Ignoring policy %s, must be string "
                    "resource name", policy)
                continue

            try:
                rsrc = self.stack[policy]
            except KeyError:
                LOG.debug(
                    "Policy %(policy)s does not exist in stack "
                    "%(stack)s", {
                        'policy': policy,
                        'stack': self.stack.name
                    })
                return False

            if not callable(getattr(rsrc, 'access_allowed', None)):
                LOG.debug("Policy %s is not an AccessPolicy resource", policy)
                return False

        return True

    def handle_create(self):
        """Create the stack user, recording any configured password."""
        login = self.properties[self.LOGIN_PROFILE]
        if login and self.LOGIN_PROFILE_PASSWORD in login:
            self.password = login[self.LOGIN_PROFILE_PASSWORD]

        policies = self.properties[self.POLICIES]
        if policies and not self._validate_policies(policies):
            raise exception.InvalidTemplateAttribute(resource=self.name,
                                                     key=self.POLICIES)

        super(User, self).handle_create()
        self.resource_id_set(self._get_user_id())

    def get_reference_id(self):
        """Return the physical resource name as this user's reference ID."""
        return self.physical_resource_name_or_FnGetRefId()

    def access_allowed(self, resource_name):
        """Return True unless some string policy denies *resource_name*."""
        for policy in self.properties[self.POLICIES] or []:
            if not isinstance(policy, six.string_types):
                LOG.debug(
                    "Ignoring policy %s, must be string "
                    "resource name", policy)
                continue
            if not self.stack[policy].access_allowed(resource_name):
                return False
        return True
# Example 4
class AutoScalingGroup(cooldown.CooldownMixin, instgrp.InstanceGroup):
    """AWS-compatible auto scaling group.

    Extends InstanceGroup with min/max size bounds, desired capacity,
    cooldown-aware scaling adjustments, and the option of deriving the
    launch configuration from an existing instance (InstanceId) instead
    of a LaunchConfiguration resource.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        AVAILABILITY_ZONES, LAUNCH_CONFIGURATION_NAME, MAX_SIZE, MIN_SIZE,
        COOLDOWN, DESIRED_CAPACITY, HEALTH_CHECK_GRACE_PERIOD,
        HEALTH_CHECK_TYPE, LOAD_BALANCER_NAMES, VPCZONE_IDENTIFIER, TAGS,
        INSTANCE_ID,
    ) = (
        'AvailabilityZones', 'LaunchConfigurationName', 'MaxSize', 'MinSize',
        'Cooldown', 'DesiredCapacity', 'HealthCheckGracePeriod',
        'HealthCheckType', 'LoadBalancerNames', 'VPCZoneIdentifier', 'Tags',
        'InstanceId',
    )

    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )

    # NOTE: single-element tuples (trailing commas) for consistency with
    # the parent class and the other *_KEYS constants; previously this was
    # written as (ROLLING_UPDATE) = ('AutoScalingRollingUpdate'), which
    # bound plain strings, not tuples.
    _UPDATE_POLICY_SCHEMA_KEYS = (
        ROLLING_UPDATE,
    ) = (
        'AutoScalingRollingUpdate',
    )

    _ROLLING_UPDATE_SCHEMA_KEYS = (
        MIN_INSTANCES_IN_SERVICE, MAX_BATCH_SIZE, PAUSE_TIME
    ) = (
        'MinInstancesInService', 'MaxBatchSize', 'PauseTime'
    )

    ATTRIBUTES = (
        INSTANCE_LIST,
    ) = (
        'InstanceList',
    )

    properties_schema = {
        AVAILABILITY_ZONES: properties.Schema(
            properties.Schema.LIST,
            _('Not Implemented.'),
            required=True
        ),
        LAUNCH_CONFIGURATION_NAME: properties.Schema(
            properties.Schema.STRING,
            _('The reference to a LaunchConfiguration resource.'),
            update_allowed=True
        ),
        INSTANCE_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of an existing instance to use to '
              'create the Auto Scaling group. If specify this property, '
              'will create the group use an existing instance instead of '
              'a launch configuration.'),
            constraints=[
                constraints.CustomConstraint("nova.server")
            ]
        ),
        MAX_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of instances in the group.'),
            required=True,
            update_allowed=True
        ),
        MIN_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Minimum number of instances in the group.'),
            required=True,
            update_allowed=True
        ),
        COOLDOWN: properties.Schema(
            properties.Schema.INTEGER,
            _('Cooldown period, in seconds.'),
            update_allowed=True
        ),
        DESIRED_CAPACITY: properties.Schema(
            properties.Schema.INTEGER,
            _('Desired initial number of instances.'),
            update_allowed=True
        ),
        HEALTH_CHECK_GRACE_PERIOD: properties.Schema(
            properties.Schema.INTEGER,
            _('Not Implemented.'),
            implemented=False
        ),
        HEALTH_CHECK_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Not Implemented.'),
            constraints=[
                constraints.AllowedValues(['EC2', 'ELB']),
            ],
            implemented=False
        ),
        LOAD_BALANCER_NAMES: properties.Schema(
            properties.Schema.LIST,
            _('List of LoadBalancer resources.')
        ),
        VPCZONE_IDENTIFIER: properties.Schema(
            properties.Schema.LIST,
            _('Use only with Neutron, to list the internal subnet to '
              'which the instance will be attached; '
              'needed only if multiple exist; '
              'list length must be exactly 1.'),
            schema=properties.Schema(
                properties.Schema.STRING,
                _('UUID of the internal subnet to which the instance '
                  'will be attached.')
            )
        ),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            _('Tags to attach to this group.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    TAG_KEY: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                    TAG_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                },
            )
        ),
    }

    attributes_schema = {
        INSTANCE_LIST: attributes.Schema(
            _("A comma-delimited list of server ip addresses. "
              "(Heat extension)."),
            type=attributes.Schema.STRING
        ),
    }

    rolling_update_schema = {
        MIN_INSTANCES_IN_SERVICE: properties.Schema(properties.Schema.INTEGER,
                                                    default=0),
        MAX_BATCH_SIZE: properties.Schema(properties.Schema.INTEGER,
                                          default=1),
        PAUSE_TIME: properties.Schema(properties.Schema.STRING,
                                      default='PT0S')
    }

    update_policy_schema = {
        ROLLING_UPDATE: properties.Schema(properties.Schema.MAP,
                                          schema=rolling_update_schema)
    }

    def handle_create(self):
        """Create the group by instantiating its nested stack template."""
        return self.create_with_template(self.child_template())

    def _make_launch_config_resource(self, name, props):
        """Build an in-memory LaunchConfiguration resource from *props*."""
        lc_res_type = 'AWS::AutoScaling::LaunchConfiguration'
        lc_res_def = rsrc_defn.ResourceDefinition(name,
                                                  lc_res_type,
                                                  props)
        lc_res = resource.Resource(name, lc_res_def, self.stack)
        return lc_res

    def _get_conf_properties(self):
        """Return (config resource, properties) for member instances.

        If InstanceId is set, the launch configuration is synthesised from
        the referenced server's image, flavor, key and security groups;
        otherwise the parent class resolves the LaunchConfiguration
        resource. A single VPCZoneIdentifier subnet, when given, is
        injected as SubnetId.
        """
        instance_id = self.properties.get(self.INSTANCE_ID)
        if instance_id:
            server = self.client_plugin('nova').get_server(instance_id)
            instance_props = {
                'ImageId': server.image['id'],
                'InstanceType': server.flavor['id'],
                'KeyName': server.key_name,
                'SecurityGroups': [sg['name']
                                   for sg in server.security_groups]
            }
            conf = self._make_launch_config_resource(self.name,
                                                     instance_props)
            props = function.resolve(conf.properties.data)
        else:
            conf, props = super(AutoScalingGroup, self)._get_conf_properties()

        vpc_zone_ids = self.properties.get(self.VPCZONE_IDENTIFIER)
        if vpc_zone_ids:
            # validate() guarantees at most one subnet is listed.
            props['SubnetId'] = vpc_zone_ids[0]

        return conf, props

    def check_create_complete(self, task):
        """Update cooldown timestamp after create succeeds."""
        done = super(AutoScalingGroup, self).check_create_complete(task)
        cooldown = self.properties[self.COOLDOWN]
        if done:
            self._finished_scaling(cooldown,
                                   "%s : %s" % (sc_util.CFN_EXACT_CAPACITY,
                                                grouputils.get_size(self)))
        return done

    def check_update_complete(self, cookie):
        """Update the cooldown timestamp after update succeeds."""
        done = super(AutoScalingGroup, self).check_update_complete(cookie)
        cooldown = self.properties[self.COOLDOWN]
        if done:
            self._finished_scaling(cooldown,
                                   "%s : %s" % (sc_util.CFN_EXACT_CAPACITY,
                                                grouputils.get_size(self)))
        return done

    def _get_new_capacity(self, capacity,
                          adjustment,
                          adjustment_type=sc_util.CFN_EXACT_CAPACITY,
                          min_adjustment_step=None):
        """Return the adjusted capacity, clamped to [MinSize, MaxSize]."""
        lower = self.properties[self.MIN_SIZE]
        upper = self.properties[self.MAX_SIZE]
        return sc_util.calculate_new_capacity(capacity, adjustment,
                                              adjustment_type,
                                              min_adjustment_step,
                                              lower, upper)

    def resize(self, capacity):
        """Resize the group to *capacity*, invalidating cached attributes."""
        try:
            super(AutoScalingGroup, self).resize(capacity)
        finally:
            # allow InstanceList to be re-resolved
            self.clear_stored_attributes()

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Updates self.properties, if Properties has changed.

        If Properties has changed, update self.properties, so we get the new
        values during any subsequent adjustment.
        """
        if tmpl_diff:
            # parse update policy
            if tmpl_diff.update_policy_changed():
                up = json_snippet.update_policy(self.update_policy_schema,
                                                self.context)
                self.update_policy = up

        self.properties = json_snippet.properties(self.properties_schema,
                                                  self.context)
        if prop_diff:
            # Replace instances first if launch configuration has changed
            self._try_rolling_update(prop_diff)

        # Update will happen irrespective of whether auto-scaling
        # is in progress or not.
        capacity = grouputils.get_size(self)
        desired_capacity = self.properties[self.DESIRED_CAPACITY] or capacity
        new_capacity = self._get_new_capacity(capacity, desired_capacity)
        self.resize(new_capacity)

    def adjust(self, adjustment,
               adjustment_type=sc_util.CFN_CHANGE_IN_CAPACITY,
               min_adjustment_step=None, cooldown=None):
        """Adjust the size of the scaling group if the cooldown permits."""
        if self.status != self.COMPLETE:
            LOG.info("%s NOT performing scaling adjustment, "
                     "when status is not COMPLETE", self.name)
            raise resource.NoActionRequired

        capacity = grouputils.get_size(self)
        new_capacity = self._get_new_capacity(capacity, adjustment,
                                              adjustment_type,
                                              min_adjustment_step)
        if new_capacity == capacity:
            LOG.info("%s NOT performing scaling adjustment, "
                     "as there is no change in capacity.", self.name)
            raise resource.NoActionRequired

        if cooldown is None:
            cooldown = self.properties[self.COOLDOWN]

        # Raises if a previous scaling operation's cooldown is still active.
        self._check_scaling_allowed(cooldown)

        # send a notification before, on-error and on-success.
        notif = {
            'stack': self.stack,
            'adjustment': adjustment,
            'adjustment_type': adjustment_type,
            'capacity': capacity,
            'groupname': self.FnGetRefId(),
            'message': _("Start resizing the group %(group)s") % {
                'group': self.FnGetRefId()},
            'suffix': 'start',
        }
        size_changed = False
        try:
            notification.send(**notif)
            try:
                self.resize(new_capacity)
            except Exception as resize_ex:
                with excutils.save_and_reraise_exception():
                    try:
                        notif.update({'suffix': 'error',
                                      'message': str(resize_ex),
                                      'capacity': grouputils.get_size(self),
                                      })
                        notification.send(**notif)
                    except Exception:
                        # Never mask the resize failure with a
                        # notification failure.
                        LOG.exception('Failed sending error notification')
            else:
                size_changed = True
                notif.update({
                    'suffix': 'end',
                    'capacity': new_capacity,
                    'message': _("End resizing the group %(group)s") % {
                        'group': notif['groupname']},
                })
                notification.send(**notif)
        except Exception:
            LOG.error("Error in performing scaling adjustment for "
                      "group %s.", self.name)
            raise
        finally:
            # Always stamp the cooldown window, whether or not the resize
            # actually changed the group size.
            self._finished_scaling(cooldown,
                                   "%s : %s" % (adjustment_type, adjustment),
                                   size_changed=size_changed)

    def _tags(self):
        """Add Identifying Tags to all servers in the group.

        This is so the Dimensions received from cfn-push-stats all include
        the groupname and stack id.
        Note: the group name must match what is returned from FnGetRefId
        """
        autoscaling_tag = [{self.TAG_KEY: 'metering.AutoScalingGroupName',
                            self.TAG_VALUE: self.FnGetRefId()}]
        return super(AutoScalingGroup, self)._tags() + autoscaling_tag

    def validate(self):
        """Validate size bounds, subnet count and config-source exclusivity."""
        # check validity of group size
        min_size = self.properties[self.MIN_SIZE]
        max_size = self.properties[self.MAX_SIZE]

        if max_size < min_size:
            msg = _("MinSize can not be greater than MaxSize")
            raise exception.StackValidationFailed(message=msg)

        if min_size < 0:
            msg = _("The size of AutoScalingGroup can not be less than zero")
            raise exception.StackValidationFailed(message=msg)

        if self.properties[self.DESIRED_CAPACITY] is not None:
            desired_capacity = self.properties[self.DESIRED_CAPACITY]
            if desired_capacity < min_size or desired_capacity > max_size:
                msg = _("DesiredCapacity must be between MinSize and MaxSize")
                raise exception.StackValidationFailed(message=msg)

        # TODO(pasquier-s): once Neutron is able to assign subnets to
        # availability zones, it will be possible to specify multiple subnets.
        # For now, only one subnet can be specified. The bug #1096017 tracks
        # this issue.
        if (self.properties.get(self.VPCZONE_IDENTIFIER) and
                len(self.properties[self.VPCZONE_IDENTIFIER]) != 1):
            raise exception.NotSupported(feature=_("Anything other than one "
                                         "VPCZoneIdentifier"))
        # validate properties InstanceId and LaunchConfigurationName
        # for aws auto scaling group.
        # should provide just only one of
        if self.type() == 'AWS::AutoScaling::AutoScalingGroup':
            instance_id = self.properties.get(self.INSTANCE_ID)
            launch_config = self.properties.get(
                self.LAUNCH_CONFIGURATION_NAME)
            if bool(instance_id) == bool(launch_config):
                msg = _("Either 'InstanceId' or 'LaunchConfigurationName' "
                        "must be provided.")
                raise exception.StackValidationFailed(message=msg)

        super(AutoScalingGroup, self).validate()

    def child_template(self):
        """Return the nested stack template sized to the initial capacity."""
        if self.properties[self.DESIRED_CAPACITY]:
            num_instances = self.properties[self.DESIRED_CAPACITY]
        else:
            num_instances = self.properties[self.MIN_SIZE]
        return self._create_template(num_instances)
# Example 5
class InstanceGroup(stack_resource.StackResource):
    """An instance group that can scale arbitrary instances.

    A resource that creates the desired number of instances, each defined
    by an AWS::AutoScaling::LaunchConfiguration resource, and allows the
    scaled instances to be associated with loadbalancer resources.
    """

    PROPERTIES = (
        AVAILABILITY_ZONES,
        LAUNCH_CONFIGURATION_NAME,
        SIZE,
        LOAD_BALANCER_NAMES,
        TAGS,
    ) = (
        'AvailabilityZones',
        'LaunchConfigurationName',
        'Size',
        'LoadBalancerNames',
        'Tags',
    )

    _TAG_KEYS = (
        TAG_KEY,
        TAG_VALUE,
    ) = (
        'Key',
        'Value',
    )

    # Keys of the RollingUpdate section of the resource's UpdatePolicy.
    _ROLLING_UPDATE_SCHEMA_KEYS = (MIN_INSTANCES_IN_SERVICE, MAX_BATCH_SIZE,
                                   PAUSE_TIME) = ('MinInstancesInService',
                                                  'MaxBatchSize', 'PauseTime')

    _UPDATE_POLICY_SCHEMA_KEYS = (ROLLING_UPDATE, ) = ('RollingUpdate', )

    ATTRIBUTES = (INSTANCE_LIST, ) = ('InstanceList', )

    # Name of the nested-stack output mapping member names to reference
    # IDs; consumed by _lb_reload() and emitted by _nested_output_defns().
    (OUTPUT_MEMBER_IDS, ) = ('references', )

    properties_schema = {
        AVAILABILITY_ZONES:
        properties.Schema(properties.Schema.LIST,
                          _('Not Implemented.'),
                          required=True),
        LAUNCH_CONFIGURATION_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('The reference to a LaunchConfiguration resource.'),
            required=True,
            update_allowed=True),
        SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Desired number of instances.'),
                          required=True,
                          update_allowed=True),
        LOAD_BALANCER_NAMES:
        properties.Schema(properties.Schema.LIST,
                          _('List of LoadBalancer resources.')),
        TAGS:
        properties.Schema(properties.Schema.LIST,
                          _('Tags to attach to this group.'),
                          schema=properties.Schema(
                              properties.Schema.MAP,
                              schema={
                                  TAG_KEY:
                                  properties.Schema(properties.Schema.STRING,
                                                    _('Tag key.'),
                                                    required=True),
                                  TAG_VALUE:
                                  properties.Schema(properties.Schema.STRING,
                                                    _('Tag value.'),
                                                    required=True),
                              },
                          )),
    }

    attributes_schema = {
        INSTANCE_LIST:
        attributes.Schema(_("A comma-delimited list of server ip addresses. "
                            "(Heat extension)."),
                          type=attributes.Schema.STRING),
    }
    # Schema for the RollingUpdate section of an UpdatePolicy; PauseTime
    # defaults to an ISO 8601 zero-second duration.
    rolling_update_schema = {
        MIN_INSTANCES_IN_SERVICE:
        properties.Schema(properties.Schema.INTEGER, default=0),
        MAX_BATCH_SIZE:
        properties.Schema(properties.Schema.INTEGER, default=1),
        PAUSE_TIME:
        properties.Schema(properties.Schema.STRING, default='PT0S')
    }
    update_policy_schema = {
        ROLLING_UPDATE:
        properties.Schema(properties.Schema.MAP, schema=rolling_update_schema)
    }

    def validate(self):
        """Add validation for update_policy."""
        self.validate_launchconfig()
        super(InstanceGroup, self).validate()

        if self.update_policy is not None:
            policy_name = self.ROLLING_UPDATE
            if (policy_name in self.update_policy
                    and self.update_policy[policy_name] is not None):
                # PauseTime is an ISO 8601 duration; cap it at one hour so a
                # rolling update cannot stall each batch indefinitely.
                pause_time = self.update_policy[policy_name][self.PAUSE_TIME]
                if iso8601utils.parse_isoduration(pause_time) > 3600:
                    msg = _('Maximum %s is 1 hour.') % self.PAUSE_TIME
                    raise ValueError(msg)

    def validate_launchconfig(self):
        """Ensure the LaunchConfiguration is referenced, not merely named."""
        # It seems to be a common error to not have a dependency on the
        # launchconfiguration. This can happen if the actual resource
        # name is used instead of {get_resource: launch_conf} and no
        # depends_on is used.

        conf_refid = self.properties.get(self.LAUNCH_CONFIGURATION_NAME)
        if conf_refid:
            conf = self.stack.resource_by_refid(conf_refid)
            if conf is None:
                raise ValueError(
                    _('%(lc)s (%(ref)s)'
                      ' reference can not be found.') %
                    dict(lc=self.LAUNCH_CONFIGURATION_NAME, ref=conf_refid))
            # A reference (rather than a bare name) creates a dependency
            # edge, so this group must appear in the config's required_by.
            if self.name not in conf.required_by():
                raise ValueError(
                    _('%(lc)s (%(ref)s)'
                      ' requires a reference to the'
                      ' configuration not just the name of the'
                      ' resource.') %
                    dict(lc=self.LAUNCH_CONFIGURATION_NAME, ref=conf_refid))

    def handle_create(self):
        """Create a nested stack and add the initial resources to it."""
        num_instances = self.properties[self.SIZE]
        initial_template = self._create_template(num_instances)
        return self.create_with_template(initial_template)

    def check_create_complete(self, task):
        """When stack creation is done, update the loadbalancer.

        If any instances failed to be created, delete them.
        """
        done = super(InstanceGroup, self).check_create_complete(task)
        if done:
            self._lb_reload()
        return done

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Updates self.properties, if Properties has changed.

        If Properties has changed, update self.properties, so we
        get the new values during any subsequent adjustment.
        """
        if tmpl_diff:
            # parse update policy
            if tmpl_diff.update_policy_changed():
                up = json_snippet.update_policy(self.update_policy_schema,
                                                self.context)
                self.update_policy = up

        self.properties = json_snippet.properties(self.properties_schema,
                                                  self.context)
        if prop_diff:
            # Replace instances first if launch configuration has changed
            self._try_rolling_update(prop_diff)

        # Get the current capacity, we may need to adjust if
        # Size has changed
        if self.properties[self.SIZE] is not None:
            self.resize(self.properties[self.SIZE])
        else:
            curr_size = grouputils.get_size(self)
            self.resize(curr_size)

    def _tags(self):
        """Make sure that we add a tag that Ceilometer can pick up.

        These need to be prepended with 'metering.'.
        """
        tags = self.properties.get(self.TAGS) or []
        for t in tags:
            if t[self.TAG_KEY].startswith('metering.'):
                # the user has added one, don't add another.
                return tags
        return tags + [{
            self.TAG_KEY: 'metering.groupname',
            self.TAG_VALUE: self.FnGetRefId()
        }]

    def _get_conf_properties(self):
        """Return the launch config resource and its effective properties.

        Only keys present in the config definition's raw data are kept
        (schema-only defaults are dropped); tags are merged in and any
        'InstanceId' is resolved into full properties and then removed.
        """
        conf_refid = self.properties[self.LAUNCH_CONFIGURATION_NAME]
        conf = self.stack.resource_by_refid(conf_refid)
        c_props = conf.frozen_definition().properties(conf.properties_schema,
                                                      conf.context)
        # Keep only keys that appear in the raw definition data.
        props = {k: v for k, v in c_props.items() if k in c_props.data}
        for key in [conf.BLOCK_DEVICE_MAPPINGS, conf.NOVA_SCHEDULER_HINTS]:
            if props.get(key) is not None:
                # Apply the same raw-data filtering to each list entry.
                props[key] = [{
                    k: v
                    for k, v in prop.items() if k in c_props.data[key][idx]
                } for idx, prop in enumerate(props[key])]
        if 'InstanceId' in props:
            props = conf.rebuild_lc_properties(props['InstanceId'])
        props['Tags'] = self._tags()
        # if the launch configuration is created from an existing instance.
        # delete the 'InstanceId' property
        props.pop('InstanceId', None)

        return conf, props

    def _get_resource_definition(self):
        """Build the ResourceDefinition used for each scaled member."""
        conf, props = self._get_conf_properties()
        return rsrc_defn.ResourceDefinition(None, SCALED_RESOURCE_TYPE, props,
                                            conf.t.metadata())

    def _create_template(self,
                         num_instances,
                         num_replace=0,
                         template_version=('HeatTemplateFormatVersion',
                                           '2012-12-12')):
        """Create a template to represent autoscaled instances.

        Also see heat.scaling.template.member_definitions.
        """
        instance_definition = self._get_resource_definition()
        old_resources = grouputils.get_member_definitions(self,
                                                          include_failed=True)
        definitions = list(
            template.member_definitions(old_resources, instance_definition,
                                        num_instances, num_replace,
                                        short_id.generate_id))

        child_env = environment.get_child_environment(
            self.stack.env,
            self.child_params(),
            item_to_remove=self.resource_info)

        tmpl = template.make_template(definitions,
                                      version=template_version,
                                      child_env=child_env)

        # Subclasses use HOT templates
        att_func, res_func = 'get_attr', 'get_resource'
        if att_func not in tmpl.functions or res_func not in tmpl.functions:
            # Fall back to cfn-style intrinsics for this (cfn) template.
            att_func, res_func = 'Fn::GetAtt', 'Ref'
        get_attr = functools.partial(tmpl.functions[att_func], None, att_func)
        get_res = functools.partial(tmpl.functions[res_func], None, res_func)
        # Wire up the outputs (member refids / attributes) of the nested
        # stack so the parent can read them without inspecting members.
        for odefn in self._nested_output_defns([k for k, d in definitions],
                                               get_attr, get_res):
            tmpl.add_output(odefn)

        return tmpl

    def _try_rolling_update(self, prop_diff):
        """Run a batched replacement if the launch configuration changed."""
        if (self.update_policy[self.ROLLING_UPDATE]
                and self.LAUNCH_CONFIGURATION_NAME in prop_diff):
            policy = self.update_policy[self.ROLLING_UPDATE]
            pause_sec = iso8601utils.parse_isoduration(policy[self.PAUSE_TIME])
            self._replace(policy[self.MIN_INSTANCES_IN_SERVICE],
                          policy[self.MAX_BATCH_SIZE], pause_sec)

    def _update_timeout(self, batch_cnt, pause_sec):
        """Return the time budget left for batches after inter-batch pauses.

        Raises ValueError when the pauses alone would consume the whole
        stack update timeout.
        """
        total_pause_time = pause_sec * max(batch_cnt - 1, 0)
        if total_pause_time >= self.stack.timeout_secs():
            msg = _('The current update policy will result in stack update '
                    'timeout.')
            raise ValueError(msg)
        return self.stack.timeout_secs() - total_pause_time

    def _replace(self, min_in_service, batch_size, pause_sec):
        """Replace the instances in the group.

        Replace the instances in the group using updated launch configuration.
        """
        def changing_instances(old_tmpl, new_tmpl):
            # Symmetric difference picks up members whose definition
            # changed as well as members being added/removed.
            updated = set(new_tmpl.resource_definitions(None).items())
            if old_tmpl is not None:
                current = set(old_tmpl.resource_definitions(None).items())
                changing = current ^ updated
            else:
                changing = updated
            # includes instances to be updated and deleted
            return set(k for k, v in changing)

        def pause_between_batch():
            # Busy-yield until the TaskRunner's timeout fires; this is how
            # the PauseTime wait is implemented.
            while True:
                try:
                    yield
                except scheduler.Timeout:
                    return

        group_data = self._group_data()
        old_template = group_data.template()

        capacity = group_data.size(include_failed=True)
        batches = list(self._get_batches(capacity, batch_size, min_in_service))

        update_timeout = self._update_timeout(len(batches), pause_sec)

        try:
            for index, (total_capacity, efft_bat_sz) in enumerate(batches):
                template = self._create_template(total_capacity, efft_bat_sz)
                # Take the members about to change out of the LB first.
                self._lb_reload(exclude=changing_instances(
                    old_template, template),
                                refresh_data=False)
                updater = self.update_with_template(template)
                checker = scheduler.TaskRunner(self._check_for_completion,
                                               updater)
                checker(timeout=update_timeout)
                old_template = template
                if index < (len(batches) - 1) and pause_sec > 0:
                    self._lb_reload()
                    waiter = scheduler.TaskRunner(pause_between_batch)
                    waiter(timeout=pause_sec)
        finally:
            # Always refresh cached group data and re-point the LB at the
            # surviving members, even if a batch failed.
            self._group_data(refresh=True)
            self._lb_reload()

    @staticmethod
    def _get_batches(capacity, batch_size, min_in_service):
        """Return an iterator over the batches in a batched update.

        Each batch is a tuple comprising the total size of the group after
        processing the batch, and the number of members that can receive the
        new definition in that batch (either by creating a new member or
        updating an existing one).
        """

        efft_capacity = capacity
        updated = 0

        while rolling_update.needs_update(capacity, efft_capacity, updated):
            batch = rolling_update.next_batch(capacity, efft_capacity, updated,
                                              batch_size, min_in_service)
            yield batch
            efft_capacity, num_updates = batch
            updated += num_updates

    def _check_for_completion(self, updater):
        """Yield until the nested-stack update driven by *updater* is done."""
        while not self.check_update_complete(updater):
            yield

    def resize(self, new_capacity):
        """Resize the instance group to the new capacity.

        When shrinking, the oldest instances will be removed.
        """
        new_template = self._create_template(new_capacity)
        try:
            updater = self.update_with_template(new_template)
            checker = scheduler.TaskRunner(self._check_for_completion, updater)
            checker(timeout=self.stack.timeout_secs())
        finally:
            # Reload the LB in any case, so it's only pointing at healthy
            # nodes.
            self._lb_reload()

    def _lb_reload(self, exclude=frozenset(), refresh_data=True):
        """Reconfigure attached load balancers with the current members.

        Members named in *exclude* are omitted from the new configuration.
        Falls back to inspecting the nested stack via grouputils when the
        'references' output is unavailable.
        """
        lb_names = self.properties.get(self.LOAD_BALANCER_NAMES) or []
        if lb_names:
            if refresh_data:
                self._outputs = None
            try:
                all_refids = self.get_output(self.OUTPUT_MEMBER_IDS)
            except (exception.NotFound,
                    exception.TemplateOutputError) as op_err:
                LOG.debug('Falling back to grouputils due to %s', op_err)
                if refresh_data:
                    self._nested = None
                instances = grouputils.get_members(self)
                all_refids = {i.name: i.FnGetRefId() for i in instances}
                names = [i.name for i in instances]
            else:
                group_data = self._group_data(refresh=refresh_data)
                names = group_data.member_names(include_failed=False)

            id_list = [
                all_refids[n] for n in names
                if n not in exclude and n in all_refids
            ]
            lbs = [self.stack[name] for name in lb_names]
            lbutils.reconfigure_loadbalancers(lbs, id_list)

    def get_reference_id(self):
        # The group is referenced by its physical resource name.
        return self.physical_resource_name_or_FnGetRefId()

    def _group_data(self, refresh=False):
        """Return a cached GroupInspector object for the nested stack."""
        if refresh or getattr(self, '_group_inspector', None) is None:
            inspector = grouputils.GroupInspector.from_parent_resource(self)
            self._group_inspector = inspector
        return self._group_inspector

    def _resolve_attribute(self, name):
        """Resolves the resource's attributes.

        Heat extension: "InstanceList" returns comma delimited list of server
        ip addresses.
        """
        if name == self.INSTANCE_LIST:

            def listify(ips):
                # An empty join would be '', which is falsy -> return None.
                return u','.join(ips) or None

            try:
                output = self.get_output(name)
            except (exception.NotFound,
                    exception.TemplateOutputError) as op_err:
                LOG.debug('Falling back to grouputils due to %s', op_err)
            else:
                if isinstance(output, dict):
                    names = self._group_data().member_names(False)
                    return listify(output[n] for n in names if n in output)
                else:
                    LOG.debug('Falling back to grouputils due to '
                              'old (list-style) output format')

            # Slow path: ask each member directly for its PublicIp.
            return listify(
                inst.FnGetAtt('PublicIp') or '0.0.0.0'
                for inst in grouputils.get_members(self))

    def _nested_output_defns(self, resource_names, get_attr_fn, get_res_fn):
        """Yield output definitions to embed in the nested stack template."""
        for attr in self.referenced_attrs():
            if isinstance(attr, six.string_types):
                key = attr
            else:
                key = attr[0]

            if key == self.INSTANCE_LIST:
                value = {
                    r: get_attr_fn([r, 'PublicIp'])
                    for r in resource_names
                }
                yield output.OutputDefinition(key, value)

        # Always expose the member refid map; _lb_reload() depends on it.
        member_ids_value = {r: get_res_fn(r) for r in resource_names}
        yield output.OutputDefinition(self.OUTPUT_MEMBER_IDS, member_ids_value)

    def child_template(self):
        """Return the nested stack template for the configured Size."""
        num_instances = int(self.properties[self.SIZE])
        return self._create_template(num_instances)

    def child_template_files(self, child_env):
        """Return the template files used by the nested stack."""
        is_rolling_update = (self.action == self.UPDATE
                             and self.update_policy[self.ROLLING_UPDATE])
        return grouputils.get_child_template_files(self.context, self.stack,
                                                   is_rolling_update,
                                                   self.old_template_id)

    def child_params(self):
        """Return the environment for the nested stack."""
        return {
            environment_format.PARAMETERS: {},
            environment_format.RESOURCE_REGISTRY: {
                SCALED_RESOURCE_TYPE: 'AWS::EC2::Instance',
            },
        }

    def get_nested_parameters_stack(self):
        """Return a nested group of size 1 for validation."""
        child_template = self._create_template(1)
        params = self.child_params()
        name = "%s-%s" % (self.stack.name, self.name)
        return self._parse_nested_stack(name, child_template, params)
Exemplo n.º 6
0
class ElasticIpAssociation(resource.Resource):
    """Associates an Elastic IP address with an instance or a port.

    The association is made either via Nova (when the 'EIP' property is
    given) or via a Neutron floating IP (when 'AllocationId' is given).
    """

    PROPERTIES = (
        INSTANCE_ID,
        EIP,
        ALLOCATION_ID,
        NETWORK_INTERFACE_ID,
    ) = (
        'InstanceId',
        'EIP',
        'AllocationId',
        'NetworkInterfaceId',
    )

    properties_schema = {
        INSTANCE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('Instance ID to associate with EIP specified by EIP property.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('nova.server')]),
        EIP:
        properties.Schema(properties.Schema.STRING,
                          _('EIP address to associate with instance.'),
                          update_allowed=True),
        ALLOCATION_ID:
        properties.Schema(properties.Schema.STRING,
                          _('Allocation ID for VPC EIP address.'),
                          update_allowed=True),
        NETWORK_INTERFACE_ID:
        properties.Schema(properties.Schema.STRING,
                          _('Network interface ID to associate with EIP.'),
                          update_allowed=True),
    }

    def FnGetRefId(self):
        # Reference this resource by its physical resource name.
        return self.physical_resource_name_or_FnGetRefId()

    def validate(self):
        """Validate that a consistent combination of properties was given."""
        super(ElasticIpAssociation, self).validate()
        eip = self.properties[self.EIP]
        allocation_id = self.properties[self.ALLOCATION_ID]
        instance_id = self.properties[self.INSTANCE_ID]
        ni_id = self.properties[self.NETWORK_INTERFACE_ID]
        # to check EIP and ALLOCATION_ID, should provide one of
        if bool(eip) == bool(allocation_id):
            msg = _("Either 'EIP' or 'AllocationId' must be provided.")
            raise exception.StackValidationFailed(message=msg)
        # to check if has EIP, also should specify InstanceId
        if eip and not instance_id:
            msg = _("Must specify 'InstanceId' if you specify 'EIP'.")
            raise exception.StackValidationFailed(message=msg)
        # to check InstanceId and NetworkInterfaceId, should provide
        # at least one
        if not instance_id and not ni_id:
            raise exception.PropertyUnspecifiedError('InstanceId',
                                                     'NetworkInterfaceId')

    def _get_port_info(self, ni_id=None, instance_id=None):
        """Return (port_id, port) for a network interface or an instance.

        An explicit network interface ID takes precedence; otherwise the
        first port attached to the instance is used. Returns (None, None)
        when neither argument is supplied.
        """
        port_id = None
        port_rsrc = None
        if ni_id:
            port_rsrc = self.neutron().list_ports(id=ni_id)['ports'][0]
            port_id = ni_id
        elif instance_id:
            ports = self.neutron().list_ports(device_id=instance_id)
            port_rsrc = ports['ports'][0]
            port_id = port_rsrc['id']

        return port_id, port_rsrc

    def _neutron_add_gateway_router(self, float_id, network_id):
        """Set the floating IP's network as the gateway of the VPC router."""
        router = vpc.VPC.router_for_vpc(self.neutron(), network_id)
        if router is not None:
            floatingip = self.neutron().show_floatingip(float_id)
            floating_net_id = floatingip['floatingip']['floating_network_id']
            self.neutron().add_gateway_router(router['id'],
                                              {'network_id': floating_net_id})

    def _neutron_update_floating_ip(self,
                                    allocationId,
                                    port_id=None,
                                    ignore_not_found=False):
        """Point the Neutron floating IP at the given port.

        A port_id of None disassociates the floating IP. Not-found errors
        are tolerated when ignore_not_found is True.
        """
        try:
            self.neutron().update_floatingip(
                allocationId, {'floatingip': {
                    'port_id': port_id
                }})
        except Exception as e:
            if ignore_not_found:
                self.client_plugin('neutron').ignore_not_found(e)
            else:
                raise

    def _nova_remove_floating_ip(self,
                                 instance_id,
                                 eip,
                                 ignore_not_found=False):
        """Remove a floating IP from a server via Nova.

        Returns the server object (or None if lookup failed). Not-found
        errors are tolerated when ignore_not_found is True, and
        unprocessable-entity errors are always tolerated.
        """
        server = None
        try:
            server = self.nova().servers.get(instance_id)
            server.remove_floating_ip(eip)
        except Exception as e:
            is_not_found = self.client_plugin('nova').is_not_found(e)
            iue = self.client_plugin('nova').is_unprocessable_entity(e)
            # Re-raise unless this is an ignorable not-found or an
            # unprocessable-entity response.
            if ((not ignore_not_found and is_not_found)
                    or (not is_not_found and not iue)):
                raise

        return server

    def _floatingIp_detach(self,
                           nova_ignore_not_found=False,
                           neutron_ignore_not_found=False):
        """Detach the currently associated address from its instance/port.

        Returns the Nova server when the EIP path was used, else None.
        """
        eip = self.properties[self.EIP]
        allocation_id = self.properties[self.ALLOCATION_ID]
        instance_id = self.properties[self.INSTANCE_ID]
        server = None
        if eip:
            # if has eip_old, to remove the eip_old from the instance
            server = self._nova_remove_floating_ip(instance_id, eip,
                                                   nova_ignore_not_found)
        else:
            # if hasn't eip_old, to update neutron floatingIp
            self._neutron_update_floating_ip(allocation_id, None,
                                             neutron_ignore_not_found)

        return server

    def _handle_update_eipInfo(self, prop_diff):
        """Re-point the association at a new EIP or allocation ID."""
        eip_update = prop_diff.get(self.EIP)
        allocation_id_update = prop_diff.get(self.ALLOCATION_ID)
        instance_id = self.properties[self.INSTANCE_ID]
        ni_id = self.properties[self.NETWORK_INTERFACE_ID]
        if eip_update:
            server = self._floatingIp_detach(neutron_ignore_not_found=True)
            if server:
                # then to attach the eip_update to the instance
                server.add_floating_ip(eip_update)
                self.resource_id_set(eip_update)
        elif allocation_id_update:
            self._floatingIp_detach(nova_ignore_not_found=True)
            port_id, port_rsrc = self._get_port_info(ni_id, instance_id)
            if not port_id or not port_rsrc:
                LOG.error(_LE('Port not specified.'))
                raise exception.NotFound(
                    _('Failed to update, can not found '
                      'port info.'))

            network_id = port_rsrc['network_id']
            self._neutron_add_gateway_router(allocation_id_update, network_id)
            self._neutron_update_floating_ip(allocation_id_update, port_id)
            self.resource_id_set(allocation_id_update)

    def _handle_update_portInfo(self, prop_diff):
        """Attach the existing address to a new instance or interface."""
        instance_id_update = prop_diff.get(self.INSTANCE_ID)
        ni_id_update = prop_diff.get(self.NETWORK_INTERFACE_ID)
        eip = self.properties[self.EIP]
        allocation_id = self.properties[self.ALLOCATION_ID]
        # if update portInfo, no need to detach the port from
        # old instance/floatingip.
        if eip:
            server = self.nova().servers.get(instance_id_update)
            server.add_floating_ip(eip)
        else:
            port_id, port_rsrc = self._get_port_info(ni_id_update,
                                                     instance_id_update)
            if not port_id or not port_rsrc:
                LOG.error(_LE('Port not specified.'))
                raise exception.NotFound(
                    _('Failed to update, can not found '
                      'port info.'))

            network_id = port_rsrc['network_id']
            self._neutron_add_gateway_router(allocation_id, network_id)
            self._neutron_update_floating_ip(allocation_id, port_id)

    def _validate_update_properties(self, prop_diff):
        """Force replacement when the address and its target change together."""
        # according to aws doc, when update allocation_id or eip,
        # if you also change the InstanceId or NetworkInterfaceId,
        # should go to Replacement flow
        if self.ALLOCATION_ID in prop_diff or self.EIP in prop_diff:
            instance_id = prop_diff.get(self.INSTANCE_ID)
            ni_id = prop_diff.get(self.NETWORK_INTERFACE_ID)

            if instance_id or ni_id:
                raise resource.UpdateReplace(self.name)

        # according to aws doc, when update the instance_id or
        # network_interface_id, if you also change the EIP or
        # ALLOCATION_ID, should go to Replacement flow
        if (self.INSTANCE_ID in prop_diff
                or self.NETWORK_INTERFACE_ID in prop_diff):
            eip = prop_diff.get(self.EIP)
            allocation_id = prop_diff.get(self.ALLOCATION_ID)
            if eip or allocation_id:
                raise resource.UpdateReplace(self.name)

    def handle_create(self):
        """Add a floating IP address to a server."""
        if self.properties[self.EIP]:
            # Nova path: attach the address directly to the server.
            server = self.nova().servers.get(self.properties[self.INSTANCE_ID])
            server.add_floating_ip(self.properties[self.EIP])
            self.resource_id_set(self.properties[self.EIP])
            LOG.debug(
                'ElasticIpAssociation '
                '%(instance)s.add_floating_ip(%(eip)s)', {
                    'instance': self.properties[self.INSTANCE_ID],
                    'eip': self.properties[self.EIP]
                })
        elif self.properties[self.ALLOCATION_ID]:
            # Neutron path: associate the floating IP with a port and make
            # sure the VPC router has a gateway to the floating network.
            ni_id = self.properties[self.NETWORK_INTERFACE_ID]
            instance_id = self.properties[self.INSTANCE_ID]
            port_id, port_rsrc = self._get_port_info(ni_id, instance_id)
            if not port_id or not port_rsrc:
                LOG.warn(_LW('Skipping association, resource not specified'))
                return

            float_id = self.properties[self.ALLOCATION_ID]
            network_id = port_rsrc['network_id']
            self._neutron_add_gateway_router(float_id, network_id)

            self._neutron_update_floating_ip(float_id, port_id)

            self.resource_id_set(float_id)

    def handle_delete(self):
        """Remove a floating IP address from a server or port."""
        if self.resource_id is None:
            return

        if self.properties[self.EIP]:
            instance_id = self.properties[self.INSTANCE_ID]
            eip = self.properties[self.EIP]
            self._nova_remove_floating_ip(instance_id,
                                          eip,
                                          ignore_not_found=True)
        elif self.properties[self.ALLOCATION_ID]:
            float_id = self.properties[self.ALLOCATION_ID]
            self._neutron_update_floating_ip(float_id,
                                             port_id=None,
                                             ignore_not_found=True)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Dispatch property updates to the EIP or port update handler."""
        if prop_diff:
            self._validate_update_properties(prop_diff)
            if self.ALLOCATION_ID in prop_diff or self.EIP in prop_diff:
                self._handle_update_eipInfo(prop_diff)
            elif (self.INSTANCE_ID in prop_diff
                  or self.NETWORK_INTERFACE_ID in prop_diff):
                self._handle_update_portInfo(prop_diff)
Exemplo n.º 7
0
class Policy(resource.Resource):
    """A resource that creates a Senlin Policy.

    A policy is a set of rules that can be checked and/or enforced when
    an action is performed on a Cluster.
    """

    support_status = support.SupportStatus(version='6.0.0')

    default_client_name = 'senlin'

    PROPERTIES = (
        NAME,
        TYPE,
        POLICY_PROPS,
        BINDINGS,
    ) = ('name', 'type', 'properties', 'bindings')

    _BINDINGS = (
        BD_CLUSTER,
        BD_ENABLED,
    ) = ('cluster', 'enabled')

    _ACTION_STATUS = (
        ACTION_SUCCEEDED,
        ACTION_FAILED,
    ) = (
        'SUCCEEDED',
        'FAILED',
    )

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Name of the senlin policy. By default, physical resource name '
              'is used.'),
            update_allowed=True,
        ),
        TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('The type of senlin policy.'),
            required=True,
            constraints=[constraints.CustomConstraint('senlin.policy_type')]),
        POLICY_PROPS:
        properties.Schema(
            properties.Schema.MAP,
            _('Properties of this policy.'),
        ),
        BINDINGS:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of clusters to which this policy is attached.'),
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    BD_CLUSTER:
                    properties.Schema(
                        properties.Schema.STRING,
                        _("The name or ID of target cluster."),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('senlin.cluster')
                        ]),
                    BD_ENABLED:
                    properties.Schema(
                        properties.Schema.BOOLEAN,
                        _("Whether enable this policy on that cluster."),
                        default=True,
                    ),
                }))
    }

    def remove_bindings(self, bindings):
        """Start detaching this policy from each cluster in *bindings*.

        Each binding dict is annotated in place with the Senlin 'action'
        ID and a 'finished' flag that check_action_done() polls.
        """
        for bd in bindings:
            try:
                bd['action'] = self.client().cluster_detach_policy(
                    bd[self.BD_CLUSTER], self.resource_id)['action']
                bd['finished'] = False
            except Exception as ex:
                # Policy wasn't attached to the cluster (or the cluster is
                # gone): nothing to detach, so mark the binding finished.
                if (self.client_plugin().is_bad_request(ex)
                        or self.client_plugin().is_not_found(ex)):
                    bd['finished'] = True
                else:
                    # Bare raise preserves the original traceback.
                    raise

    def add_bindings(self, bindings):
        """Start attaching this policy to each cluster in *bindings*.

        Annotates each binding dict in place with the Senlin 'action' ID
        and a 'finished' flag for later polling.
        """
        for bd in bindings:
            bd['action'] = self.client().cluster_attach_policy(
                bd[self.BD_CLUSTER],
                self.resource_id,
                enabled=bd[self.BD_ENABLED])['action']
            bd['finished'] = False

    def check_action_done(self, bindings):
        """Poll the Senlin actions recorded on *bindings*.

        Returns True once every binding's action has succeeded (or the
        bindings list is empty/None). Raises ResourceInError if any
        action reports FAILED.
        """
        ret = True
        if not bindings:
            return ret
        for bd in bindings:
            if bd.get('finished', False):
                continue
            action = self.client().get_action(bd['action'])
            if action.status == self.ACTION_SUCCEEDED:
                bd['finished'] = True
            elif action.status == self.ACTION_FAILED:
                err_msg = _('Failed to execute %(action)s for '
                            '%(cluster)s: %(reason)s') % {
                                'action': action.action,
                                'cluster': bd[self.BD_CLUSTER],
                                'reason': action.status_reason
                            }
                raise exception.ResourceInError(status_reason=err_msg,
                                                resource_status=self.FAILED)
            else:
                ret = False
        return ret

    def handle_create(self):
        """Create the policy and kick off any requested attachments.

        Returns the (deep-copied) bindings list, which carries the polling
        state consumed by check_create_complete().
        """
        params = {
            'name': (self.properties[self.NAME]
                     or self.physical_resource_name()),
            'spec':
            self.client_plugin().generate_spec(
                self.properties[self.TYPE], self.properties[self.POLICY_PROPS])
        }

        policy = self.client().create_policy(**params)
        self.resource_id_set(policy.id)
        # Deep copy so polling state isn't written back into properties.
        bindings = copy.deepcopy(self.properties[self.BINDINGS])
        if bindings:
            self.add_bindings(bindings)
        return bindings

    def check_create_complete(self, bindings):
        return self.check_action_done(bindings)

    def handle_delete(self):
        """Return the bindings to detach; actual work happens in polling."""
        return copy.deepcopy(self.properties[self.BINDINGS])

    def check_delete_complete(self, bindings):
        """Detach all bindings, then delete the policy itself."""
        if not self.resource_id:
            return True
        self.remove_bindings(bindings)
        if self.check_action_done(bindings):
            with self.client_plugin().ignore_not_found:
                self.client().delete_policy(self.resource_id)
                return True
        return False

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Rename the policy and/or start re-binding it to clusters.

        Returns an actions dict ('remove'/'add' binding lists) consumed by
        check_update_complete().
        """
        if self.NAME in prop_diff:
            param = {'name': prop_diff[self.NAME]}
            self.client().update_policy(self.resource_id, **param)
        actions = {}
        if self.BINDINGS in prop_diff:
            old = self.properties[self.BINDINGS] or []
            new = prop_diff[self.BINDINGS] or []
            actions['remove'] = [bd for bd in old if bd not in new]
            actions['add'] = [bd for bd in new if bd not in old]
            self.remove_bindings(actions['remove'])
        return actions

    def check_update_complete(self, actions):
        ret = True
        remove_done = self.check_action_done(actions.get('remove', []))
        # wait until detach finished, then start attach
        if remove_done and 'add' in actions:
            if not actions.get('add_started', False):
                self.add_bindings(actions['add'])
                actions['add_started'] = True
            ret = self.check_action_done(actions['add'])
        return ret

    def _show_resource(self):
        policy = self.client().get_policy(self.resource_id)
        return policy.to_dict()
# Exemplo n.º 8
class Group(resource.Resource):
    """Represents a scaling group."""

    # pyrax differs drastically from the actual Auto Scale API. We'll prefer
    # the true API here, but since pyrax doesn't support the full flexibility
    # of the API, we'll have to restrict what users can provide.

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    # properties are identical to the API POST /groups.
    PROPERTIES = (
        GROUP_CONFIGURATION,
        LAUNCH_CONFIGURATION,
    ) = (
        'groupConfiguration',
        'launchConfiguration',
    )

    _GROUP_CONFIGURATION_KEYS = (
        GROUP_CONFIGURATION_MAX_ENTITIES,
        GROUP_CONFIGURATION_COOLDOWN,
        GROUP_CONFIGURATION_NAME,
        GROUP_CONFIGURATION_MIN_ENTITIES,
        GROUP_CONFIGURATION_METADATA,
    ) = (
        'maxEntities',
        'cooldown',
        'name',
        'minEntities',
        'metadata',
    )

    _LAUNCH_CONFIG_KEYS = (
        LAUNCH_CONFIG_ARGS,
        LAUNCH_CONFIG_TYPE,
    ) = (
        'args',
        'type',
    )

    _LAUNCH_CONFIG_ARGS_KEYS = (
        LAUNCH_CONFIG_ARGS_LOAD_BALANCERS,
        LAUNCH_CONFIG_ARGS_SERVER,
        LAUNCH_CONFIG_ARGS_STACK,
    ) = (
        'loadBalancers',
        'server',
        'stack',
    )

    _LAUNCH_CONFIG_ARGS_LOAD_BALANCER_KEYS = (
        LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID,
        LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT,
    ) = (
        'loadBalancerId',
        'port',
    )

    _LAUNCH_CONFIG_ARGS_SERVER_KEYS = (
        LAUNCH_CONFIG_ARGS_SERVER_NAME, LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF,
        LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF,
        LAUNCH_CONFIG_ARGS_SERVER_METADATA,
        LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY,
        LAUNCH_CONFIG_ARGS_SERVER_NETWORKS,
        LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG,
        LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME,
        LAUNCH_CONFIG_ARGS_SERVER_USER_DATA,
        LAUNCH_CONFIG_ARGS_SERVER_CDRIVE) = (
            'name',
            'flavorRef',
            'imageRef',
            'metadata',
            'personality',
            'networks',
            'diskConfig',  # technically maps to OS-DCF:diskConfig
            'key_name',
            'user_data',
            'config_drive')

    _LAUNCH_CONFIG_ARGS_SERVER_NETWORK_KEYS = (
        LAUNCH_CONFIG_ARGS_SERVER_NETWORK_UUID, ) = ('uuid', )

    _LAUNCH_CONFIG_ARGS_STACK_KEYS = (
        LAUNCH_CONFIG_ARGS_STACK_TEMPLATE,
        LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL,
        LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK,
        LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT, LAUNCH_CONFIG_ARGS_STACK_FILES,
        LAUNCH_CONFIG_ARGS_STACK_PARAMETERS,
        LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS) = ('template', 'template_url',
                                                  'disable_rollback',
                                                  'environment', 'files',
                                                  'parameters', 'timeout_mins')

    _launch_configuration_args_schema = {
        LAUNCH_CONFIG_ARGS_LOAD_BALANCERS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of load balancers to hook the '
              'server up to. If not specified, no '
              'load balancing will be configured.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID:
                    properties.Schema(properties.Schema.STRING,
                                      _('ID of the load balancer.'),
                                      required=True),
                    LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('Server port to connect the load balancer to.')),
                },
            )),
        LAUNCH_CONFIG_ARGS_SERVER:
        properties.Schema(
            properties.Schema.MAP,
            _('Server creation arguments, as accepted by the Cloud Servers '
              'server creation API.'),
            required=False,
            schema={
                LAUNCH_CONFIG_ARGS_SERVER_NAME:
                properties.Schema(properties.Schema.STRING,
                                  _('Server name.'),
                                  required=True),
                LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF:
                properties.Schema(
                    properties.Schema.STRING,
                    _('The ID or name of the flavor to boot onto.'),
                    constraints=[constraints.CustomConstraint('nova.flavor')],
                    required=True),
                LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF:
                properties.Schema(
                    properties.Schema.STRING,
                    _('The ID or name of the image to boot with.'),
                    constraints=[constraints.CustomConstraint('glance.image')],
                    required=True),
                LAUNCH_CONFIG_ARGS_SERVER_METADATA:
                properties.Schema(properties.Schema.MAP,
                                  _('Metadata key and value pairs.')),
                LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY:
                properties.Schema(properties.Schema.MAP,
                                  _('File path and contents.')),
                LAUNCH_CONFIG_ARGS_SERVER_CDRIVE:
                properties.Schema(properties.Schema.BOOLEAN,
                                  _('Enable config drive on the instance.')),
                LAUNCH_CONFIG_ARGS_SERVER_USER_DATA:
                properties.Schema(
                    properties.Schema.STRING,
                    _('User data for bootstrapping the instance.')),
                LAUNCH_CONFIG_ARGS_SERVER_NETWORKS:
                properties.Schema(
                    properties.Schema.LIST,
                    _('Networks to attach to. If unspecified, the instance '
                      'will be attached to the public Internet and private '
                      'ServiceNet networks.'),
                    schema=properties.Schema(
                        properties.Schema.MAP,
                        schema={
                            LAUNCH_CONFIG_ARGS_SERVER_NETWORK_UUID:
                            properties.Schema(
                                properties.Schema.STRING,
                                _('UUID of network to attach to.'),
                                required=True)
                        })),
                LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG:
                properties.Schema(
                    properties.Schema.STRING,
                    _('Configuration specifying the partition layout. AUTO to '
                      'create a partition utilizing the entire disk, and '
                      'MANUAL to create a partition matching the source '
                      'image.'),
                    constraints=[
                        constraints.AllowedValues(['AUTO', 'MANUAL']),
                    ]),
                LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME:
                properties.Schema(
                    properties.Schema.STRING,
                    _('Name of a previously created SSH keypair to allow '
                      'key-based authentication to the server.')),
            },
        ),
        LAUNCH_CONFIG_ARGS_STACK:
        properties.Schema(
            properties.Schema.MAP,
            _('The attributes that Auto Scale uses to create a new stack. The '
              'attributes that you specify for the stack entity apply to all '
              'new stacks in the scaling group. Note the stack arguments are '
              'directly passed to Heat when creating a stack.'),
            schema={
                LAUNCH_CONFIG_ARGS_STACK_TEMPLATE:
                properties.Schema(
                    properties.Schema.STRING,
                    _('The template that describes the stack. Either the '
                      'template or template_url property must be specified.'),
                ),
                LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL:
                properties.Schema(
                    properties.Schema.STRING,
                    _('A URI to a template. Either the template or '
                      'template_url property must be specified.')),
                LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK:
                properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('Keep the resources that have been created if the stack '
                      'fails to create. Defaults to True.'),
                    default=True),
                LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT:
                properties.Schema(
                    properties.Schema.MAP,
                    _('The environment for the stack.'),
                ),
                LAUNCH_CONFIG_ARGS_STACK_FILES:
                properties.Schema(
                    properties.Schema.MAP,
                    _('The contents of files that the template references.')),
                LAUNCH_CONFIG_ARGS_STACK_PARAMETERS:
                properties.Schema(
                    properties.Schema.MAP,
                    _('Key/value pairs of the parameters and their values to '
                      'pass to the parameters in the template.')),
                LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS:
                properties.Schema(properties.Schema.INTEGER,
                                  _('The stack creation timeout in minutes.'))
            })
    }

    properties_schema = {
        GROUP_CONFIGURATION:
        properties.Schema(
            properties.Schema.MAP,
            _('Group configuration.'),
            schema={
                GROUP_CONFIGURATION_MAX_ENTITIES:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('Maximum number of entities in this scaling group.'),
                    required=True),
                GROUP_CONFIGURATION_COOLDOWN:
                properties.Schema(
                    properties.Schema.NUMBER,
                    _('Number of seconds after capacity changes during '
                      'which further capacity changes are disabled.'),
                    required=True),
                GROUP_CONFIGURATION_NAME:
                properties.Schema(properties.Schema.STRING,
                                  _('Name of the scaling group.'),
                                  required=True),
                GROUP_CONFIGURATION_MIN_ENTITIES:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('Minimum number of entities in this scaling group.'),
                    required=True),
                GROUP_CONFIGURATION_METADATA:
                properties.Schema(
                    properties.Schema.MAP,
                    _('Arbitrary key/value metadata to associate with '
                      'this group.')),
            },
            required=True,
            update_allowed=True),
        LAUNCH_CONFIGURATION:
        properties.Schema(
            properties.Schema.MAP,
            _('Launch configuration.'),
            schema={
                LAUNCH_CONFIG_ARGS:
                properties.Schema(properties.Schema.MAP,
                                  _('Type-specific launch arguments.'),
                                  schema=_launch_configuration_args_schema,
                                  required=True),
                LAUNCH_CONFIG_TYPE:
                properties.Schema(
                    properties.Schema.STRING,
                    _('Launch configuration method. Only launch_server and '
                      'launch_stack are currently supported.'),
                    required=True,
                    constraints=[
                        constraints.AllowedValues(
                            ['launch_server', 'launch_stack']),
                    ]),
            },
            required=True,
            update_allowed=True),
        # We don't allow scaling policies to be specified here, despite the
        # fact that the API supports it. Users should use the ScalingPolicy
        # resource.
    }

    def _get_group_config_args(self, groupconf):
        """Get the groupConfiguration-related pyrax arguments."""
        return dict(
            name=groupconf[self.GROUP_CONFIGURATION_NAME],
            cooldown=groupconf[self.GROUP_CONFIGURATION_COOLDOWN],
            min_entities=groupconf[self.GROUP_CONFIGURATION_MIN_ENTITIES],
            max_entities=groupconf[self.GROUP_CONFIGURATION_MAX_ENTITIES],
            metadata=groupconf.get(self.GROUP_CONFIGURATION_METADATA, None))

    def _get_launch_config_server_args(self, launchconf):
        """Get pyrax arguments for a launch_server launch configuration."""
        lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
        server_args = lcargs[self.LAUNCH_CONFIG_ARGS_SERVER]
        lb_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCERS)
        # Deep copy: the load-balancer entries are normalized in place below.
        lbs = copy.deepcopy(lb_args)
        for lb in lbs:
            # if the port is not specified, the lbid must be that of a
            # RackConnectV3 lb pool.
            if not lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT]:
                del lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT]
                continue
            lbid = int(lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID])
            lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID] = lbid
        personality = server_args.get(
            self.LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY)
        if personality:
            personality = [{
                'path': k,
                'contents': v
            } for k, v in personality.items()]
        user_data = server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_USER_DATA)
        # Config drive is implied when non-empty user_data is supplied.
        cdrive = (server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_CDRIVE)
                  or bool(user_data is not None and len(user_data.strip())))
        image_id = self.client_plugin('glance').find_image_by_name_or_id(
            server_args[self.LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF])
        flavor_id = self.client_plugin('nova').find_flavor_by_name_or_id(
            server_args[self.LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF])

        return dict(
            launch_config_type=launchconf[self.LAUNCH_CONFIG_TYPE],
            # Use the server-args keys here (the group-configuration
            # constants happen to share the same string values, but these
            # fields come from the server sub-map).
            server_name=server_args[self.LAUNCH_CONFIG_ARGS_SERVER_NAME],
            image=image_id,
            flavor=flavor_id,
            disk_config=server_args.get(
                self.LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG),
            metadata=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_METADATA),
            config_drive=cdrive,
            user_data=user_data,
            personality=personality,
            networks=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_NETWORKS),
            load_balancers=lbs,
            key_name=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME),
        )

    def _get_launch_config_stack_args(self, launchconf):
        """Get pyrax arguments for a launch_stack launch configuration."""
        lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
        stack_args = lcargs[self.LAUNCH_CONFIG_ARGS_STACK]
        return dict(
            launch_config_type=launchconf[self.LAUNCH_CONFIG_TYPE],
            template=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE],
            template_url=stack_args[
                self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL],
            disable_rollback=stack_args[
                self.LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK],
            environment=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT],
            files=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_FILES],
            parameters=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_PARAMETERS],
            timeout_mins=stack_args[
                self.LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS])

    def _get_launch_config_args(self, launchconf):
        """Get the launchConfiguration-related pyrax arguments."""
        if launchconf[self.LAUNCH_CONFIG_ARGS].get(
                self.LAUNCH_CONFIG_ARGS_SERVER):
            return self._get_launch_config_server_args(launchconf)
        else:
            return self._get_launch_config_stack_args(launchconf)

    def _get_create_args(self):
        """Get pyrax-style arguments for creating a scaling group."""
        args = self._get_group_config_args(
            self.properties[self.GROUP_CONFIGURATION])
        # pyrax's create() call names the group metadata 'group_metadata'.
        args['group_metadata'] = args.pop('metadata')
        args.update(
            self._get_launch_config_args(
                self.properties[self.LAUNCH_CONFIGURATION]))
        return args

    def handle_create(self):
        """Create the autoscaling group and set resource_id.

        The resource_id is set to the resulting group's ID.
        """
        asclient = self.auto_scale()
        group = asclient.create(**self._get_create_args())
        self.resource_id_set(str(group.id))

    def handle_check(self):
        """Verify the group still exists (raises if it doesn't)."""
        self.auto_scale().get(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update the group configuration and the launch configuration."""
        asclient = self.auto_scale()
        if self.GROUP_CONFIGURATION in prop_diff:
            args = self._get_group_config_args(
                prop_diff[self.GROUP_CONFIGURATION])
            asclient.replace(self.resource_id, **args)
        if self.LAUNCH_CONFIGURATION in prop_diff:
            args = self._get_launch_config_args(
                prop_diff[self.LAUNCH_CONFIGURATION])
            asclient.replace_launch_config(self.resource_id, **args)

    def handle_delete(self):
        """Delete the scaling group.

        Since Auto Scale doesn't allow deleting a group until all its servers
        are gone, we must set the minEntities and maxEntities of the group to 0
        and then keep trying the delete until Auto Scale has deleted all the
        servers and the delete will succeed.
        """
        if self.resource_id is None:
            return
        asclient = self.auto_scale()
        args = self._get_group_config_args(
            self.properties[self.GROUP_CONFIGURATION])
        args['min_entities'] = 0
        args['max_entities'] = 0
        try:
            asclient.replace(self.resource_id, **args)
        except NotFound:
            pass

    def check_delete_complete(self, result):
        """Try the delete operation until it succeeds."""
        if self.resource_id is None:
            return True
        try:
            self.auto_scale().delete(self.resource_id)
        except Forbidden:
            # Servers are still being scaled down; retry later.
            return False
        except NotFound:
            return True
        else:
            return True

    def _check_rackconnect_v3_pool_exists(self, pool_id):
        """Return True if pool_id names an existing RackConnectV3 pool."""
        pools = self.client("rackconnect").list_load_balancer_pools()
        if pool_id in (p.id for p in pools):
            return True
        return False

    def validate(self):
        """Validate launch configuration invariants beyond the schema."""
        super(Group, self).validate()
        launchconf = self.properties[self.LAUNCH_CONFIGURATION]
        lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]

        server_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_SERVER)
        st_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_STACK)

        # launch_server and launch_stack are required and mutually exclusive.
        if ((not server_args and not st_args) or (server_args and st_args)):
            msg = (
                _('Must provide one of %(server)s or %(stack)s in %(conf)s') %
                {
                    'server': self.LAUNCH_CONFIG_ARGS_SERVER,
                    'stack': self.LAUNCH_CONFIG_ARGS_STACK,
                    'conf': self.LAUNCH_CONFIGURATION
                })
            raise exception.StackValidationFailed(msg)

        lb_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCERS)
        # Validation only reads the entries, so no defensive copy is needed.
        for lb in lb_args:
            lb_port = lb.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT)
            lb_id = lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID]
            if not lb_port:
                # check if lb id is a valid RCV3 pool id
                if not self._check_rackconnect_v3_pool_exists(lb_id):
                    msg = _('Could not find RackConnectV3 pool '
                            'with id %s') % (lb_id)
                    raise exception.StackValidationFailed(msg)

        if st_args:
            st_tmpl = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE)
            st_tmpl_url = st_args.get(
                self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL)
            st_env = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT)
            # template and template_url are required and mutually exclusive.
            if ((not st_tmpl and not st_tmpl_url)
                    or (st_tmpl and st_tmpl_url)):
                msg = _('Must provide one of template or template_url.')
                raise exception.StackValidationFailed(msg)

            if st_tmpl:
                st_files = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_FILES)
                try:
                    # Parse the inline template to surface errors early.
                    tmpl = template_format.simple_parse(st_tmpl)
                    templatem.Template(tmpl, files=st_files, env=st_env)
                except Exception as exc:
                    msg = (_('Encountered error while loading template: %s') %
                           six.text_type(exc))
                    raise exception.StackValidationFailed(msg)

    def auto_scale(self):
        """Return the Rackspace Auto Scale client."""
        return self.client('auto_scale')
# Exemplo n.º 9
class ScalingPolicy(resource.Resource):
    """Represents a Rackspace Auto Scale scaling policy."""

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    PROPERTIES = (
        GROUP,
        NAME,
        CHANGE,
        CHANGE_PERCENT,
        DESIRED_CAPACITY,
        COOLDOWN,
        TYPE,
        ARGS,
    ) = (
        'group',
        'name',
        'change',
        'changePercent',
        'desiredCapacity',
        'cooldown',
        'type',
        'args',
    )

    properties_schema = {
        # group isn't in the post body, but it's in the URL to post to.
        GROUP:
        properties.Schema(properties.Schema.STRING,
                          _('Scaling group ID that this policy belongs to.'),
                          required=True),
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of this scaling policy.'),
                          required=True,
                          update_allowed=True),
        CHANGE:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Amount to add to or remove from current number of instances. '
              'Incompatible with changePercent and desiredCapacity.'),
            update_allowed=True),
        CHANGE_PERCENT:
        properties.Schema(
            properties.Schema.NUMBER,
            _('Percentage-based change to add or remove from current number '
              'of instances. Incompatible with change and desiredCapacity.'),
            update_allowed=True),
        DESIRED_CAPACITY:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Absolute number to set the number of instances to. '
              'Incompatible with change and changePercent.'),
            update_allowed=True),
        COOLDOWN:
        properties.Schema(
            properties.Schema.NUMBER,
            _('Number of seconds after a policy execution during which '
              'further executions are disabled.'),
            update_allowed=True),
        TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('Type of this scaling policy. Specifies how the policy is '
              'executed.'),
            required=True,
            constraints=[
                constraints.AllowedValues(
                    ['webhook', 'schedule', 'cloud_monitoring']),
            ],
            update_allowed=True),
        ARGS:
        properties.Schema(properties.Schema.MAP,
                          _('Type-specific arguments for the policy.'),
                          update_allowed=True),
    }

    def _get_args(self, properties):
        """Get pyrax-style create arguments for scaling policies.

        Exactly one of change / changePercent / desiredCapacity is
        translated; pyrax expresses a percent change as change +
        is_percent=True.
        """
        args = dict(
            scaling_group=properties[self.GROUP],
            name=properties[self.NAME],
            policy_type=properties[self.TYPE],
            cooldown=properties[self.COOLDOWN],
        )
        if properties.get(self.CHANGE) is not None:
            args['change'] = properties[self.CHANGE]
        elif properties.get(self.CHANGE_PERCENT) is not None:
            args['change'] = properties[self.CHANGE_PERCENT]
            args['is_percent'] = True
        elif properties.get(self.DESIRED_CAPACITY) is not None:
            args['desired_capacity'] = properties[self.DESIRED_CAPACITY]
        if properties.get(self.ARGS) is not None:
            args['args'] = properties[self.ARGS]
        return args

    def handle_create(self):
        """Create the scaling policy and initialize the resource ID.

        The resource ID is initialized to {group_id}:{policy_id}.
        """
        asclient = self.auto_scale()
        args = self._get_args(self.properties)
        policy = asclient.add_policy(**args)
        resource_id = '%s:%s' % (self.properties[self.GROUP], policy.id)
        self.resource_id_set(resource_id)

    def _get_policy_id(self):
        """Extract the policy ID from the '{group}:{policy}' resource ID."""
        return self.resource_id.split(':', 1)[1]

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Replace the policy definition with the updated properties."""
        asclient = self.auto_scale()
        props = json_snippet.properties(self.properties_schema, self.context)
        args = self._get_args(props)
        args['policy'] = self._get_policy_id()
        asclient.replace_policy(**args)

    def handle_delete(self):
        """Delete the policy if it exists."""
        # Nothing was created; avoid building a client for a no-op.
        if self.resource_id is None:
            return
        asclient = self.auto_scale()
        policy_id = self._get_policy_id()
        try:
            asclient.delete_policy(self.properties[self.GROUP], policy_id)
        except NotFound:
            pass

    def auto_scale(self):
        """Return the Rackspace Auto Scale client."""
        return self.client('auto_scale')
# Exemplo n.º 10
class QoSBandwidthLimitRule(QoSRule):
    """A resource for Neutron QoS bandwidth limit rule.

    This rule can be associated with QoS policy, and then the policy
    can be used by neutron port and network, to provide bandwidth limit
    QoS capabilities.

    The default policy usage of this resource is limited to
    administrators only.
    """

    entity = 'bandwidth_limit_rule'

    PROPERTIES = (
        MAX_BANDWIDTH, MAX_BURST_BANDWIDTH, DIRECTION,
    ) = (
        'max_kbps', 'max_burst_kbps', 'direction',
    )

    properties_schema = {
        MAX_BANDWIDTH: properties.Schema(
            properties.Schema.INTEGER,
            _('Max bandwidth in kbps.'),
            required=True,
            update_allowed=True,
            constraints=[
                constraints.Range(min=0),
            ]
        ),
        MAX_BURST_BANDWIDTH: properties.Schema(
            properties.Schema.INTEGER,
            _('Max burst bandwidth in kbps.'),
            update_allowed=True,
            constraints=[
                constraints.Range(min=0),
            ],
            default=0
        ),
        DIRECTION: properties.Schema(
            properties.Schema.STRING,
            _('Traffic direction from the point of view of the port.'),
            update_allowed=True,
            constraints=[
                constraints.AllowedValues(['egress', 'ingress']),
            ],
            default='egress',
            support_status=support.SupportStatus(version='13.0.0')
        ),
    }

    # Inherit the common rule properties (e.g. the policy reference).
    properties_schema.update(QoSRule.properties_schema)

    def handle_create(self):
        """Create the rule under the referenced policy and record its ID."""
        rule_props = self.prepare_properties(self.properties,
                                             self.physical_resource_name())
        # POLICY is consumed via self.policy_id, not sent in the rule body.
        del rule_props[self.POLICY]

        created = self.client().create_bandwidth_limit_rule(
            self.policy_id,
            {'bandwidth_limit_rule': rule_props})['bandwidth_limit_rule']

        self.resource_id_set(created['id'])

    def handle_delete(self):
        """Delete the remote rule; never-created or missing rules are no-ops."""
        if self.resource_id is not None:
            with self.client_plugin().ignore_not_found:
                self.client().delete_bandwidth_limit_rule(
                    self.resource_id, self.policy_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed properties to the existing rule."""
        if not prop_diff:
            return
        self.client().update_bandwidth_limit_rule(
            self.resource_id, self.policy_id,
            {'bandwidth_limit_rule': prop_diff})

    def _res_get_args(self):
        """Positional arguments used to fetch this rule from neutron."""
        return [self.resource_id, self.policy_id]
Exemplo n.º 11
0
class QoSPolicy(neutron.NeutronResource):
    """A resource for Neutron QoS Policy.

    This QoS policy can be associated with neutron resources,
    such as port and network, to provide QoS capabilities.

    The default policy usage of this resource is limited to
    administrators only.
    """

    required_service_extension = 'qos'

    entity = 'qos_policy'

    res_info_key = 'policy'

    support_status = support.SupportStatus(version='6.0.0')

    PROPERTIES = (
        NAME, DESCRIPTION, SHARED, TENANT_ID,
    ) = (
        'name', 'description', 'shared', 'tenant_id',
    )

    ATTRIBUTES = (RULES_ATTR,) = ('rules',)

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name for the QoS policy.'),
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('The description for the QoS policy.'),
            update_allowed=True
        ),
        SHARED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether this QoS policy should be shared to other tenants.'),
            default=False,
            update_allowed=True
        ),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('The owner tenant ID of this QoS policy.')
        ),
    }

    attributes_schema = {
        RULES_ATTR: attributes.Schema(
            _("A list of all rules for the QoS policy."),
            type=attributes.Schema.LIST
        ),
    }

    def handle_create(self):
        """Create the QoS policy and record its ID."""
        create_body = {
            'policy': self.prepare_properties(self.properties,
                                              self.physical_resource_name()),
        }
        created = self.client().create_qos_policy(create_body)['policy']
        self.resource_id_set(created['id'])

    def handle_delete(self):
        """Delete the policy; never-created or already-gone policies are no-ops."""
        if self.resource_id is not None:
            with self.client_plugin().ignore_not_found:
                self.client().delete_qos_policy(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply changed properties to the existing policy."""
        if not prop_diff:
            return
        self.prepare_update_properties(prop_diff)
        self.client().update_qos_policy(self.resource_id,
                                        {'policy': prop_diff})
Exemplo n.º 12
0
class SaharaJob(signal_responder.SignalResponder, resource.Resource):
    """A resource for creating Sahara Job.

    A job specifies the type of the job and lists all of the individual
    job binary objects. Can be launched using resource-signal.
    """

    support_status = support.SupportStatus(version='8.0.0')

    # Top-level resource properties.
    PROPERTIES = (NAME, TYPE, MAINS, LIBS, DESCRIPTION, DEFAULT_EXECUTION_DATA,
                  IS_PUBLIC,
                  IS_PROTECTED) = ('name', 'type', 'mains', 'libs',
                                   'description', 'default_execution_data',
                                   'is_public', 'is_protected')

    # Keys of the nested DEFAULT_EXECUTION_DATA map. Note IS_PUBLIC is
    # rebound here to the same 'is_public' string it has in PROPERTIES,
    # so the one class attribute serves both levels.
    _EXECUTION_DATA_KEYS = (CLUSTER, INPUT, OUTPUT, CONFIGS, PARAMS, ARGS,
                            IS_PUBLIC,
                            INTERFACE) = ('cluster', 'input', 'output',
                                          'configs', 'params', 'args',
                                          'is_public', 'interface')

    ATTRIBUTES = (EXECUTIONS,
                  DEFAULT_EXECUTION_URL) = ('executions',
                                            'default_execution_url')

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _("Name of the job."),
                          constraints=[
                              constraints.Length(min=1, max=50),
                              constraints.AllowedPattern(SAHARA_NAME_REGEX),
                          ],
                          update_allowed=True),
        TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _("Type of the job."),
            constraints=[constraints.CustomConstraint('sahara.job_type')],
            required=True),
        MAINS:
        properties.Schema(
            properties.Schema.LIST,
            _("IDs or names of job's main job binary. In case of specific "
              "Sahara service, this property designed as a list, but accepts "
              "only one item."),
            schema=properties.Schema(
                properties.Schema.STRING,
                _("ID of job's main job binary."),
                constraints=[
                    constraints.CustomConstraint('sahara.job_binary')
                ]),
            # Length(max=1): Sahara itself accepts a single main binary.
            constraints=[constraints.Length(max=1)],
            default=[]),
        LIBS:
        properties.Schema(
            properties.Schema.LIST,
            _("IDs or names of job's lib job binaries."),
            schema=properties.Schema(
                properties.Schema.STRING,
                constraints=[
                    constraints.CustomConstraint('sahara.job_binary')
                ]),
            default=[]),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description of the job.'),
                          update_allowed=True),
        IS_PUBLIC:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('If True, job will be shared across the tenants.'),
                          update_allowed=True,
                          default=False),
        IS_PROTECTED:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('If True, job will be protected from modifications and '
              'can not be deleted until this property is set to False.'),
            update_allowed=True,
            default=False),
        # Nested map consumed by handle_signal() when no signal details
        # are supplied.
        DEFAULT_EXECUTION_DATA:
        properties.Schema(
            properties.Schema.MAP,
            _('Default execution data to use when run signal.'),
            schema={
                CLUSTER:
                properties.Schema(
                    properties.Schema.STRING,
                    _('ID or name of the cluster to run the job in.'),
                    constraints=[
                        constraints.CustomConstraint('sahara.cluster')
                    ],
                    required=True),
                INPUT:
                properties.Schema(
                    properties.Schema.STRING,
                    _('ID or name of the input data source.'),
                    constraints=[
                        constraints.CustomConstraint('sahara.data_source')
                    ]),
                OUTPUT:
                properties.Schema(
                    properties.Schema.STRING,
                    _('ID or name of the output data source.'),
                    constraints=[
                        constraints.CustomConstraint('sahara.data_source')
                    ]),
                CONFIGS:
                properties.Schema(properties.Schema.MAP,
                                  _('Config parameters to add to the job.'),
                                  default={}),
                PARAMS:
                properties.Schema(properties.Schema.MAP,
                                  _('Parameters to add to the job.'),
                                  default={}),
                ARGS:
                properties.Schema(properties.Schema.LIST,
                                  _('Arguments to add to the job.'),
                                  schema=properties.Schema(
                                      properties.Schema.STRING, ),
                                  default=[]),
                IS_PUBLIC:
                properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('If True, execution will be shared across the tenants.'),
                    default=False),
                INTERFACE:
                properties.Schema(properties.Schema.MAP,
                                  _('Interface arguments to add to the job.'),
                                  default={})
            },
            update_allowed=True)
    }

    attributes_schema = {
        DEFAULT_EXECUTION_URL:
        attributes.Schema(_("A signed url to create execution specified in "
                            "default_execution_data property."),
                          type=attributes.Schema.STRING),
        EXECUTIONS:
        attributes.Schema(_("List of the job executions."),
                          type=attributes.Schema.LIST)
    }

    default_client_name = 'sahara'

    entity = 'jobs'

    def translation_rules(self, properties):
        # Resolve user-supplied names to IDs before create/update:
        # MAINS/LIBS -> job binary IDs, and the nested execution data's
        # cluster/input/output -> cluster and data-source IDs.
        return [
            translation.TranslationRule(properties,
                                        translation.TranslationRule.RESOLVE,
                                        [self.MAINS],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resource_by_name_or_id',
                                        entity='job_binaries'),
            translation.TranslationRule(properties,
                                        translation.TranslationRule.RESOLVE,
                                        [self.LIBS],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resource_by_name_or_id',
                                        entity='job_binaries'),
            translation.TranslationRule(
                properties,
                translation.TranslationRule.RESOLVE,
                [self.DEFAULT_EXECUTION_DATA, self.CLUSTER],
                client_plugin=self.client_plugin(),
                finder='find_resource_by_name_or_id',
                entity='clusters'),
            translation.TranslationRule(
                properties,
                translation.TranslationRule.RESOLVE,
                [self.DEFAULT_EXECUTION_DATA, self.INPUT],
                client_plugin=self.client_plugin(),
                finder='find_resource_by_name_or_id',
                entity='data_sources'),
            translation.TranslationRule(
                properties,
                translation.TranslationRule.RESOLVE,
                [self.DEFAULT_EXECUTION_DATA, self.OUTPUT],
                client_plugin=self.client_plugin(),
                finder='find_resource_by_name_or_id',
                entity='data_sources')
        ]

    def handle_create(self):
        """Create the Sahara job and record its ID as the resource ID."""
        args = {
            # Fall back to the stack-generated name when NAME is unset.
            'name': self.properties[self.NAME]
            or self.physical_resource_name(),
            'type': self.properties[self.TYPE],
            # Note: sahara accepts only one main binary but schema demands
            # that it should be in a list.
            'mains': self.properties[self.MAINS],
            'libs': self.properties[self.LIBS],
            'description': self.properties[self.DESCRIPTION],
            'is_public': self.properties[self.IS_PUBLIC],
            'is_protected': self.properties[self.IS_PROTECTED]
        }

        job = self.client().jobs.create(**args)
        self.resource_id_set(job.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed properties to the job.

        DEFAULT_EXECUTION_DATA is consumed only by handle_signal(), so it
        is stripped from the diff rather than sent to Sahara.
        """
        if self.NAME in prop_diff:
            # An unset name still maps to the stack-generated name.
            name = prop_diff[self.NAME] or self.physical_resource_name()
            prop_diff[self.NAME] = name
        if self.DEFAULT_EXECUTION_DATA in prop_diff:
            del prop_diff[self.DEFAULT_EXECUTION_DATA]

        if prop_diff:
            self.client().jobs.update(self.resource_id, **prop_diff)

    def handle_signal(self, details):
        """Launch a job execution from signal details or the default data."""
        # Signal payload takes precedence over the template's default
        # execution data.
        # NOTE(review): if neither details nor default_execution_data is
        # set, data is None and data.get() below raises AttributeError —
        # confirm callers always supply one of them.
        data = details or self.properties.get(self.DEFAULT_EXECUTION_DATA)
        execution_args = {
            'job_id': self.resource_id,
            'cluster_id': data.get(self.CLUSTER),
            'input_id': data.get(self.INPUT),
            'output_id': data.get(self.OUTPUT),
            'is_public': data.get(self.IS_PUBLIC),
            'interface': data.get(self.INTERFACE),
            'configs': {
                'configs': data.get(self.CONFIGS),
                'params': data.get(self.PARAMS),
                'args': data.get(self.ARGS)
            },
            'is_protected': False
        }
        try:
            self.client().job_executions.create(**execution_args)
        except Exception as ex:
            # Surface any client failure as a resource failure on this stack.
            raise exception.ResourceFailure(ex, self)

    def handle_delete(self):
        """Delete all executions of this job, then the job itself."""
        if self.resource_id is None:
            return

        with self.client_plugin().ignore_not_found:
            job_exs = self.client().job_executions.find(id=self.resource_id)
            for ex in job_exs:
                self.client().job_executions.delete(ex.id)
        super(SaharaJob, self).handle_delete()

    def _resolve_attribute(self, name):
        """Resolve DEFAULT_EXECUTION_URL and EXECUTIONS attributes.

        Any other attribute name falls through and returns None.
        """
        if name == self.DEFAULT_EXECUTION_URL:
            return six.text_type(self._get_ec2_signed_url())
        elif name == self.EXECUTIONS:
            try:
                job_execs = self.client().job_executions.find(
                    id=self.resource_id)
            except Exception:
                # Best-effort attribute: a lookup failure reads as no
                # executions rather than a stack error.
                return []
            return [execution.to_dict() for execution in job_execs]
Exemplo n.º 13
0
class LoadBalancer(resource.Resource):
    """
    A resource to link a neutron pool with servers.
    """

    PROPERTIES = (
        POOL_ID, PROTOCOL_PORT, MEMBERS,
    ) = (
        'pool_id', 'protocol_port', 'members',
    )

    properties_schema = {
        POOL_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the load balancing pool.'),
            required=True,
            update_allowed=True
        ),
        PROTOCOL_PORT: properties.Schema(
            properties.Schema.INTEGER,
            _('Port number on which the servers are running on the members.'),
            required=True
        ),
        MEMBERS: properties.Schema(
            properties.Schema.LIST,
            _('The list of Nova server IDs load balanced.'),
            default=[],
            update_allowed=True
        ),
    }

    update_allowed_keys = ('Properties',)

    def _add_member(self, client, nova_client, member):
        """Create a pool member for one Nova server.

        Resolves the server's IP address, creates the neutron member in
        this resource's pool, and records the neutron member ID in
        resource data keyed by the server ID (so it can be deleted later).
        """
        address = nova_utils.server_to_ipaddress(nova_client, member)
        lb_member = client.create_member({
            'member': {
                'pool_id': self.properties[self.POOL_ID],
                'address': address,
                'protocol_port': self.properties[self.PROTOCOL_PORT]
            }})['member']
        db_api.resource_data_set(self, member, lb_member['id'])

    def handle_create(self):
        """Create a neutron pool member for each server in MEMBERS."""
        client = self.neutron()
        nova_client = self.nova()
        for member in self.properties.get(self.MEMBERS):
            self._add_member(client, nova_client, member)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Reconcile pool members with an updated MEMBERS list.

        Members dropped from the list are deleted (404s ignored), then
        newly listed servers are added.
        """
        if self.MEMBERS in prop_diff:
            members = set(prop_diff[self.MEMBERS])
            rd_members = db_api.resource_data_get_all(self)
            old_members = set(rd_members.keys())
            client = self.neutron()
            for member in old_members - members:
                member_id = rd_members[member]
                try:
                    client.delete_member(member_id)
                except NeutronClientException as ex:
                    # 404 means the member is already gone; anything else
                    # is a real failure.
                    if ex.status_code != 404:
                        raise ex
                db_api.resource_data_delete(self, member)
            nova_client = self.nova()
            for member in members - old_members:
                self._add_member(client, nova_client, member)

    def handle_delete(self):
        """Delete the pool member recorded for each server in MEMBERS.

        NOTE(review): this walks the current MEMBERS property rather than
        the stored resource data; confirm stale entries cannot be left
        behind after failed updates.
        """
        client = self.neutron()
        for member in self.properties.get(self.MEMBERS):
            try:
                member_id = db_api.resource_data_get(self, member)
                client.delete_member(member_id)
                db_api.resource_data_delete(self, member)
            except NeutronClientException as ex:
                if ex.status_code != 404:
                    raise ex
            except exception.NotFound:
                # No resource data for this member: nothing to delete.
                pass
Exemplo n.º 14
0
class PoolMember(neutron.NeutronResource):
    """
    A resource to handle load balancer members.
    """

    PROPERTIES = (
        POOL_ID, ADDRESS, PROTOCOL_PORT, WEIGHT, ADMIN_STATE_UP,
    ) = (
        'pool_id', 'address', 'protocol_port', 'weight', 'admin_state_up',
    )

    properties_schema = {
        POOL_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the load balancing pool.'),
            required=True,
            update_allowed=True
        ),
        ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address of the pool member on the pool network.'),
            required=True
        ),
        PROTOCOL_PORT: properties.Schema(
            properties.Schema.INTEGER,
            _('TCP port on which the pool member listens for requests or '
              'connections.'),
            required=True,
            constraints=[
                constraints.Range(0, 65535),
            ]
        ),
        WEIGHT: properties.Schema(
            properties.Schema.INTEGER,
            _('Weight of pool member in the pool (default to 1).'),
            constraints=[
                constraints.Range(0, 256),
            ],
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the pool member.'),
            default=True
        ),
    }

    attributes_schema = {
        'admin_state_up': _('The administrative state of this pool '
                            'member.'),
        'tenant_id': _('Tenant owning the pool member.'),
        'weight': _('Weight of the pool member in the pool.'),
        'address': _('IP address of the pool member.'),
        'pool_id': _('The ID of the load balancing pool.'),
        # BUG FIX: the original adjacent-string literal was missing a
        # space ("listens forrequests"); corrected user-facing text.
        'protocol_port': _('TCP port on which the pool member listens for '
                           'requests or connections.'),
        'show': _('All attributes.'),
    }

    update_allowed_keys = ('Properties',)

    def handle_create(self):
        """Create the neutron pool member and record its ID.

        WEIGHT is optional and only included when the user set it, so
        neutron applies its own default otherwise.
        """
        pool = self.properties[self.POOL_ID]
        client = self.neutron()
        protocol_port = self.properties[self.PROTOCOL_PORT]
        address = self.properties[self.ADDRESS]
        admin_state_up = self.properties[self.ADMIN_STATE_UP]
        weight = self.properties.get(self.WEIGHT)

        params = {
            'pool_id': pool,
            'address': address,
            'protocol_port': protocol_port,
            'admin_state_up': admin_state_up
        }

        if weight is not None:
            params['weight'] = weight

        member = client.create_member({'member': params})['member']
        self.resource_id_set(member['id'])

    def _show_resource(self):
        """Return the neutron representation of this member (for attributes)."""
        return self.neutron().show_member(self.resource_id)['member']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed properties to the existing member."""
        if prop_diff:
            self.neutron().update_member(
                self.resource_id, {'member': prop_diff})

    def handle_delete(self):
        """Delete the member, tolerating an already-deleted member."""
        client = self.neutron()
        try:
            client.delete_member(self.resource_id)
        except NeutronClientException as ex:
            self._handle_not_found_exception(ex)
        else:
            # Deletion was accepted: poll until it completes.
            return self._delete_task()
Exemplo n.º 15
0
class ElasticIp(resource.Resource):
    """An AWS-compatible Elastic IP, backed by Neutron or Nova floating IPs."""

    PROPERTIES = (
        DOMAIN, INSTANCE_ID,
    ) = (
        'Domain', 'InstanceId',
    )

    properties_schema = {
        DOMAIN: properties.Schema(
            properties.Schema.STRING,
            _('Set to "vpc" to have IP address allocation associated to your '
              'VPC.'),
            constraints=[
                constraints.AllowedValues(['vpc']),
            ]
        ),
        INSTANCE_ID: properties.Schema(
            properties.Schema.STRING,
            _('Instance ID to associate with EIP.')
        ),
    }

    attributes_schema = {
        'AllocationId': _('ID that AWS assigns to represent the allocation of'
                          ' the address for use with Amazon VPC. Returned only'
                          ' for VPC elastic IP addresses.')
    }

    def __init__(self, name, json_snippet, stack):
        super(ElasticIp, self).__init__(name, json_snippet, stack)
        # Lazily resolved by _ipaddress(); None until the IP is known.
        self.ipaddress = None

    def _ipaddress(self):
        """Return the floating IP address for this resource, or ''.

        The address is looked up once (via Neutron when DOMAIN is set and
        the neutron client is available, otherwise via Nova) and cached on
        the instance.
        """
        if self.ipaddress is None and self.resource_id is not None:
            if self.properties[self.DOMAIN] and clients.neutronclient:
                ne = clients.neutronclient.exceptions.NeutronClientException
                try:
                    ips = self.neutron().show_floatingip(self.resource_id)
                except ne as e:
                    # NOTE(review): non-404 errors are swallowed without a
                    # log here — the lookup is best-effort; confirm this is
                    # intentional.
                    if e.status_code == 404:
                        logger.warn(_("Floating IPs not found: %s") % str(e))
                else:
                    self.ipaddress = ips['floatingip']['floating_ip_address']
            else:
                try:
                    ips = self.nova().floating_ips.get(self.resource_id)
                except clients.novaclient.exceptions.NotFound as ex:
                    logger.warn(_("Floating IPs not found: %s") % str(ex))
                else:
                    self.ipaddress = ips.ip
        return self.ipaddress or ''

    def handle_create(self):
        """Allocate a floating IP for the current tenant."""
        ips = None
        if self.properties[self.DOMAIN] and clients.neutronclient:
            from heat.engine.resources.internet_gateway import InternetGateway

            ext_net = InternetGateway.get_external_network_id(self.neutron())
            props = {'floating_network_id': ext_net}
            ips = self.neutron().create_floatingip({
                'floatingip': props})['floatingip']
            self.ipaddress = ips['floating_ip_address']
            self.resource_id_set(ips['id'])
            logger.info(_('ElasticIp create %s') % str(ips))
        else:
            if self.properties[self.DOMAIN]:
                raise exception.Error(_('Domain property can not be set on '
                                      'resource %s without Neutron available')
                                      % self.name)
            try:
                ips = self.nova().floating_ips.create()
            except clients.novaclient.exceptions.NotFound:
                with excutils.save_and_reraise_exception():
                    msg = _("No default floating IP pool configured. "
                            "Set 'default_floating_pool' in nova.conf.")
                    logger.error(msg)

            if ips:
                self.ipaddress = ips.ip
                self.resource_id_set(ips.id)
                logger.info(_('ElasticIp create %s') % str(ips))

        # Optionally associate the new IP with a server.
        instance_id = self.properties[self.INSTANCE_ID]
        if instance_id:
            server = self.nova().servers.get(instance_id)
            server.add_floating_ip(self._ipaddress())

    def handle_delete(self):
        """Disassociate the floating IP from its server and de-allocate it."""
        instance_id = self.properties[self.INSTANCE_ID]
        if instance_id:
            try:
                server = self.nova().servers.get(instance_id)
                if server:
                    server.remove_floating_ip(self._ipaddress())
            except clients.novaclient.exceptions.NotFound:
                # The server is already gone; nothing to disassociate.
                pass

        # De-allocate the floating IP itself. (This was a stray string
        # literal acting as a no-op statement; now a real comment.)
        if self.resource_id is not None:
            if self.properties[self.DOMAIN] and clients.neutronclient:
                ne = clients.neutronclient.exceptions.NeutronClientException
                try:
                    self.neutron().delete_floatingip(self.resource_id)
                except ne as e:
                    # Only an already-deleted IP (404) is acceptable.
                    if e.status_code != 404:
                        raise e
            else:
                try:
                    self.nova().floating_ips.delete(self.resource_id)
                except clients.novaclient.exceptions.NotFound:
                    pass

    def FnGetRefId(self):
        """References to this resource resolve to the IP address string."""
        return unicode(self._ipaddress())

    def _resolve_attribute(self, name):
        """Resolve AllocationId; other names fall through to None."""
        if name == 'AllocationId':
            return unicode(self.resource_id)
Exemplo n.º 16
0
class WebHook(resource.Resource):
    """Represents a Rackspace AutoScale webhook.

    Exposes the URLs of the webhook as attributes.
    """

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    PROPERTIES = (
        POLICY, NAME, METADATA,
    ) = (
        'policy', 'name', 'metadata',
    )

    ATTRIBUTES = (
        EXECUTE_URL, CAPABILITY_URL,
    ) = (
        'executeUrl', 'capabilityUrl',
    )

    properties_schema = {
        POLICY: properties.Schema(
            properties.Schema.STRING,
            _('The policy that this webhook should apply to, in '
              '{group_id}:{policy_id} format. Generally a Ref to a Policy '
              'resource.'),
            required=True
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name of this webhook.'),
            required=True,
            update_allowed=True
        ),
        METADATA: properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary key/value metadata for this webhook.'),
            update_allowed=True
        ),
    }

    attributes_schema = {
        EXECUTE_URL: attributes.Schema(
            _("The url for executing the webhook (requires auth)."),
            cache_mode=attributes.Schema.CACHE_NONE
        ),
        CAPABILITY_URL: attributes.Schema(
            _("The url for executing the webhook (doesn't require auth)."),
            cache_mode=attributes.Schema.CACHE_NONE
        ),
    }

    def _get_args(self, props):
        """Build pyrax webhook keyword arguments from resource properties."""
        group_id, policy_id = props[self.POLICY].split(':', 1)
        return {
            'name': props[self.NAME],
            'scaling_group': group_id,
            'policy': policy_id,
            'metadata': props.get(self.METADATA),
        }

    def handle_create(self):
        """Create the webhook and stash its execute/capability URLs."""
        create_kwargs = self._get_args(self.properties)
        webhook = self.auto_scale().add_webhook(**create_kwargs)
        self.resource_id_set(webhook.id)

        # Map pyrax link relations onto our attribute keys.
        rel_to_key = {'self': 'executeUrl', 'capability': 'capabilityUrl'}
        for link in webhook.links:
            key = rel_to_key.get(link['rel'])
            if key is None:
                continue
            self.data_set(key, link['href'].encode('utf-8'))

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Replace the remote webhook with one built from the new template."""
        new_props = json_snippet.properties(self.properties_schema,
                                            self.context)
        replace_kwargs = self._get_args(new_props)
        replace_kwargs['webhook'] = self.resource_id
        self.auto_scale().replace_webhook(**replace_kwargs)

    def _resolve_attribute(self, key):
        """Return the stored URL for *key*, decoded, or None if unset."""
        stored = self.data().get(key)
        return stored.decode('utf-8') if stored is not None else None

    def handle_delete(self):
        """Delete the webhook; never-created or missing webhooks are no-ops."""
        if self.resource_id is None:
            return
        group_id, policy_id = self.properties[self.POLICY].split(':', 1)
        try:
            self.auto_scale().delete_webhook(group_id, policy_id,
                                             self.resource_id)
        except NotFound:
            pass

    def auto_scale(self):
        """Return the Rackspace Auto Scale service client."""
        return self.client('auto_scale')
Exemplo n.º 17
0
class CloudLoadBalancer(resource.Resource):

    """Represents a Rackspace Cloud Loadbalancer."""

    # Heat property names (left tuple) mapped to the camelCase keys the
    # Cloud Load Balancer API expects (right tuple).
    PROPERTIES = (
        NAME, NODES, PROTOCOL, ACCESS_LIST, HALF_CLOSED, ALGORITHM,
        CONNECTION_LOGGING, METADATA, PORT, TIMEOUT,
        CONNECTION_THROTTLE, SESSION_PERSISTENCE, VIRTUAL_IPS,
        CONTENT_CACHING, HEALTH_MONITOR, SSL_TERMINATION, ERROR_PAGE,
    ) = (
        'name', 'nodes', 'protocol', 'accessList', 'halfClosed', 'algorithm',
        'connectionLogging', 'metadata', 'port', 'timeout',
        'connectionThrottle', 'sessionPersistence', 'virtualIps',
        'contentCaching', 'healthMonitor', 'sslTermination', 'errorPage',
    )

    _NODE_KEYS = (
        NODE_ADDRESSES, NODE_PORT, NODE_CONDITION, NODE_TYPE,
        NODE_WEIGHT,
    ) = (
        'addresses', 'port', 'condition', 'type',
        'weight',
    )

    _ACCESS_LIST_KEYS = (
        ACCESS_LIST_ADDRESS, ACCESS_LIST_TYPE,
    ) = (
        'address', 'type',
    )

    _CONNECTION_THROTTLE_KEYS = (
        CONNECTION_THROTTLE_MAX_CONNECTION_RATE,
        CONNECTION_THROTTLE_MIN_CONNECTIONS,
        CONNECTION_THROTTLE_MAX_CONNECTIONS,
        CONNECTION_THROTTLE_RATE_INTERVAL,
    ) = (
        'maxConnectionRate',
        'minConnections',
        'maxConnections',
        'rateInterval',
    )

    _VIRTUAL_IP_KEYS = (
        VIRTUAL_IP_TYPE, VIRTUAL_IP_IP_VERSION, VIRTUAL_IP_ID
    ) = (
        'type', 'ipVersion', 'id'
    )

    _HEALTH_MONITOR_KEYS = (
        HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION, HEALTH_MONITOR_DELAY,
        HEALTH_MONITOR_TIMEOUT, HEALTH_MONITOR_TYPE, HEALTH_MONITOR_BODY_REGEX,
        HEALTH_MONITOR_HOST_HEADER, HEALTH_MONITOR_PATH,
        HEALTH_MONITOR_STATUS_REGEX,
    ) = (
        'attemptsBeforeDeactivation', 'delay',
        'timeout', 'type', 'bodyRegex',
        'hostHeader', 'path',
        'statusRegex',
    )
    # Subset of health monitor keys valid for the CONNECT monitor type;
    # used by validate() to narrow the schema.
    _HEALTH_MONITOR_CONNECT_KEYS = (
        HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION, HEALTH_MONITOR_DELAY,
        HEALTH_MONITOR_TIMEOUT, HEALTH_MONITOR_TYPE,
    )

    _SSL_TERMINATION_KEYS = (
        SSL_TERMINATION_SECURE_PORT, SSL_TERMINATION_PRIVATEKEY,
        SSL_TERMINATION_CERTIFICATE, SSL_TERMINATION_INTERMEDIATE_CERTIFICATE,
        SSL_TERMINATION_SECURE_TRAFFIC_ONLY,
    ) = (
        'securePort', 'privatekey',
        'certificate', 'intermediateCertificate',
        'secureTrafficOnly',
    )

    ATTRIBUTES = (
        PUBLIC_IP, VIPS
    ) = (
        'PublicIp', 'virtualIps'
    )

    # Load-balancing algorithms accepted by the ALGORITHM property.
    ALGORITHMS = ["LEAST_CONNECTIONS", "RANDOM", "ROUND_ROBIN",
                  "WEIGHTED_LEAST_CONNECTIONS", "WEIGHTED_ROUND_ROBIN"]

    # Kept as a named schema (rather than inline) because validate() reuses
    # it to re-validate CONNECT-type monitors against a narrowed key set.
    _health_monitor_schema = {
        HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION: properties.Schema(
            properties.Schema.NUMBER,
            required=True,
            constraints=[
                constraints.Range(1, 10),
            ]
        ),
        HEALTH_MONITOR_DELAY: properties.Schema(
            properties.Schema.NUMBER,
            required=True,
            constraints=[
                constraints.Range(1, 3600),
            ]
        ),
        HEALTH_MONITOR_TIMEOUT: properties.Schema(
            properties.Schema.NUMBER,
            required=True,
            constraints=[
                constraints.Range(1, 300),
            ]
        ),
        HEALTH_MONITOR_TYPE: properties.Schema(
            properties.Schema.STRING,
            required=True,
            constraints=[
                constraints.AllowedValues(['CONNECT', 'HTTP', 'HTTPS']),
            ]
        ),
        HEALTH_MONITOR_BODY_REGEX: properties.Schema(
            properties.Schema.STRING
        ),
        HEALTH_MONITOR_HOST_HEADER: properties.Schema(
            properties.Schema.STRING
        ),
        HEALTH_MONITOR_PATH: properties.Schema(
            properties.Schema.STRING
        ),
        HEALTH_MONITOR_STATUS_REGEX: properties.Schema(
            properties.Schema.STRING
        ),
    }

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING
        ),
        NODES: properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NODE_ADDRESSES: properties.Schema(
                        properties.Schema.LIST,
                        required=True,
                        description=(_("IP addresses for the load balancer "
                                     "node. Must have at least one "
                                     "address.")),
                        schema=properties.Schema(
                            properties.Schema.STRING
                        )
                    ),
                    NODE_PORT: properties.Schema(
                        properties.Schema.NUMBER,
                        required=True
                    ),
                    NODE_CONDITION: properties.Schema(
                        properties.Schema.STRING,
                        default='ENABLED',
                        constraints=[
                            constraints.AllowedValues(['ENABLED',
                                                       'DISABLED']),
                        ]
                    ),
                    NODE_TYPE: properties.Schema(
                        properties.Schema.STRING,
                        constraints=[
                            constraints.AllowedValues(['PRIMARY',
                                                       'SECONDARY']),
                        ]
                    ),
                    NODE_WEIGHT: properties.Schema(
                        properties.Schema.NUMBER,
                        constraints=[
                            constraints.Range(1, 100),
                        ]
                    ),
                },
            ),
            required=True,
            update_allowed=True
        ),
        PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            required=True,
            constraints=[
                constraints.AllowedValues(['DNS_TCP', 'DNS_UDP', 'FTP',
                                           'HTTP', 'HTTPS', 'IMAPS',
                                           'IMAPv4', 'LDAP', 'LDAPS',
                                           'MYSQL', 'POP3', 'POP3S', 'SMTP',
                                           'TCP', 'TCP_CLIENT_FIRST', 'UDP',
                                           'UDP_STREAM', 'SFTP']),
            ]
        ),
        ACCESS_LIST: properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ACCESS_LIST_ADDRESS: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                    ACCESS_LIST_TYPE: properties.Schema(
                        properties.Schema.STRING,
                        required=True,
                        constraints=[
                            constraints.AllowedValues(['ALLOW', 'DENY']),
                        ]
                    ),
                },
            )
        ),
        HALF_CLOSED: properties.Schema(
            properties.Schema.BOOLEAN
        ),
        ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            constraints=[
                constraints.AllowedValues(ALGORITHMS)
            ]
        ),
        CONNECTION_LOGGING: properties.Schema(
            properties.Schema.BOOLEAN
        ),
        METADATA: properties.Schema(
            properties.Schema.MAP
        ),
        PORT: properties.Schema(
            properties.Schema.NUMBER,
            required=True
        ),
        TIMEOUT: properties.Schema(
            properties.Schema.NUMBER,
            constraints=[
                constraints.Range(1, 120),
            ]
        ),
        CONNECTION_THROTTLE: properties.Schema(
            properties.Schema.MAP,
            schema={
                CONNECTION_THROTTLE_MAX_CONNECTION_RATE: properties.Schema(
                    properties.Schema.NUMBER,
                    constraints=[
                        constraints.Range(0, 100000),
                    ]
                ),
                CONNECTION_THROTTLE_MIN_CONNECTIONS: properties.Schema(
                    properties.Schema.NUMBER,
                    constraints=[
                        constraints.Range(1, 1000),
                    ]
                ),
                CONNECTION_THROTTLE_MAX_CONNECTIONS: properties.Schema(
                    properties.Schema.NUMBER,
                    constraints=[
                        constraints.Range(1, 100000),
                    ]
                ),
                CONNECTION_THROTTLE_RATE_INTERVAL: properties.Schema(
                    properties.Schema.NUMBER,
                    constraints=[
                        constraints.Range(1, 3600),
                    ]
                ),
            }
        ),
        SESSION_PERSISTENCE: properties.Schema(
            properties.Schema.STRING,
            constraints=[
                constraints.AllowedValues(['HTTP_COOKIE', 'SOURCE_IP']),
            ]
        ),
        VIRTUAL_IPS: properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    VIRTUAL_IP_TYPE: properties.Schema(
                        properties.Schema.STRING,
                        "The type of VIP (public or internal). This property"
                        " cannot be specified if 'id' is specified. This "
                        "property must be specified if id is not specified.",
                        constraints=[
                            constraints.AllowedValues(['SERVICENET',
                                                       'PUBLIC']),
                        ]
                    ),
                    VIRTUAL_IP_IP_VERSION: properties.Schema(
                        properties.Schema.STRING,
                        "IP version of the VIP. This property cannot be "
                        "specified if 'id' is specified. This property must "
                        "be specified if id is not specified.",
                        constraints=[
                            constraints.AllowedValues(['IPV6', 'IPV4']),
                        ]
                    ),
                    VIRTUAL_IP_ID: properties.Schema(
                        properties.Schema.NUMBER,
                        "ID of a shared VIP to use instead of creating a "
                        "new one. This property cannot be specified if type"
                        " or version is specified."
                    )
                },
            ),
            required=True,
            constraints=[
                constraints.Length(min=1)
            ]
        ),
        CONTENT_CACHING: properties.Schema(
            properties.Schema.STRING,
            constraints=[
                constraints.AllowedValues(['ENABLED', 'DISABLED']),
            ]
        ),
        HEALTH_MONITOR: properties.Schema(
            properties.Schema.MAP,
            schema=_health_monitor_schema
        ),
        SSL_TERMINATION: properties.Schema(
            properties.Schema.MAP,
            schema={
                SSL_TERMINATION_SECURE_PORT: properties.Schema(
                    properties.Schema.NUMBER,
                    default=443
                ),
                SSL_TERMINATION_PRIVATEKEY: properties.Schema(
                    properties.Schema.STRING,
                    required=True
                ),
                SSL_TERMINATION_CERTIFICATE: properties.Schema(
                    properties.Schema.STRING,
                    required=True
                ),
                # only required if configuring intermediate ssl termination
                # add to custom validation
                SSL_TERMINATION_INTERMEDIATE_CERTIFICATE: properties.Schema(
                    properties.Schema.STRING
                ),
                # pyrax will default to false
                SSL_TERMINATION_SECURE_TRAFFIC_ONLY: properties.Schema(
                    properties.Schema.BOOLEAN,
                    default=False
                ),
            }
        ),
        ERROR_PAGE: properties.Schema(
            properties.Schema.STRING
        ),
    }

    attributes_schema = {
        PUBLIC_IP: attributes.Schema(
            _('Public IP address of the specified instance.')
        ),
        VIPS: attributes.Schema(
            _("A list of assigned virtual ip addresses")
        )
    }

    def __init__(self, name, json_snippet, stack):
        """Initialize the resource and cache the cloud LB client."""
        super(CloudLoadBalancer, self).__init__(name, json_snippet, stack)
        self.clb = self.cloud_lb()

    def cloud_lb(self):
        """Return the Rackspace Cloud Load Balancer client."""
        return self.client('cloud_lb')

    def _setup_properties(self, properties, function):
        """Use defined schema properties as kwargs for loadbalancer objects.

        Returns a list of *function* results (one per item dict with None
        values stripped), a single no-arg result when only *function* is
        given, or None when neither applies.
        """
        if properties and function:
            return [function(**self._remove_none(item_dict))
                    for item_dict in properties]
        elif function:
            return [function()]

    def _alter_properties_for_api(self):
        """Set up required, but useless, key/value pairs.

        The following properties have useless key/value pairs which must
        be passed into the api. Set them up to make template definition easier.
        """
        session_persistence = None
        if self.SESSION_PERSISTENCE in self.properties.data:
            session_persistence = {'persistenceType':
                                   self.properties[self.SESSION_PERSISTENCE]}
        connection_logging = None
        if self.CONNECTION_LOGGING in self.properties.data:
            connection_logging = {"enabled":
                                  self.properties[self.CONNECTION_LOGGING]}
        metadata = None
        if self.METADATA in self.properties.data:
            # API wants a list of {'key': ..., 'value': ...} dicts, not a map.
            metadata = [{'key': k, 'value': v}
                        for k, v
                        in six.iteritems(self.properties[self.METADATA])]

        return (session_persistence, connection_logging, metadata)

    def _check_status(self, loadbalancer, status_list):
        """Update the loadbalancer state, check the status."""
        # get() refreshes the object from the API before the status check.
        loadbalancer.get()
        if loadbalancer.status in status_list:
            return True
        else:
            return False

    def _configure_post_creation(self, loadbalancer):
        """Configure all load balancer properties post creation.

        These properties can only be set after the load balancer is created.
        This is a generator driven by scheduler.TaskRunner (see
        handle_create); each ``yield`` waits for the balancer to go ACTIVE
        before applying the next setting.
        """
        if self.properties[self.ACCESS_LIST]:
            while not self._check_status(loadbalancer, ['ACTIVE']):
                yield
            loadbalancer.add_access_list(self.properties[self.ACCESS_LIST])

        if self.properties[self.ERROR_PAGE]:
            while not self._check_status(loadbalancer, ['ACTIVE']):
                yield
            loadbalancer.set_error_page(self.properties[self.ERROR_PAGE])

        if self.properties[self.SSL_TERMINATION]:
            while not self._check_status(loadbalancer, ['ACTIVE']):
                yield
            ssl_term = self.properties[self.SSL_TERMINATION]
            loadbalancer.add_ssl_termination(
                ssl_term[self.SSL_TERMINATION_SECURE_PORT],
                ssl_term[self.SSL_TERMINATION_PRIVATEKEY],
                ssl_term[self.SSL_TERMINATION_CERTIFICATE],
                intermediateCertificate=ssl_term[
                    self.SSL_TERMINATION_INTERMEDIATE_CERTIFICATE],
                enabled=True,
                secureTrafficOnly=ssl_term[
                    self.SSL_TERMINATION_SECURE_TRAFFIC_ONLY])

        if self.CONTENT_CACHING in self.properties:
            enabled = self.properties[self.CONTENT_CACHING] == 'ENABLED'
            while not self._check_status(loadbalancer, ['ACTIVE']):
                yield
            loadbalancer.content_caching = enabled

    def _process_node(self, node):
        """Yield one node dict per address, flattening the 'addresses' list.

        The API takes a single 'address' per node, while the template allows
        a list; nodes without addresses are yielded unchanged.
        """
        if not node.get(self.NODE_ADDRESSES):
            yield node
        else:
            for addr in node.get(self.NODE_ADDRESSES):
                norm_node = copy.deepcopy(node)
                norm_node['address'] = addr
                del norm_node[self.NODE_ADDRESSES]
                yield norm_node

    def _process_nodes(self, node_list):
        """Flatten all nodes in *node_list* into a single iterator."""
        node_itr = itertools.imap(self._process_node, node_list)
        return itertools.chain.from_iterable(node_itr)

    def handle_create(self):
        """Create the load balancer and run post-creation configuration."""
        node_list = self._process_nodes(self.properties.get(self.NODES))
        nodes = [self.clb.Node(**node) for node in node_list]
        vips = self.properties.get(self.VIRTUAL_IPS)

        virtual_ips = self._setup_properties(vips, self.clb.VirtualIP)

        (session_persistence, connection_logging, metadata) = \
            self._alter_properties_for_api()

        lb_body = {
            'port': self.properties[self.PORT],
            'protocol': self.properties[self.PROTOCOL],
            'nodes': nodes,
            'virtual_ips': virtual_ips,
            'algorithm': self.properties.get(self.ALGORITHM),
            'halfClosed': self.properties.get(self.HALF_CLOSED),
            'connectionThrottle': self.properties.get(
                self.CONNECTION_THROTTLE),
            'metadata': metadata,
            'healthMonitor': self.properties.get(self.HEALTH_MONITOR),
            'sessionPersistence': session_persistence,
            'timeout': self.properties.get(self.TIMEOUT),
            'connectionLogging': connection_logging,
        }

        lb_name = (self.properties.get(self.NAME) or
                   self.physical_resource_name())
        LOG.debug("Creating loadbalancer: %s" % {lb_name: lb_body})
        loadbalancer = self.clb.create(lb_name, **lb_body)
        self.resource_id_set(str(loadbalancer.id))

        # Access list / error page / SSL / caching can only be set once the
        # balancer exists; drive the generator synchronously here.
        post_create = scheduler.TaskRunner(self._configure_post_creation,
                                           loadbalancer)
        post_create(timeout=600)
        return loadbalancer

    def check_create_complete(self, loadbalancer):
        """Create is complete once the balancer reports ACTIVE."""
        return self._check_status(loadbalancer, ['ACTIVE'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Add and remove nodes specified in the prop_diff."""
        loadbalancer = self.clb.get(self.resource_id)
        if self.NODES in prop_diff:
            current_nodes = loadbalancer.nodes
            diff_nodes = self._process_nodes(prop_diff[self.NODES])
            #Loadbalancers can be uniquely identified by address and port.
            #Old is a dict of all nodes the loadbalancer currently knows about.
            old = dict(("{0.address}{0.port}".format(node), node)
                       for node in current_nodes)
            #New is a dict of the nodes the loadbalancer will know about after
            #this update.
            new = dict(("%s%s" % (node["address"],
                                  node[self.NODE_PORT]), node)
                       for node in diff_nodes)

            old_set = set(old.keys())
            new_set = set(new.keys())

            deleted = old_set.difference(new_set)
            added = new_set.difference(old_set)
            updated = new_set.intersection(old_set)

            if len(current_nodes) + len(added) - len(deleted) < 1:
                raise ValueError(_("The loadbalancer:%s requires at least one "
                                 "node.") % self.name)
            """
            Add loadbalancers in the new map that are not in the old map.
            Add before delete to avoid deleting the last node and getting in
            an invalid state.
            """
            new_nodes = [self.clb.Node(**new[lb_node])
                         for lb_node in added]
            if new_nodes:
                loadbalancer.add_nodes(new_nodes)

            #Delete loadbalancers in the old dict that are not in the new dict.
            for node in deleted:
                old[node].delete()

            #Update nodes that have been changed
            for node in updated:
                node_changed = False
                for attribute in new[node].keys():
                    if new[node][attribute] != getattr(old[node], attribute):
                        node_changed = True
                        setattr(old[node], attribute, new[node][attribute])
                if node_changed:
                    old[node].update()

    def handle_delete(self):
        """Delete the balancer unless it is already gone or being deleted."""
        if self.resource_id is None:
            return
        try:
            loadbalancer = self.clb.get(self.resource_id)
        except NotFound:
            pass
        else:
            if loadbalancer.status != 'DELETED':
                loadbalancer.delete()

    def _remove_none(self, property_dict):
        """Remove None values that would cause schema validation problems.

        These are values that may be initialized to None.
        """
        return dict((key, value)
                    for (key, value) in six.iteritems(property_dict)
                    if value is not None)

    def validate(self):
        """Validate any of the provided params."""
        res = super(CloudLoadBalancer, self).validate()
        if res:
            return res

        if self.properties.get(self.HALF_CLOSED):
            if not (self.properties[self.PROTOCOL] == 'TCP' or
                    self.properties[self.PROTOCOL] == 'TCP_CLIENT_FIRST'):
                message = (_('The %s property is only available for the TCP '
                             'or TCP_CLIENT_FIRST protocols')
                           % self.HALF_CLOSED)
                raise exception.StackValidationFailed(message=message)

        #health_monitor connect and http types require completely different
        #schema
        if self.properties.get(self.HEALTH_MONITOR):
            prop_val = self.properties[self.HEALTH_MONITOR]
            health_monitor = self._remove_none(prop_val)

            schema = self._health_monitor_schema
            if health_monitor[self.HEALTH_MONITOR_TYPE] == 'CONNECT':
                schema = dict((k, v) for k, v in schema.items()
                              if k in self._HEALTH_MONITOR_CONNECT_KEYS)
            Properties(schema,
                       health_monitor,
                       function.resolve,
                       self.name).validate()

        # if a vip specifies and id, it can't specify version or type;
        # otherwise version and type are required
        for vip in self.properties.get(self.VIRTUAL_IPS, []):
            has_id = vip.get(self.VIRTUAL_IP_ID) is not None
            has_version = vip.get(self.VIRTUAL_IP_IP_VERSION) is not None
            has_type = vip.get(self.VIRTUAL_IP_TYPE) is not None
            if has_id:
                if (has_version or has_type):
                    message = _("Cannot specify type or version if VIP id is"
                                " specified.")
                    raise exception.StackValidationFailed(message=message)
            elif not (has_version and has_type):
                message = _("Must specify VIP type and version if no id "
                            "specified.")
                raise exception.StackValidationFailed(message=message)

    def _public_ip(self, lb):
        """Return the first PUBLIC virtual IP address, or None if there
        is none.  Uses the py2 ``unicode`` builtin."""
        for ip in lb.virtual_ips:
            if ip.type == 'PUBLIC':
                return unicode(ip.address)

    def _resolve_attribute(self, key):
        """Look up an attribute against the live load balancer."""
        if self.resource_id:
            lb = self.clb.get(self.resource_id)
            # NOTE: this maps keys to already-computed VALUES (not callables),
            # so both attributes are evaluated eagerly on every lookup.
            attribute_function = {
                self.PUBLIC_IP: self._public_ip(lb),
                self.VIPS: [{"id": vip.id,
                             "type": vip.type,
                             "ip_version": vip.ip_version}
                            for vip in lb.virtual_ips]
            }
            if key not in attribute_function:
                raise exception.InvalidTemplateAttribute(resource=self.name,
                                                         key=key)
            function = attribute_function[key]
            LOG.info(_('%(name)s.GetAtt(%(key)s) == %(function)s'),
                     {'name': self.name, 'key': key, 'function': function})
            return function
# Exemplo n.º 18
class SaharaCluster(resource.Resource):

    """A resource for managing a Sahara (Hadoop data processing) cluster."""

    PROPERTIES = (
        NAME,
        PLUGIN_NAME,
        HADOOP_VERSION,
        CLUSTER_TEMPLATE_ID,
        KEY_NAME,
        IMAGE,
        MANAGEMENT_NETWORK,
    ) = (
        'name',
        'plugin_name',
        'hadoop_version',
        'cluster_template_id',
        'key_name',
        'image',
        'neutron_management_network',
    )

    ATTRIBUTES = (
        STATUS,
        INFO,
    ) = (
        "status",
        "info",
    )

    # Sahara cluster states this resource reacts to during create/delete.
    CLUSTER_STATUSES = (CLUSTER_ACTIVE, CLUSTER_ERROR) = ('Active', 'Error')
    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Hadoop cluster name.'),
        ),
        PLUGIN_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
        ),
        HADOOP_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
        ),
        CLUSTER_TEMPLATE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of the Cluster Template used for '
              'Node Groups and configurations.'),
            required=True,
        ),
        KEY_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Keypair added to instances to make them accessible for user.'),
            constraints=[constraints.CustomConstraint('nova.keypair')],
        ),
        IMAGE:
        properties.Schema(
            properties.Schema.STRING,
            _('Name or UUID of the image used to boot Hadoop nodes.'),
            constraints=[constraints.CustomConstraint('glance.image')],
        ),
        MANAGEMENT_NETWORK:
        properties.Schema(
            properties.Schema.STRING,
            _('Name or UUID of Neutron network.'),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
    }

    attributes_schema = {
        STATUS: attributes.Schema(_("Cluster status."), ),
        INFO: attributes.Schema(_("Cluster information."), ),
    }

    default_client_name = 'sahara'

    def _cluster_name(self):
        """Return the NAME property, or the generated physical name."""
        name = self.properties.get(self.NAME)
        if name:
            return name
        return self.physical_resource_name()

    def handle_create(self):
        """Start cluster creation; returns the new cluster's id."""
        plugin_name = self.properties[self.PLUGIN_NAME]
        hadoop_version = self.properties[self.HADOOP_VERSION]
        cluster_template_id = self.properties[self.CLUSTER_TEMPLATE_ID]
        image_id = self.properties.get(self.IMAGE)
        if image_id:
            # Resolve a name-or-UUID to a concrete image id.
            image_id = self.client_plugin('glance').get_image_id(image_id)

        # check that image is provided in case when
        # cluster template is missing one
        cluster_template = self.client().cluster_templates.get(
            cluster_template_id)
        if cluster_template.default_image_id is None and not image_id:
            msg = _("%(img)s must be provided: Referenced cluster template "
                    "%(tmpl)s has no default_image_id defined.") % {
                        'img': self.IMAGE,
                        'tmpl': cluster_template_id
                    }
            raise exception.StackValidationFailed(message=msg)

        key_name = self.properties.get(self.KEY_NAME)
        net_id = self.properties.get(self.MANAGEMENT_NETWORK)
        if net_id:
            # Resolve a network name-or-UUID to a network id.
            net_id = self.client_plugin('neutron').find_neutron_resource(
                self.properties, self.MANAGEMENT_NETWORK, 'network')

        cluster = self.client().clusters.create(
            self._cluster_name(),
            plugin_name,
            hadoop_version,
            cluster_template_id=cluster_template_id,
            user_keypair_id=key_name,
            default_image_id=image_id,
            net_id=net_id)
        LOG.info(_LI('Cluster "%s" is being started.'), cluster.name)
        self.resource_id_set(cluster.id)
        return self.resource_id

    def check_create_complete(self, cluster_id):
        """Poll until the cluster is Active; raise on the Error state."""
        cluster = self.client().clusters.get(cluster_id)
        if cluster.status == self.CLUSTER_ERROR:
            raise resource.ResourceInError(resource_status=cluster.status)

        if cluster.status != self.CLUSTER_ACTIVE:
            return False

        LOG.info(_LI("Cluster '%s' has been created"), cluster.name)
        return True

    def handle_delete(self):
        """Request cluster deletion; returns the id to poll, or None.

        Returning None when the cluster is already gone short-circuits
        check_delete_complete.
        """
        if not self.resource_id:
            return

        try:
            self.client().clusters.delete(self.resource_id)
        except Exception as ex:
            # Re-raises anything other than a not-found error.
            self.client_plugin().ignore_not_found(ex)
            return None

        return self.resource_id

    def check_delete_complete(self, resource_id):
        """Poll until the cluster can no longer be fetched."""
        if not resource_id:
            return True

        try:
            cluster = self.client().clusters.get(resource_id)
        except Exception as ex:
            # A not-found error means deletion finished; anything else
            # propagates.
            self.client_plugin().ignore_not_found(ex)
            LOG.info(_LI("Cluster '%s' has been deleted"),
                     self._cluster_name())
            return True
        else:
            if cluster.status == self.CLUSTER_ERROR:
                raise resource.ResourceInError(resource_status=cluster.status)

        return False

    def _resolve_attribute(self, name):
        """Return the named attribute from the live cluster (None if absent)."""
        cluster = self.client().clusters.get(self.resource_id)
        return getattr(cluster, name, None)

    def validate(self):
        """Require a management network when running on neutron."""
        res = super(SaharaCluster, self).validate()
        if res:
            return res

        # check if running on neutron and MANAGEMENT_NETWORK missing
        # NOTE(pshchelo): on nova-network with MANAGEMENT_NETWORK present
        # overall stack validation will fail due to neutron.network constraint,
        # although the message will be not really relevant.
        if (self.is_using_neutron()
                and not self.properties.get(self.MANAGEMENT_NETWORK)):
            msg = _("%s must be provided") % self.MANAGEMENT_NETWORK
            raise exception.StackValidationFailed(message=msg)
# Exemplo n.º 19
class ElasticIp(resource.Resource):
    """AWS-compatible Elastic IP resource.

    Allocates a floating IP either from Neutron (when the ``Domain``
    property is set to ``vpc``) or from the Nova floating-ip API, and
    optionally associates it with the server given by ``InstanceId``.
    """

    PROPERTIES = (
        DOMAIN,
        INSTANCE_ID,
    ) = (
        'Domain',
        'InstanceId',
    )

    ATTRIBUTES = (ALLOCATION_ID, ) = ('AllocationId', )

    properties_schema = {
        DOMAIN:
        properties.Schema(
            properties.Schema.STRING,
            _('Set to "vpc" to have IP address allocation associated to your '
              'VPC.'),
            constraints=[
                constraints.AllowedValues(['vpc']),
            ]),
        INSTANCE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('Instance ID to associate with EIP.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('nova.server')]),
    }

    attributes_schema = {
        ALLOCATION_ID:
        attributes.Schema(
            _('ID that AWS assigns to represent the allocation of the address '
              'for use with Amazon VPC. Returned only for VPC elastic IP '
              'addresses.')),
    }

    def __init__(self, name, json_snippet, stack):
        super(ElasticIp, self).__init__(name, json_snippet, stack)
        # Cached floating IP address, lazily resolved by _ipaddress().
        self.ipaddress = None

    def _ipaddress(self):
        """Return the floating IP address, resolving and caching it lazily.

        Looks the address up in Neutron (vpc domain) or Nova depending on
        the DOMAIN property, ignoring "not found" errors, and returns ''
        when no address is known.
        """
        if self.ipaddress is None and self.resource_id is not None:
            if self.properties[self.DOMAIN]:
                try:
                    ips = self.neutron().show_floatingip(self.resource_id)
                except Exception as ex:
                    self.client_plugin('neutron').ignore_not_found(ex)
                else:
                    self.ipaddress = ips['floatingip']['floating_ip_address']
            else:
                try:
                    ips = self.nova().floating_ips.get(self.resource_id)
                except Exception as e:
                    self.client_plugin('nova').ignore_not_found(e)
                else:
                    self.ipaddress = ips.ip
        return self.ipaddress or ''

    def handle_create(self):
        """Allocate a floating IP for the current tenant."""
        ips = None
        if self.properties[self.DOMAIN]:
            # Deferred import: avoids a circular dependency with the
            # internet_gateway resource module at import time.
            from heat.engine.resources import internet_gateway

            ext_net = internet_gateway.InternetGateway.get_external_network_id(
                self.neutron())
            props = {'floating_network_id': ext_net}
            ips = self.neutron().create_floatingip({'floatingip':
                                                    props})['floatingip']
            self.ipaddress = ips['floating_ip_address']
            self.resource_id_set(ips['id'])
            LOG.info(_LI('ElasticIp create %s'), str(ips))
        else:
            try:
                ips = self.nova().floating_ips.create()
            except Exception as e:
                # Re-raise after logging a hint for the common
                # misconfiguration (no default floating IP pool).
                with excutils.save_and_reraise_exception():
                    if self.client_plugin('nova').is_not_found(e):
                        LOG.error(
                            _LE("No default floating IP pool configured."
                                " Set 'default_floating_pool' in "
                                "nova.conf."))

            if ips:
                self.ipaddress = ips.ip
                self.resource_id_set(ips.id)
                LOG.info(_LI('ElasticIp create %s'), str(ips))

        instance_id = self.properties[self.INSTANCE_ID]
        if instance_id:
            server = self.nova().servers.get(instance_id)
            server.add_floating_ip(self._ipaddress())

    def handle_delete(self):
        """Disassociate (best effort) and then deallocate the EIP."""
        if self.resource_id is None:
            return
        # The association may never have been made (plain EIP, or the
        # association failed during creation). Disassociating would then
        # raise, so the expected errors are caught and ignored before
        # deallocating the EIP itself.
        instance_id = self.properties[self.INSTANCE_ID]
        if instance_id:
            try:
                server = self.nova().servers.get(instance_id)
                if server:
                    server.remove_floating_ip(self._ipaddress())
            except Exception as e:
                is_not_found = self.client_plugin('nova').is_not_found(e)
                is_unprocessable_entity = self.client_plugin(
                    'nova').is_unprocessable_entity(e)

                if (not is_not_found and not is_unprocessable_entity):
                    raise

        # Deallocate the EIP via whichever service allocated it.
        if self.properties[self.DOMAIN]:
            try:
                self.neutron().delete_floatingip(self.resource_id)
            except Exception as ex:
                self.client_plugin('neutron').ignore_not_found(ex)
        else:
            try:
                self.nova().floating_ips.delete(self.resource_id)
            except Exception as e:
                self.client_plugin('nova').ignore_not_found(e)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-point the floating IP when InstanceId changes."""
        if prop_diff:
            if self.INSTANCE_ID in prop_diff:
                instance_id = prop_diff.get(self.INSTANCE_ID)
                if instance_id:
                    # no need to remove the floating ip from the old instance,
                    # nova does this automatically when calling
                    # add_floating_ip().
                    server = self.nova().servers.get(instance_id)
                    server.add_floating_ip(self._ipaddress())
                else:
                    # InstanceId was cleared: detach the floating IP from
                    # the previously associated instance.
                    instance_id_old = self.properties[self.INSTANCE_ID]
                    if instance_id_old:
                        server = self.nova().servers.get(instance_id_old)
                        server.remove_floating_ip(self._ipaddress())

    def FnGetRefId(self):
        """The reference ID of an EIP is its IP address."""
        return six.text_type(self._ipaddress())

    def _resolve_attribute(self, name):
        """Resolve AllocationId; other attribute names yield None."""
        if name == self.ALLOCATION_ID:
            return six.text_type(self.resource_id)
Exemplo n.º 20
0
class ControllerProperties(AviResource):
    """Avi Vantage controller properties resource.

    Auto-generated wrapper around the Avi ``controllerproperties`` API
    object: one ``*_schema`` class attribute per API field (all optional
    and updatable), a PROPERTIES tuple listing the field names, and a
    properties_schema dict mapping each name to its schema. The empty
    ``_("")`` descriptions come from the generated object model export.
    """
    resource_name = "controllerproperties"
    # all schemas
    dummy_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    unresponsive_se_reboot_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    crashed_se_reboot_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    se_offline_del_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    vs_se_create_fail_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    vs_se_vnic_fail_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    vs_se_bootup_fail_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    se_vnic_cooldown_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    vs_se_vnic_ip_fail_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    fatal_error_lease_time_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    upgrade_lease_time_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    query_host_fail_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    vnic_op_fail_time_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    dns_refresh_period_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    se_create_timeout_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    max_dead_se_in_grp_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    dead_se_detection_timer_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    api_idle_timeout_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    # The only BOOLEAN field; everything else is NUMBER.
    allow_unauthenticated_nodes_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(""),
        required=False,
        update_allowed=True,
    )
    cluster_ip_gratuitous_arp_period_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    vs_key_rotate_period_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    secure_channel_controller_token_timeout_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    secure_channel_se_token_timeout_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    max_seq_vnic_failures_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    vs_awaiting_se_timeout_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    vs_apic_scaleout_timeout_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Time to wait for the scaled out SE to become ready before marking the scaleout done, applies to APIC configuration only"
          ),
        required=False,
        update_allowed=True,
    )
    secure_channel_cleanup_timeout_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    attach_ip_retry_interval_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    attach_ip_retry_limit_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    persistence_key_rotate_period_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )

    # properties list
    PROPERTIES = (
        'dummy',
        'unresponsive_se_reboot',
        'crashed_se_reboot',
        'se_offline_del',
        'vs_se_create_fail',
        'vs_se_vnic_fail',
        'vs_se_bootup_fail',
        'se_vnic_cooldown',
        'vs_se_vnic_ip_fail',
        'fatal_error_lease_time',
        'upgrade_lease_time',
        'query_host_fail',
        'vnic_op_fail_time',
        'dns_refresh_period',
        'se_create_timeout',
        'max_dead_se_in_grp',
        'dead_se_detection_timer',
        'api_idle_timeout',
        'allow_unauthenticated_nodes',
        'cluster_ip_gratuitous_arp_period',
        'vs_key_rotate_period',
        'secure_channel_controller_token_timeout',
        'secure_channel_se_token_timeout',
        'max_seq_vnic_failures',
        'vs_awaiting_se_timeout',
        'vs_apic_scaleout_timeout',
        'secure_channel_cleanup_timeout',
        'attach_ip_retry_interval',
        'attach_ip_retry_limit',
        'persistence_key_rotate_period',
    )

    # mapping of properties to their schemas
    properties_schema = {
        'dummy': dummy_schema,
        'unresponsive_se_reboot': unresponsive_se_reboot_schema,
        'crashed_se_reboot': crashed_se_reboot_schema,
        'se_offline_del': se_offline_del_schema,
        'vs_se_create_fail': vs_se_create_fail_schema,
        'vs_se_vnic_fail': vs_se_vnic_fail_schema,
        'vs_se_bootup_fail': vs_se_bootup_fail_schema,
        'se_vnic_cooldown': se_vnic_cooldown_schema,
        'vs_se_vnic_ip_fail': vs_se_vnic_ip_fail_schema,
        'fatal_error_lease_time': fatal_error_lease_time_schema,
        'upgrade_lease_time': upgrade_lease_time_schema,
        'query_host_fail': query_host_fail_schema,
        'vnic_op_fail_time': vnic_op_fail_time_schema,
        'dns_refresh_period': dns_refresh_period_schema,
        'se_create_timeout': se_create_timeout_schema,
        'max_dead_se_in_grp': max_dead_se_in_grp_schema,
        'dead_se_detection_timer': dead_se_detection_timer_schema,
        'api_idle_timeout': api_idle_timeout_schema,
        'allow_unauthenticated_nodes': allow_unauthenticated_nodes_schema,
        'cluster_ip_gratuitous_arp_period':
        cluster_ip_gratuitous_arp_period_schema,
        'vs_key_rotate_period': vs_key_rotate_period_schema,
        'secure_channel_controller_token_timeout':
        secure_channel_controller_token_timeout_schema,
        'secure_channel_se_token_timeout':
        secure_channel_se_token_timeout_schema,
        'max_seq_vnic_failures': max_seq_vnic_failures_schema,
        'vs_awaiting_se_timeout': vs_awaiting_se_timeout_schema,
        'vs_apic_scaleout_timeout': vs_apic_scaleout_timeout_schema,
        'secure_channel_cleanup_timeout':
        secure_channel_cleanup_timeout_schema,
        'attach_ip_retry_interval': attach_ip_retry_interval_schema,
        'attach_ip_retry_limit': attach_ip_retry_limit_schema,
        'persistence_key_rotate_period': persistence_key_rotate_period_schema,
    }
Exemplo n.º 21
0
class DesignateDomain(resource.Resource):
    """Heat Template Resource for Designate Domain.

    Manages a DNS domain (zone) through the OpenStack Designate service.
    """

    support_status = support.SupportStatus(
        version='5.0.0')

    default_client_name = 'designate'

    # Entity name used by the generic show/resolve machinery.
    # (Previously declared twice; one declaration suffices.)
    entity = 'domains'

    PROPERTIES = (
        NAME, TTL, DESCRIPTION, EMAIL
    ) = (
        'name', 'ttl', 'description', 'email'
    )

    ATTRIBUTES = (
        SERIAL,
    ) = (
        'serial',
    )

    properties_schema = {
        # Based on RFC 1035, length of name is set to max of 255
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Domain name.'),
            required=True,
            constraints=[constraints.Length(max=255)]
        ),
        # Based on RFC 1035, range for ttl is set to 1 to signed 32 bit number
        TTL: properties.Schema(
            properties.Schema.INTEGER,
            _('Time To Live (Seconds).'),
            update_allowed=True,
            constraints=[constraints.Range(min=1,
                                           max=2147483647)]
        ),
        # designate mandates to the max length of 160 for description
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of domain.'),
            update_allowed=True,
            constraints=[constraints.Length(max=160)]
        ),
        EMAIL: properties.Schema(
            properties.Schema.STRING,
            _('Domain email.'),
            update_allowed=True,
            required=True
        )
    }

    attributes_schema = {
        SERIAL: attributes.Schema(
            _("DNS domain serial."),
            type=attributes.Schema.STRING
        ),
    }

    def handle_create(self):
        """Create the domain from all non-empty properties."""
        args = dict((k, v) for k, v in six.iteritems(self.properties) if v)
        domain = self.client_plugin().domain_create(**args)

        self.resource_id_set(domain.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed email/ttl/description values to Designate."""
        # Property names match domain_update()'s keyword arguments, so the
        # changed values can be collected in one pass.
        args = dict((key, prop_diff[key])
                    for key in (self.EMAIL, self.TTL, self.DESCRIPTION)
                    if prop_diff.get(key))

        if args:
            args['id'] = self.resource_id
            self.client_plugin().domain_update(**args)

    def _resolve_attribute(self, name):
        """Return the domain's serial for the SERIAL attribute."""
        if name == self.SERIAL:
            domain = self.client().domains.get(self.resource_id)
            return domain.serial

    # FIXME(kanagaraj-manickam) Remove this method once designate defect
    # 1485552 is fixed.
    def _show_resource(self):
        return dict(self.client().domains.get(self.resource_id).items())
Exemplo n.º 22
0
class FloatingIPAssociation(neutron.NeutronResource):
    """Associates an existing Neutron floating IP with a port.

    The resource ID encodes both sides as '<floatingip_id>:<port_id>'.
    """

    PROPERTIES = (
        FLOATINGIP_ID,
        PORT_ID,
        FIXED_IP_ADDRESS,
    ) = (
        'floatingip_id',
        'port_id',
        'fixed_ip_address',
    )

    properties_schema = {
        FLOATINGIP_ID:
        properties.Schema(properties.Schema.STRING,
                          _('ID of the floating IP to associate.'),
                          required=True,
                          update_allowed=True),
        PORT_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of an existing port with at least one IP address to '
              'associate with this floating IP.'),
            required=True,
            update_allowed=True),
        FIXED_IP_ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('IP address to use if the port has multiple addresses.'),
            update_allowed=True),
    }

    def handle_create(self):
        """Point the floating IP at the configured port."""
        props = self.prepare_properties(self.properties, self.name)

        floatingip_id = props.pop(self.FLOATINGIP_ID)

        # Only the side effect of the update matters; the response body
        # (previously subscripted and discarded) is not needed.
        self.neutron().update_floatingip(floatingip_id,
                                         {'floatingip': props})
        self.resource_id_set('%s:%s' % (floatingip_id, props[self.PORT_ID]))

    def handle_delete(self):
        """Detach the floating IP from its port, ignoring missing objects."""
        if not self.resource_id:
            return
        # Only the floating IP half of the '<fip>:<port>' ID is needed.
        floatingip_id = self.resource_id.split(':')[0]
        try:
            self.neutron().update_floatingip(floatingip_id,
                                             {'floatingip': {
                                                 'port_id': None
                                             }})
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-associate when the floating IP, port or fixed IP changes."""
        if not prop_diff:
            return
        (floatingip_id, port_id) = self.resource_id.split(':')
        neutron_client = self.neutron()
        # If the floating IP itself changed, first disassociate the port
        # from the old floating IP.
        if self.FLOATINGIP_ID in prop_diff:
            try:
                neutron_client.update_floatingip(
                    floatingip_id, {'floatingip': {
                        'port_id': None
                    }})
            except Exception as ex:
                self.client_plugin().ignore_not_found(ex)

        # Associate the (possibly new) floating IP with the new port.
        floatingip_id = (prop_diff.get(self.FLOATINGIP_ID)
                         or floatingip_id)
        port_id = prop_diff.get(self.PORT_ID) or port_id

        fixed_ip_address = (prop_diff.get(self.FIXED_IP_ADDRESS)
                            or self.properties.get(self.FIXED_IP_ADDRESS))

        request_body = {
            'floatingip': {
                'port_id': port_id,
                'fixed_ip_address': fixed_ip_address
            }
        }

        neutron_client.update_floatingip(floatingip_id, request_body)
        self.resource_id_set('%s:%s' % (floatingip_id, port_id))
Exemplo n.º 23
0
class HeatVnSubnet(contrail.ContrailResource):
    """Heat resource managing a subnet on a Contrail virtual network.

    The subnet is stored as an IpamSubnetType entry inside the network's
    network-ipam reference; the Heat resource ID is the subnet UUID.
    """

    PROPERTIES = (
        NAME,
        NETWORK,
        IP_PREFIX,
        DEFAULT_GATEWAY,
        IPAM,
        ENABLE_DHCP,
        DNS_NAMESERVERS,
        ALLOCATION_POOLS,
        HOST_ROUTES,
    ) = (
        'name',
        'network',
        'ip_prefix',
        'default_gateway',
        'ipam',
        'enable_dhcp',
        'dns_nameservers',
        'allocation_pools',
        'host_routes',
    )

    _ALLOCATION_POOL_KEYS = (
        ALLOCATION_POOL_START,
        ALLOCATION_POOL_END,
    ) = (
        'start',
        'end',
    )

    _HOST_ROUTES_KEYS = (
        ROUTE_DESTINATION,
        ROUTE_NEXTHOP,
    ) = (
        'destination',
        'nexthop',
    )

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Virtual Network Subnet name'),
            update_allowed=True,
        ),
        NETWORK:
        properties.Schema(
            properties.Schema.STRING,
            _('Network ID this subnet belongs to.'),
            required=True,
            update_allowed=False,
        ),
        IP_PREFIX:
        properties.Schema(
            properties.Schema.STRING,
            _('IP prefix of subnet.'),
            required=True,
        ),
        DEFAULT_GATEWAY:
        properties.Schema(
            properties.Schema.STRING,
            _('Default gateway of subnet.'),
            required=True,
            update_allowed=True,
        ),
        IPAM:
        properties.Schema(
            properties.Schema.STRING,
            _('IPAM this subnet uses.'),
            default=None,
            update_allowed=True,
        ),
        ENABLE_DHCP:
        properties.Schema(
            properties.Schema.STRING,
            _('Set to true if DHCP is enabled and false if DHCP is disabled.'),
            default="True",
            constraints=[
                constraints.AllowedValues(['True', 'False']),
            ],
            update_allowed=True),
        DNS_NAMESERVERS:
        properties.Schema(properties.Schema.LIST,
                          _('A specified set of DNS name servers to be used.'),
                          default=[],
                          update_allowed=True),
        ALLOCATION_POOLS:
        properties.Schema(
            properties.Schema.LIST,
            _('The start and end addresses for the allocation pools.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ALLOCATION_POOL_START:
                    properties.Schema(properties.Schema.STRING,
                                      required=True,
                                      update_allowed=False),
                    ALLOCATION_POOL_END:
                    properties.Schema(properties.Schema.STRING,
                                      required=True,
                                      update_allowed=False),
                },
            ),
            update_allowed=False),
        HOST_ROUTES:
        properties.Schema(properties.Schema.LIST,
                          schema=properties.Schema(
                              properties.Schema.MAP,
                              schema={
                                  ROUTE_DESTINATION:
                                  properties.Schema(properties.Schema.STRING,
                                                    required=True),
                                  ROUTE_NEXTHOP:
                                  properties.Schema(properties.Schema.STRING,
                                                    required=True),
                              },
                          )),
    }

    attributes_schema = {
        "name":
        attributes.Schema(_('The name of the Virtual Network.'), ),
        "network":
        attributes.Schema(_('Network ID this subnet belongs to.'), ),
        "ip_prefix":
        attributes.Schema(_('IP prefix of subnet.'), ),
        "default_gateway":
        attributes.Schema(_('Default gateway of subnet.'), ),
        "ipam":
        attributes.Schema(_('IPAM this subnet uses.'), ),
        "subnet_uuid":
        attributes.Schema(_('UUID of subnet.'), ),
        "subnet_name":
        attributes.Schema(_('Name of subnet.'), ),
        "enable_dhcp":
        attributes.Schema(
            _('True if DHCP is enabled for this subnet; False otherwise.'), ),
        "show":
        attributes.Schema(_('All attributes.'), ),
    }

    update_allowed_keys = ('Properties', )

    def _read_network(self):
        """Read the virtual network, trying by UUID then by FQ name."""
        try:
            return self.vnc_lib().virtual_network_read(
                id=self.properties[self.NETWORK])
        except vnc_api.NoIdError:
            return self.vnc_lib().virtual_network_read(
                fq_name_str=self.properties[self.NETWORK])

    def _get_subnets(self, vn_obj, ipam):
        """Return vn_obj's subnet list for the given IPAM ([] if none)."""
        subnets = []
        for ref in vn_obj.get_network_ipam_refs() or []:
            if ref['to'] == ipam.get_fq_name():
                subnets = ref['attr'].get_ipam_subnets()
                break
        return subnets

    def _get_ipam(self):
        """Return the IPAM object to use.

        If the IPAM property is set, read it by UUID or FQ name; otherwise
        use (creating if needed) the project's 'default-network-ipam'.
        """
        ipam = self.properties[self.IPAM]
        if ipam:
            try:
                ipam_obj = self.vnc_lib().network_ipam_read(id=ipam)
            except vnc_api.NoIdError:
                ipam_obj = self.vnc_lib().network_ipam_read(fq_name_str=ipam)
        else:
            tenant_id = self.stack.context.tenant_id
            project_obj = self.vnc_lib().project_read(
                id=str(uuid.UUID(tenant_id)))
            ipam_fq_name = project_obj.get_fq_name() + ['default-network-ipam']
            try:
                ipam_obj = self.vnc_lib().network_ipam_read(
                    fq_name=ipam_fq_name)
            except vnc_api.NoIdError:
                ipam_obj = vnc_api.NetworkIpam('default-network-ipam',
                                               project_obj)
                ipam_id = self.vnc_lib().network_ipam_create(ipam_obj)
                ipam_obj = self.vnc_lib().network_ipam_read(id=ipam_id)
        return ipam_obj

    def _update_subnet(self, subnet, props):
        """Apply updatable fields from props onto an IpamSubnetType."""
        if props.get(self.ENABLE_DHCP) == "True":
            subnet.set_enable_dhcp(True)
        else:
            subnet.set_enable_dhcp(False)
        dns_servers = props.get(self.DNS_NAMESERVERS)
        if dns_servers:
            subnet.set_dns_nameservers(dns_servers)
        host_routes = props.get(self.HOST_ROUTES)
        if host_routes:
            subnet.set_host_routes(host_routes)

    def handle_create(self):
        """Add a new IpamSubnetType to the network and record its UUID."""
        vn_obj = self._read_network()

        net = netaddr.IPNetwork(self.properties[self.IP_PREFIX])
        if self.properties[self.ENABLE_DHCP] == "True":
            enable_dhcp = True
        else:
            enable_dhcp = False
        ipam = self._get_ipam()
        subnets = self._get_subnets(vn_obj, ipam)
        subnet_uuid = str(uuid.uuid4())
        subnet = vnc_api.IpamSubnetType(
            subnet_name=self.properties[self.NAME],
            subnet=vnc_api.SubnetType(str(net.ip), net.prefixlen),
            default_gateway=self.properties[self.DEFAULT_GATEWAY],
            allocation_pools=self.properties[self.ALLOCATION_POOLS],
            enable_dhcp=enable_dhcp,
            dns_nameservers=self.properties[self.DNS_NAMESERVERS],
            host_routes=self.properties[self.HOST_ROUTES],
            subnet_uuid=subnet_uuid)
        if subnets:
            # The IPAM ref already exists: append to it and force the
            # field to be written back on update.
            subnets.append(subnet)
            vn_obj._pending_field_updates.add('network_ipam_refs')
        else:
            vn_obj.add_network_ipam(ipam, vnc_api.VnSubnetsType([subnet]))
        self.vnc_lib().virtual_network_update(vn_obj)
        self.resource_id_set(subnet_uuid)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update the subnet entry matching this resource's UUID."""
        subnet_uuid = self.resource_id
        vn_obj = self._read_network()

        ipam = self._get_ipam()
        subnets = self._get_subnets(vn_obj, ipam)
        for subnet in subnets:
            if (subnet.get_subnet_uuid() and subnet.get_subnet_uuid() == str(
                    uuid.UUID(subnet_uuid))):
                self._update_subnet(subnet, prop_diff)
                vn_obj._pending_field_updates.add('network_ipam_refs')
                break
        self.vnc_lib().virtual_network_update(vn_obj)

    def handle_delete(self):
        """Remove this subnet from the network's IPAM reference."""
        subnet_uuid = self.resource_id
        if subnet_uuid:
            vn_obj = self._read_network()

            ipam = self._get_ipam()
            subnets = self._get_subnets(vn_obj, ipam)
            for subnet in subnets:
                if (subnet.get_subnet_uuid()
                        and subnet.get_subnet_uuid() == str(
                            uuid.UUID(subnet_uuid))):
                    subnets.remove(subnet)
                    vn_obj._pending_field_updates.add('network_ipam_refs')
                    break
            if not subnets:
                # Last subnet gone: drop the whole IPAM reference.
                vn_obj.del_network_ipam(ipam)
            self.vnc_lib().virtual_network_update(vn_obj)

    def _show_resource(self):
        """Return a dict describing this subnet resource.

        Fixes two defects in the original: it called the non-existent
        self.get_ipam() (the method is _get_ipam) and never returned the
        assembled dict.
        """
        dic = {}
        dic['network'] = self.properties[self.NETWORK]
        dic['ip_block'] = self.properties[self.IP_PREFIX]
        dic['gateway'] = self.properties[self.DEFAULT_GATEWAY]
        dic['subnet_uuid'] = self.resource_id
        dic['ipam'] = self._get_ipam().get_uuid()
        dic['name'] = self.properties[self.NAME]
        return dic
Exemplo n.º 24
0
class FloatingIP(neutron.NeutronResource):
    """A resource for managing Neutron floating IPs.

    Allocates a floating IP from the given external network and
    optionally associates it with an existing port.  The deprecated
    ``floating_network_id`` property is accepted as an alias for
    ``floating_network``.
    """

    PROPERTIES = (
        FLOATING_NETWORK_ID,
        FLOATING_NETWORK,
        VALUE_SPECS,
        PORT_ID,
        FIXED_IP_ADDRESS,
    ) = (
        'floating_network_id',
        'floating_network',
        'value_specs',
        'port_id',
        'fixed_ip_address',
    )

    ATTRIBUTES = (
        ROUTER_ID,
        TENANT_ID,
        FLOATING_NETWORK_ID_ATTR,
        FIXED_IP_ADDRESS_ATTR,
        FLOATING_IP_ADDRESS_ATTR,
        PORT_ID_ATTR,
        SHOW,
    ) = (
        'router_id',
        'tenant_id',
        'floating_network_id',
        'fixed_ip_address',
        'floating_ip_address',
        'port_id',
        'show',
    )

    properties_schema = {
        FLOATING_NETWORK_ID:
        properties.Schema(properties.Schema.STRING,
                          support_status=support.SupportStatus(
                              support.DEPRECATED,
                              _('Use property %s.') % FLOATING_NETWORK),
                          required=False),
        FLOATING_NETWORK:
        properties.Schema(properties.Schema.STRING,
                          _('Network to allocate floating IP from.'),
                          required=False),
        VALUE_SPECS:
        properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the "floatingip" object in the '
              'creation request. Parameters are often specific to installed '
              'hardware or extensions.'),
            default={}),
        PORT_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of an existing port with at least one IP address to '
              'associate with this floating IP.'),
            update_allowed=True),
        FIXED_IP_ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('IP address to use if the port has multiple addresses.'),
            update_allowed=True),
    }

    attributes_schema = {
        ROUTER_ID:
        attributes.Schema(
            _('ID of the router used as gateway, set when associated with a '
              'port.')),
        TENANT_ID:
        attributes.Schema(_('The tenant owning this floating IP.')),
        FLOATING_NETWORK_ID_ATTR:
        attributes.Schema(
            _('ID of the network in which this IP is allocated.')),
        FIXED_IP_ADDRESS_ATTR:
        attributes.Schema(
            _('IP address of the associated port, if specified.')),
        FLOATING_IP_ADDRESS_ATTR:
        attributes.Schema(_('The allocated address of this IP.')),
        PORT_ID_ATTR:
        attributes.Schema(_('ID of the port associated with this IP.')),
        SHOW:
        attributes.Schema(_('All attributes.')),
    }

    def add_dependencies(self, deps):
        """Add implicit dependencies on router resources in the template.

        A floating IP can only be created once the external network has a
        gateway router, and can only be associated once the port's subnet
        is attached to a router — hence the two dependency rules below.
        """
        super(FloatingIP, self).add_dependencies(deps)

        for resource in self.stack.itervalues():
            # depend on any RouterGateway in this template with the same
            # network_id as this floating_network_id
            if resource.has_interface('OS::Neutron::RouterGateway'):
                gateway_network = resource.properties.get(
                    router.RouterGateway.NETWORK) or resource.properties.get(
                        router.RouterGateway.NETWORK_ID)
                floating_network = self.properties.get(
                    self.FLOATING_NETWORK) or self.properties.get(
                        self.FLOATING_NETWORK_ID)
                if gateway_network == floating_network:
                    deps += (self, resource)

            # depend on any RouterInterface in this template which interfaces
            # with the same subnet that this floating IP's port is assigned
            # to
            elif resource.has_interface('OS::Neutron::RouterInterface'):

                def port_on_subnet(resource, subnet):
                    if not resource.has_interface('OS::Neutron::Port'):
                        return False
                    for fixed_ip in resource.properties.get(
                            port.Port.FIXED_IPS):

                        # Each FIXED_IPS entry is a plain dict, so both
                        # lookups must use dict.get(); the previous
                        # fixed_ip.properties.get(...) raised
                        # AttributeError whenever a fixed IP was present.
                        port_subnet = (
                            fixed_ip.get(port.Port.FIXED_IP_SUBNET)
                            or fixed_ip.get(port.Port.FIXED_IP_SUBNET_ID))
                        # NOTE(review): only the first fixed IP entry is
                        # examined — behavior preserved from the original.
                        return subnet == port_subnet
                    return False

                interface_subnet = (resource.properties.get(
                    router.RouterInterface.SUBNET) or resource.properties.get(
                        router.RouterInterface.SUBNET_ID))
                for d in deps.required_by(self):
                    if port_on_subnet(d, interface_subnet):
                        deps += (self, resource)
                        break

    def validate(self):
        """Ensure exactly one of the network properties is supplied."""
        super(FloatingIP, self).validate()
        self._validate_depr_property_required(self.properties,
                                              self.FLOATING_NETWORK,
                                              self.FLOATING_NETWORK_ID)

    def handle_create(self):
        """Allocate the floating IP and record its id as the resource id."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        # Translate a network name/ref into the concrete id Neutron expects.
        self.client_plugin().resolve_network(props, self.FLOATING_NETWORK,
                                             'floating_network_id')
        fip = self.neutron().create_floatingip({'floatingip':
                                                props})['floatingip']
        self.resource_id_set(fip['id'])

    def _show_resource(self):
        """Return the current floating IP data from Neutron."""
        return self.neutron().show_floatingip(self.resource_id)['floatingip']

    def handle_delete(self):
        """Release the floating IP, tolerating an already-deleted one."""
        client = self.neutron()
        try:
            client.delete_floatingip(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-point the floating IP at a (possibly new) port/fixed IP.

        Values not present in ``prop_diff`` fall back to the current
        property values so the update request is always complete.
        """
        if prop_diff:
            neutron_client = self.neutron()

            port_id = prop_diff.get(self.PORT_ID,
                                    self.properties.get(self.PORT_ID))

            fixed_ip_address = prop_diff.get(
                self.FIXED_IP_ADDRESS,
                self.properties.get(self.FIXED_IP_ADDRESS))

            request_body = {
                'floatingip': {
                    'port_id': port_id,
                    'fixed_ip_address': fixed_ip_address
                }
            }

            neutron_client.update_floatingip(self.resource_id, request_body)
# Exemplo n.º 25 (0)
class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
    """Implements AWS::AutoScaling::AutoScalingGroup.

    Manages a group of instances built from a LaunchConfiguration,
    keeping the group size between MinSize and MaxSize and honouring a
    cooldown period between scaling adjustments.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        AVAILABILITY_ZONES,
        LAUNCH_CONFIGURATION_NAME,
        MAX_SIZE,
        MIN_SIZE,
        COOLDOWN,
        DESIRED_CAPACITY,
        HEALTH_CHECK_GRACE_PERIOD,
        HEALTH_CHECK_TYPE,
        LOAD_BALANCER_NAMES,
        VPCZONE_IDENTIFIER,
        TAGS,
    ) = (
        'AvailabilityZones',
        'LaunchConfigurationName',
        'MaxSize',
        'MinSize',
        'Cooldown',
        'DesiredCapacity',
        'HealthCheckGracePeriod',
        'HealthCheckType',
        'LoadBalancerNames',
        'VPCZoneIdentifier',
        'Tags',
    )

    _TAG_KEYS = (
        TAG_KEY,
        TAG_VALUE,
    ) = (
        'Key',
        'Value',
    )

    # NOTE(review): the parentheses below do not create tuples — both
    # names are bound to plain strings; preserved as-is.
    _UPDATE_POLICY_SCHEMA_KEYS = (ROLLING_UPDATE) = (
        'AutoScalingRollingUpdate')

    _ROLLING_UPDATE_SCHEMA_KEYS = (MIN_INSTANCES_IN_SERVICE, MAX_BATCH_SIZE,
                                   PAUSE_TIME) = ('MinInstancesInService',
                                                  'MaxBatchSize', 'PauseTime')

    ATTRIBUTES = (INSTANCE_LIST, ) = ('InstanceList', )

    properties_schema = {
        AVAILABILITY_ZONES:
        properties.Schema(properties.Schema.LIST,
                          _('Not Implemented.'),
                          required=True),
        LAUNCH_CONFIGURATION_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('The reference to a LaunchConfiguration resource.'),
            required=True,
            update_allowed=True),
        MAX_SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Maximum number of instances in the group.'),
                          required=True,
                          update_allowed=True),
        MIN_SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Minimum number of instances in the group.'),
                          required=True,
                          update_allowed=True),
        COOLDOWN:
        properties.Schema(properties.Schema.NUMBER,
                          _('Cooldown period, in seconds.'),
                          update_allowed=True),
        DESIRED_CAPACITY:
        properties.Schema(properties.Schema.INTEGER,
                          _('Desired initial number of instances.'),
                          update_allowed=True),
        HEALTH_CHECK_GRACE_PERIOD:
        properties.Schema(properties.Schema.INTEGER,
                          _('Not Implemented.'),
                          implemented=False),
        HEALTH_CHECK_TYPE:
        properties.Schema(properties.Schema.STRING,
                          _('Not Implemented.'),
                          constraints=[
                              constraints.AllowedValues(['EC2', 'ELB']),
                          ],
                          implemented=False),
        LOAD_BALANCER_NAMES:
        properties.Schema(properties.Schema.LIST,
                          _('List of LoadBalancer resources.')),
        VPCZONE_IDENTIFIER:
        properties.Schema(
            properties.Schema.LIST,
            _('Use only with Neutron, to list the internal subnet to '
              'which the instance will be attached; '
              'needed only if multiple exist; '
              'list length must be exactly 1.'),
            schema=properties.Schema(
                properties.Schema.STRING,
                _('UUID of the internal subnet to which the instance '
                  'will be attached.'))),
        TAGS:
        properties.Schema(properties.Schema.LIST,
                          _('Tags to attach to this group.'),
                          schema=properties.Schema(
                              properties.Schema.MAP,
                              schema={
                                  TAG_KEY:
                                  properties.Schema(properties.Schema.STRING,
                                                    required=True),
                                  TAG_VALUE:
                                  properties.Schema(properties.Schema.STRING,
                                                    required=True),
                              },
                          )),
    }

    attributes_schema = {
        INSTANCE_LIST:
        attributes.Schema(
            _("A comma-delimited list of server ip addresses. "
              "(Heat extension).")),
    }

    # Defaults for the AutoScalingRollingUpdate update policy.
    rolling_update_schema = {
        MIN_INSTANCES_IN_SERVICE:
        properties.Schema(properties.Schema.INTEGER, default=0),
        MAX_BATCH_SIZE:
        properties.Schema(properties.Schema.INTEGER, default=1),
        PAUSE_TIME:
        properties.Schema(properties.Schema.STRING, default='PT0S')
    }

    update_policy_schema = {
        ROLLING_UPDATE:
        properties.Schema(properties.Schema.MAP, schema=rolling_update_schema)
    }

    def handle_create(self):
        """Create the nested stack holding the group's instances."""
        self.validate_launchconfig()
        return self.create_with_template(self.child_template(),
                                         self._environment())

    def _get_conf_properties(self):
        """Return the launch configuration and its properties.

        Injects 'SubnetId' when a VPC zone identifier was given; validate()
        guarantees the list holds exactly one subnet.
        """
        conf, props = super(AutoScalingGroup, self)._get_conf_properties()
        vpc_zone_ids = self.properties.get(AutoScalingGroup.VPCZONE_IDENTIFIER)
        if vpc_zone_ids:
            props['SubnetId'] = vpc_zone_ids[0]

        return conf, props

    def check_create_complete(self, task):
        """Invoke the cooldown after creation succeeds."""
        done = super(AutoScalingGroup, self).check_create_complete(task)
        if done:
            # Record the initial size so the first adjustment respects
            # the cooldown window.
            self._cooldown_timestamp(
                "%s : %s" % (EXACT_CAPACITY, grouputils.get_size(self)))
        return done

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """
        If Properties has changed, update self.properties, so we get the new
        values during any subsequent adjustment.
        """
        if tmpl_diff:
            # parse update policy
            if 'UpdatePolicy' in tmpl_diff:
                up = json_snippet.update_policy(self.update_policy_schema,
                                                self.context)
                self.update_policy = up

        self.properties = json_snippet.properties(self.properties_schema,
                                                  self.context)
        if prop_diff:
            # Replace instances first if launch configuration has changed
            self._try_rolling_update(prop_diff)

        # Resize to DesiredCapacity when set, otherwise re-assert the
        # current size (which clamps it into the new Min/Max bounds).
        if self.properties[self.DESIRED_CAPACITY] is not None:
            self.adjust(self.properties[self.DESIRED_CAPACITY],
                        adjustment_type=EXACT_CAPACITY)
        else:
            current_capacity = grouputils.get_size(self)
            self.adjust(current_capacity, adjustment_type=EXACT_CAPACITY)

    def adjust(self, adjustment, adjustment_type=CHANGE_IN_CAPACITY):
        """
        Adjust the size of the scaling group if the cooldown permits.
        """
        if self._cooldown_inprogress():
            LOG.info(
                _LI("%(name)s NOT performing scaling adjustment, "
                    "cooldown %(cooldown)s"), {
                        'name': self.name,
                        'cooldown': self.properties[self.COOLDOWN]
                    })
            return

        # Healthy capacity bounded by MinSize/MaxSize.
        capacity = grouputils.get_size(self)
        lower = self.properties[self.MIN_SIZE]
        upper = self.properties[self.MAX_SIZE]

        new_capacity = _calculate_new_capacity(capacity, adjustment,
                                               adjustment_type, lower, upper)
        total = grouputils.get_size(self, include_failed=True)
        # if there are failed resources in nested_stack, has to change
        if new_capacity == total:
            # NOTE(review): the message reports the pre-adjustment healthy
            # capacity, not new_capacity.
            LOG.debug('no change in capacity %d' % capacity)
            return

        # send a notification before, on-error and on-success.
        notif = {
            'stack': self.stack,
            'adjustment': adjustment,
            'adjustment_type': adjustment_type,
            'capacity': capacity,
            'groupname': self.FnGetRefId(),
            'message': _("Start resizing the group %(group)s") % {
                'group': self.FnGetRefId()
            },
            'suffix': 'start',
        }
        notification.send(**notif)
        try:
            self.resize(new_capacity)
        except Exception as resize_ex:
            # Re-raise the resize failure after a best-effort error
            # notification; a notification failure must not mask it.
            with excutils.save_and_reraise_exception():
                try:
                    notif.update({
                        'suffix': 'error',
                        'message': six.text_type(resize_ex),
                    })
                    notification.send(**notif)
                except Exception:
                    LOG.exception(_LE('Failed sending error notification'))
        else:
            notif.update({
                'suffix': 'end',
                'capacity': new_capacity,
                'message': _("End resizing the group %(group)s") % {
                    'group': notif['groupname']
                },
            })
            notification.send(**notif)

        # Start the cooldown window for this adjustment.
        self._cooldown_timestamp("%s : %s" % (adjustment_type, adjustment))

    def _tags(self):
        """Add Identifing Tags to all servers in the group.

        This is so the Dimensions received from cfn-push-stats all include
        the groupname and stack id.
        Note: the group name must match what is returned from FnGetRefId
        """
        autoscaling_tag = [{
            self.TAG_KEY: 'metering.AutoScalingGroupName',
            self.TAG_VALUE: self.FnGetRefId()
        }]
        return super(AutoScalingGroup, self)._tags() + autoscaling_tag

    def validate(self):
        """Validate group-size bounds and the VPC zone constraint."""
        res = super(AutoScalingGroup, self).validate()
        if res:
            return res

        # check validity of group size
        min_size = self.properties[self.MIN_SIZE]
        max_size = self.properties[self.MAX_SIZE]

        if max_size < min_size:
            msg = _("MinSize can not be greater than MaxSize")
            raise exception.StackValidationFailed(message=msg)

        if min_size < 0:
            msg = _("The size of AutoScalingGroup can not be less than zero")
            raise exception.StackValidationFailed(message=msg)

        if self.properties[self.DESIRED_CAPACITY] is not None:
            desired_capacity = self.properties[self.DESIRED_CAPACITY]
            if desired_capacity < min_size or desired_capacity > max_size:
                msg = _("DesiredCapacity must be between MinSize and MaxSize")
                raise exception.StackValidationFailed(message=msg)

        # TODO(pasquier-s): once Neutron is able to assign subnets to
        # availability zones, it will be possible to specify multiple subnets.
        # For now, only one subnet can be specified. The bug #1096017 tracks
        # this issue.
        if (self.properties.get(self.VPCZONE_IDENTIFIER)
                and len(self.properties[self.VPCZONE_IDENTIFIER]) != 1):
            raise exception.NotSupported(feature=_("Anything other than one "
                                                   "VPCZoneIdentifier"))

    def _resolve_attribute(self, name):
        '''
        heat extension: "InstanceList" returns comma delimited list of server
        ip addresses.
        '''
        if name == self.INSTANCE_LIST:
            # Empty join yields '' which is falsy, hence the `or None`.
            return u','.join(
                inst.FnGetAtt('PublicIp')
                for inst in grouputils.get_members(self)) or None

    def child_template(self):
        """Build the nested-stack template for the initial instance count."""
        # NOTE(review): truthiness check means DesiredCapacity=0 falls back
        # to MinSize here, unlike the `is not None` test in handle_update.
        if self.properties[self.DESIRED_CAPACITY]:
            num_instances = self.properties[self.DESIRED_CAPACITY]
        else:
            num_instances = self.properties[self.MIN_SIZE]
        return self._create_template(num_instances)
# Exemplo n.º 26 (0)
class SwiftContainer(resource.Resource):
    """A resource for managing an OpenStack Swift container.

    Supports container/account metadata headers, read/write ACLs and an
    optional purge-on-delete mode that removes contained objects before
    deleting the container.
    """

    PROPERTIES = (
        NAME,
        X_CONTAINER_READ,
        X_CONTAINER_WRITE,
        X_CONTAINER_META,
        X_ACCOUNT_META,
        PURGE_ON_DELETE,
    ) = (
        'name',
        'X-Container-Read',
        'X-Container-Write',
        'X-Container-Meta',
        'X-Account-Meta',
        'PurgeOnDelete',
    )

    ATTRIBUTES = (
        DOMAIN_NAME,
        WEBSITE_URL,
        ROOT_URL,
        OBJECT_COUNT,
        BYTES_USED,
        HEAD_CONTAINER,
    ) = (
        'DomainName',
        'WebsiteURL',
        'RootURL',
        'ObjectCount',
        'BytesUsed',
        'HeadContainer',
    )

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Name for the container. If not specified, a unique name will '
              'be generated.')),
        X_CONTAINER_READ:
        properties.Schema(
            properties.Schema.STRING,
            _('Specify the ACL permissions on who can read objects in the '
              'container.')),
        X_CONTAINER_WRITE:
        properties.Schema(
            properties.Schema.STRING,
            _('Specify the ACL permissions on who can write objects to the '
              'container.')),
        X_CONTAINER_META:
        properties.Schema(
            properties.Schema.MAP,
            _('A map of user-defined meta data to associate with the '
              'container. Each key in the map will set the header '
              'X-Container-Meta-{key} with the corresponding value.'),
            default={}),
        X_ACCOUNT_META:
        properties.Schema(
            properties.Schema.MAP,
            _('A map of user-defined meta data to associate with the '
              'account. Each key in the map will set the header '
              'X-Account-Meta-{key} with the corresponding value.'),
            default={}),
        PURGE_ON_DELETE:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _("If True, delete any objects in the container "
              "when the container is deleted. "
              "Otherwise, deleting a non-empty container "
              "will result in an error."),
            default=False,
            support_status=support.SupportStatus(version='2015.1')),
    }

    attributes_schema = {
        DOMAIN_NAME:
        attributes.Schema(_('The host from the container URL.'),
                          type=attributes.Schema.STRING),
        WEBSITE_URL:
        attributes.Schema(_('The URL of the container.'),
                          type=attributes.Schema.STRING),
        ROOT_URL:
        attributes.Schema(_('The parent URL of the container.'),
                          type=attributes.Schema.STRING),
        OBJECT_COUNT:
        attributes.Schema(_('The number of objects stored in the container.'),
                          type=attributes.Schema.INTEGER),
        BYTES_USED:
        attributes.Schema(_('The number of bytes stored in the container.'),
                          type=attributes.Schema.INTEGER),
        HEAD_CONTAINER:
        attributes.Schema(_('A map containing all headers for the container.'),
                          type=attributes.Schema.MAP),
    }

    default_client_name = 'swift'

    def physical_resource_name(self):
        """Return the user-supplied name, or a generated unique name."""
        name = self.properties[self.NAME]
        if name:
            return name

        return super(SwiftContainer, self).physical_resource_name()

    @staticmethod
    def _build_meta_headers(obj_type, meta_props):
        """Return a new dict of Swift metadata headers.

        Each key of *meta_props* is prefixed with
        "X-<Obj_type>-Meta-" derived from *obj_type* (e.g.
        "X-Container-Meta-" or "X-Account-Meta-").
        """
        if meta_props is None:
            return {}
        return dict(('X-' + obj_type.title() + '-Meta-' + k, v)
                    for (k, v) in meta_props.items())

    def handle_create(self):
        """Create a container."""
        container = self.physical_resource_name()

        container_headers = SwiftContainer._build_meta_headers(
            "container", self.properties[self.X_CONTAINER_META])

        account_headers = SwiftContainer._build_meta_headers(
            "account", self.properties[self.X_ACCOUNT_META])

        # ACL properties map directly onto their header names.
        for key in (self.X_CONTAINER_READ, self.X_CONTAINER_WRITE):
            if self.properties[key] is not None:
                container_headers[key] = self.properties[key]

        LOG.debug(
            'SwiftContainer create container %(container)s with '
            'container headers %(container_headers)s and '
            'account headers %(account_headers)s' % {
                'container': container,
                'account_headers': account_headers,
                'container_headers': container_headers
            })

        self.client().put_container(container, container_headers)

        if account_headers:
            self.client().post_account(account_headers)

        self.resource_id_set(container)

    def _get_objects(self):
        """List the container's objects; None if the container is gone."""
        try:
            container, objects = self.client().get_container(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return None
        return objects

    def _deleter(self, obj=None):
        """Delete the underlying container or an object inside it."""
        args = [self.resource_id]
        if obj:
            deleter = self.client().delete_object
            args.append(obj['name'])
        else:
            deleter = self.client().delete_container
        try:
            deleter(*args)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)

    def handle_delete(self):
        """Start deletion; returns a token for check_delete_complete.

        Returns None when the container was never created or is already
        gone, otherwise the number of objects still in the container.
        """
        if self.resource_id is None:
            return

        objects = self._get_objects()

        if objects:
            if self.properties[self.PURGE_ON_DELETE]:
                self._deleter(objects.pop())  # save first container refresh
            else:
                msg = _("Deleting non-empty container (%(id)s) "
                        "when %(prop)s is False") % {
                            'id': self.resource_id,
                            'prop': self.PURGE_ON_DELETE
                        }
                raise exception.ResourceActionNotSupported(action=msg)
        # objects is either None (container is gone already) or (empty) list
        if objects is not None:
            objects = len(objects)
        return objects

    def check_delete_complete(self, objects):
        """Continue purging objects, then delete the container itself."""
        if objects is None:  # resource was not created or is gone already
            return True
        if objects:  # integer >=0 from the first invocation
            objs = self._get_objects()
            if objs is None:
                return True  # container is gone already
            if objs:
                # Delete one object per polling cycle.
                self._deleter(objs.pop())
                if objs:  # save one last _get_objects() API call
                    return False

        self._deleter()
        return True

    def handle_check(self):
        """Health check: the container must be retrievable."""
        self.client().get_container(self.resource_id)

    def get_reference_id(self):
        """The reference ID is the container name."""
        return six.text_type(self.resource_id)

    def _resolve_attribute(self, key):
        """Resolve URL-derived and HEAD-derived container attributes."""
        parsed = list(urlparse.urlparse(self.client().url))
        if key == self.DOMAIN_NAME:
            # Host portion of the endpoint, without any port.
            return parsed[1].split(':')[0]
        elif key == self.WEBSITE_URL:
            return '%s://%s%s/%s' % (parsed[0], parsed[1], parsed[2],
                                     self.resource_id)
        elif key == self.ROOT_URL:
            return '%s://%s%s' % (parsed[0], parsed[1], parsed[2])
        elif self.resource_id and key in (self.OBJECT_COUNT, self.BYTES_USED,
                                          self.HEAD_CONTAINER):
            try:
                headers = self.client().head_container(self.resource_id)
            except Exception as ex:
                # Degrade gracefully on client-side errors; re-raise others.
                if self.client_plugin().is_client_exception(ex):
                    LOG.warn(_LW("Head container failed: %s"), ex)
                    return None
                raise
            else:
                if key == self.OBJECT_COUNT:
                    return headers['x-container-object-count']
                elif key == self.BYTES_USED:
                    return headers['x-container-bytes-used']
                elif key == self.HEAD_CONTAINER:
                    return headers

    def _show_resource(self):
        """Return the container's headers as its resource data."""
        return self.client().head_container(self.resource_id)
# Exemplo n.º 27 (0) — Arquivo: user.py, Projeto: zzjeric/heat
class AccessKey(resource.Resource):
    """Implements AWS::IAM::AccessKey.

    The keypair itself is created and owned by the associated User
    resource; this resource records the access key id and caches the
    secret (encrypted) in resource data.
    """

    PROPERTIES = (
        SERIAL,
        USER_NAME,
        STATUS,
    ) = (
        'Serial',
        'UserName',
        'Status',
    )

    # NOTE(review): USER_NAME is re-bound here to the same string value
    # ('UserName') it has in PROPERTIES, so the shadowing is harmless.
    ATTRIBUTES = (
        USER_NAME,
        SECRET_ACCESS_KEY,
    ) = (
        'UserName',
        'SecretAccessKey',
    )

    properties_schema = {
        SERIAL:
        properties.Schema(properties.Schema.INTEGER,
                          _('Not Implemented.'),
                          implemented=False),
        USER_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('The name of the user that the new key will belong to.'),
            required=True),
        STATUS:
        properties.Schema(properties.Schema.STRING,
                          _('Not Implemented.'),
                          constraints=[
                              constraints.AllowedValues(['Active',
                                                         'Inactive']),
                          ],
                          implemented=False),
    }

    attributes_schema = {
        USER_NAME:
        attributes.Schema(_('Username associated with the AccessKey.'),
                          cache_mode=attributes.Schema.CACHE_NONE,
                          type=attributes.Schema.STRING),
        SECRET_ACCESS_KEY:
        attributes.Schema(_('Keypair secret key.'),
                          cache_mode=attributes.Schema.CACHE_NONE,
                          type=attributes.Schema.STRING),
    }

    def __init__(self, name, json_snippet, stack):
        """Initialise and, for pre-existing keys, re-register the handler."""
        super(AccessKey, self).__init__(name, json_snippet, stack)
        # Secret is fetched lazily; see _secret_accesskey().
        self._secret = None
        if self.resource_id:
            self._register_access_key()

    def _get_user(self):
        """Derive the keystone userid, stored in the User resource_id.

        Helper function to derive the keystone userid, which is stored in the
        resource_id of the User associated with this key. We want to avoid
        looking the name up via listing keystone users, as this requires admin
        rights in keystone, so FnGetAtt which calls _secret_accesskey won't
        work for normal non-admin users.
        """
        # Lookup User resource by intrinsic reference (which is what is passed
        # into the UserName parameter.  Would be cleaner to just make the User
        # resource return resource_id for FnGetRefId but the AWS definition of
        # user does say it returns a user name not ID
        return self.stack.resource_by_refid(self.properties[self.USER_NAME])

    def handle_create(self):
        """Create the keypair via the owning User resource."""
        user = self._get_user()
        if user is None:
            raise exception.NotFound(
                _('could not find user %s') % self.properties[self.USER_NAME])
        # The keypair is actually created and owned by the User resource
        kp = user._create_keypair()
        self.resource_id_set(kp.access)
        self._secret = kp.secret
        self._register_access_key()

        # Store the secret key, encrypted, in the DB so we don't have lookup
        # the user every time someone requests the SecretAccessKey attribute
        self.data_set('secret_key', kp.secret, redact=True)
        self.data_set('credential_id', kp.id, redact=True)

    def handle_delete(self):
        """Delete the keypair; best-effort when the user is already gone."""
        self._secret = None
        if self.resource_id is None:
            return

        user = self._get_user()
        if user is None:
            LOG.debug('Error deleting %s - user not found', str(self))
            return
        user._delete_keypair()

    def _secret_accesskey(self):
        """Return the user's access key.

        Fetching it from keystone if necessary.
        """
        if self._secret is None:
            if not self.resource_id:
                LOG.info(
                    'could not get secret for %(username)s '
                    'Error:%(msg)s', {
                        'username': self.properties[self.USER_NAME],
                        'msg': "resource_id not yet set"
                    })
            else:
                # First try to retrieve the secret from resource_data, but
                # for backwards compatibility, fall back to requesting from
                # keystone
                self._secret = self.data().get('secret_key')
                if self._secret is None:
                    try:
                        user_id = self._get_user().resource_id
                        kp = self.keystone().get_ec2_keypair(
                            user_id=user_id, access=self.resource_id)
                        self._secret = kp.secret
                        # Store the key in resource_data
                        self.data_set('secret_key', kp.secret, redact=True)
                        # And the ID of the v3 credential
                        self.data_set('credential_id', kp.id, redact=True)
                    except Exception as ex:
                        LOG.info(
                            'could not get secret for %(username)s '
                            'Error:%(msg)s', {
                                'username': self.properties[self.USER_NAME],
                                'msg': ex
                            })

        # Placeholder returned when the secret could not be resolved.
        return self._secret or '000-000-000'

    def _resolve_attribute(self, name):
        """Resolve the UserName and SecretAccessKey attributes."""
        if name == self.USER_NAME:
            return self.properties[self.USER_NAME]
        elif name == self.SECRET_ACCESS_KEY:
            return self._secret_accesskey()

    def _register_access_key(self):
        """Register an access-allowed handler keyed by the access key id."""
        def access_allowed(resource_name):
            return self._get_user().access_allowed(resource_name)

        self.stack.register_access_allowed_handler(self.resource_id,
                                                   access_allowed)
# Exemplo n.º 28 (0)
class ElasticIpAssociation(resource.Resource):
    """Associate an Elastic IP (floating IP) with an instance or port.

    Two mutually exclusive paths are supported:
    * classic: the EIP property names a nova floating IP to attach to
      the server given by InstanceId;
    * VPC: the AllocationId property names a neutron floating IP, which
      is bound to a port found via NetworkInterfaceId or InstanceId.
    """

    PROPERTIES = (
        INSTANCE_ID, EIP, ALLOCATION_ID, NETWORK_INTERFACE_ID,
    ) = (
        'InstanceId', 'EIP', 'AllocationId', 'NetworkInterfaceId',
    )

    properties_schema = {
        INSTANCE_ID: properties.Schema(
            properties.Schema.STRING,
            _('Instance ID to associate with EIP specified by EIP property.')
        ),
        EIP: properties.Schema(
            properties.Schema.STRING,
            _('EIP address to associate with instance.')
        ),
        ALLOCATION_ID: properties.Schema(
            properties.Schema.STRING,
            _('Allocation ID for VPC EIP address.')
        ),
        NETWORK_INTERFACE_ID: properties.Schema(
            properties.Schema.STRING,
            _('Network interface ID to associate with EIP.')
        ),
    }

    def FnGetRefId(self):
        # NOTE(review): `unicode` is Python 2 only — confirm before any
        # Python 3 port of this module.
        return unicode(self.physical_resource_name())

    def handle_create(self):
        """Add a floating IP address to a server."""
        # EIP (classic/nova) and AllocationId (VPC/neutron) must not both
        # be given: they are two different ways of naming the address.
        if self.properties[self.EIP] is not None \
                and self.properties[self.ALLOCATION_ID] is not None:
                    raise exception.ResourcePropertyConflict(
                        self.EIP,
                        self.ALLOCATION_ID)

        if self.properties[self.EIP]:
            # Classic path: attach the floating IP through nova.
            if not self.properties[self.INSTANCE_ID]:
                logger.warn(_('Skipping association, InstanceId not '
                            'specified'))
                return
            server = self.nova().servers.get(self.properties[self.INSTANCE_ID])
            server.add_floating_ip(self.properties[self.EIP])
            # The EIP address itself acts as the physical resource id.
            self.resource_id_set(self.properties[self.EIP])
            logger.debug(_('ElasticIpAssociation '
                           '%(instance)s.add_floating_ip(%(eip)s)'),
                         {'instance': self.properties[self.INSTANCE_ID],
                          'eip': self.properties[self.EIP]})
        elif self.properties[self.ALLOCATION_ID]:
            # VPC path: find the target port, then bind the floating IP
            # to it through neutron.
            assert clients.neutronclient, "Neutron required for VPC operations"
            port_id = None
            port_rsrc = None
            if self.properties[self.NETWORK_INTERFACE_ID]:
                port_id = self.properties[self.NETWORK_INTERFACE_ID]
                port_rsrc = self.neutron().list_ports(id=port_id)['ports'][0]
            elif self.properties[self.INSTANCE_ID]:
                instance_id = self.properties[self.INSTANCE_ID]
                ports = self.neutron().list_ports(device_id=instance_id)
                port_rsrc = ports['ports'][0]
                port_id = port_rsrc['id']
            else:
                logger.warn(_('Skipping association, resource not specified'))
                return

            float_id = self.properties[self.ALLOCATION_ID]
            self.resource_id_set(float_id)

            # assuming only one fixed_ip
            subnet_id = port_rsrc['fixed_ips'][0]['subnet_id']
            subnets = self.neutron().list_subnets(id=subnet_id)
            subnet_rsrc = subnets['subnets'][0]
            netid = subnet_rsrc['network_id']

            # Make sure the VPC router has a gateway onto the floating
            # IP's external network before associating the address.
            router = VPC.router_for_vpc(self.neutron(), netid)
            if router is not None:
                floatingip = self.neutron().show_floatingip(float_id)
                floating_net_id = \
                    floatingip['floatingip']['floating_network_id']
                self.neutron().add_gateway_router(
                    router['id'], {'network_id': floating_net_id})

            self.neutron().update_floatingip(
                float_id, {'floatingip': {'port_id': port_id}})

    def handle_delete(self):
        """Remove a floating IP address from a server or port."""
        if self.properties[self.EIP]:
            try:
                instance_id = self.properties[self.INSTANCE_ID]
                server = self.nova().servers.get(instance_id)
                if server:
                    server.remove_floating_ip(self.properties[self.EIP])
            except clients.novaclient.exceptions.NotFound:
                # Server already gone: nothing left to disassociate.
                pass
        elif self.properties[self.ALLOCATION_ID]:
            float_id = self.properties[self.ALLOCATION_ID]
            ne = clients.neutronclient.exceptions.NeutronClientException
            try:
                # Detach the floating IP from whatever port it is bound to.
                self.neutron().update_floatingip(
                    float_id, {'floatingip': {'port_id': None}})
            except ne as e:
                # A 404 means the floating IP no longer exists — the
                # disassociation is effectively done; anything else is real.
                if e.status_code != 404:
                    raise e
# Exemplo n.º 29
# 0
 class DummyResource(object):
     """Minimal stand-in resource: one required string property ("Foo")
     and no attributes; used as a lightweight test fixture.
     """
     # NOTE(review): this uses properties.STRING while neighbouring
     # classes use properties.Schema.STRING — confirm the constant exists
     # in this version of the properties module.
     properties_schema = {
         "Foo": properties.Schema(properties.STRING, required=True)
     }
     attributes_schema = {}
# Exemplo n.º 30
# 0
class Pool(neutron.NeutronResource):
    """
    A resource for managing load balancer pools in Neutron.
    """

    PROPERTIES = (
        PROTOCOL, SUBNET_ID, LB_METHOD, NAME, DESCRIPTION,
        ADMIN_STATE_UP, VIP, MONITORS,
    ) = (
        'protocol', 'subnet_id', 'lb_method', 'name', 'description',
        'admin_state_up', 'vip', 'monitors',
    )

    # Keys accepted inside the VIP map property.
    _VIP_KEYS = (
        VIP_NAME, VIP_DESCRIPTION, VIP_SUBNET, VIP_ADDRESS,
        VIP_CONNECTION_LIMIT, VIP_PROTOCOL_PORT,
        VIP_SESSION_PERSISTENCE, VIP_ADMIN_STATE_UP,
    ) = (
        'name', 'description', 'subnet', 'address',
        'connection_limit', 'protocol_port',
        'session_persistence', 'admin_state_up',
    )

    # Keys accepted inside the VIP's session_persistence sub-map.
    _VIP_SESSION_PERSISTENCE_KEYS = (
        VIP_SESSION_PERSISTENCE_TYPE, VIP_SESSION_PERSISTENCE_COOKIE_NAME,
    ) = (
        'type', 'cookie_name',
    )

    properties_schema = {
        PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            _('Protocol for balancing.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['TCP', 'HTTP', 'HTTPS']),
            ]
        ),
        SUBNET_ID: properties.Schema(
            properties.Schema.STRING,
            _('The subnet for the port on which the members '
              'of the pool will be connected.'),
            required=True
        ),
        LB_METHOD: properties.Schema(
            properties.Schema.STRING,
            _('The algorithm used to distribute load between the members of '
              'the pool.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ROUND_ROBIN',
                                           'LEAST_CONNECTIONS', 'SOURCE_IP']),
            ],
            update_allowed=True
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the pool.')
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the pool.'),
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of this pool.'),
            default=True,
            update_allowed=True
        ),
        VIP: properties.Schema(
            properties.Schema.MAP,
            _('IP address and port of the pool.'),
            schema={
                VIP_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of the vip.')
                ),
                VIP_DESCRIPTION: properties.Schema(
                    properties.Schema.STRING,
                    _('Description of the vip.')
                ),
                VIP_SUBNET: properties.Schema(
                    properties.Schema.STRING,
                    _('Subnet of the vip.')
                ),
                VIP_ADDRESS: properties.Schema(
                    properties.Schema.STRING,
                    _('IP address of the vip.')
                ),
                VIP_CONNECTION_LIMIT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('The maximum number of connections per second '
                      'allowed for the vip.')
                ),
                VIP_PROTOCOL_PORT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('TCP port on which to listen for client traffic '
                      'that is associated with the vip address.'),
                    required=True
                ),
                VIP_SESSION_PERSISTENCE: properties.Schema(
                    properties.Schema.MAP,
                    _('Configuration of session persistence.'),
                    schema={
                        VIP_SESSION_PERSISTENCE_TYPE: properties.Schema(
                            properties.Schema.STRING,
                            _('Method of implementation of session '
                              'persistence feature.'),
                            required=True,
                            constraints=[constraints.AllowedValues(
                                ['SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE']
                            )]
                        ),
                        VIP_SESSION_PERSISTENCE_COOKIE_NAME: properties.Schema(
                            properties.Schema.STRING,
                            _('Name of the cookie, '
                              'required if type is APP_COOKIE.')
                        )
                    }
                ),
                VIP_ADMIN_STATE_UP: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('The administrative state of this vip.'),
                    default=True
                ),
            },
            required=True
        ),
        MONITORS: properties.Schema(
            properties.Schema.LIST,
            _('List of health monitors associated with the pool.'),
            default=[],
            update_allowed=True
        ),
    }

    update_allowed_keys = ('Properties',)

    attributes_schema = {
        'admin_state_up': _('The administrative state of this pool.'),
        'name': _('Name of the pool.'),
        'protocol': _('Protocol to balance.'),
        'subnet_id': _('The subnet for the port on which the members '
                       'of the pool will be connected.'),
        'lb_method': _('The algorithm used to distribute load between the '
                       'members of the pool.'),
        'description': _('Description of the pool.'),
        'tenant_id': _('Tenant owning the pool.'),
        'vip': _('Vip associated with the pool.'),
    }

    def validate(self):
        """Validate pool properties beyond the basic schema checks.

        When session persistence of type APP_COOKIE is configured on the
        vip, a cookie_name must also be present.

        :raises exception.StackValidationFailed: if cookie_name is
            missing for an APP_COOKIE persistence type.
        """
        res = super(Pool, self).validate()
        if res:
            return res

        session_p = self.properties[self.VIP].get(self.VIP_SESSION_PERSISTENCE)
        if session_p is None:
            # session persistence is not configured, skip validation
            return

        persistence_type = session_p[self.VIP_SESSION_PERSISTENCE_TYPE]
        if persistence_type == 'APP_COOKIE':
            if session_p.get(self.VIP_SESSION_PERSISTENCE_COOKIE_NAME):
                return

            msg = _('Property cookie_name is required, when '
                    'session_persistence type is set to APP_COOKIE.')
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the pool, associate its monitors and create its vip."""
        # NOTE(review): this local shadows the module-level `properties`
        # import for the remainder of the method.
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        # VIP and MONITORS are handled separately below, not passed to
        # create_pool.
        vip_properties = properties.pop(self.VIP)
        monitors = properties.pop(self.MONITORS)
        client = self.neutron()
        pool = client.create_pool({'pool': properties})['pool']
        self.resource_id_set(pool['id'])

        for monitor in monitors:
            client.associate_health_monitor(
                pool['id'], {'health_monitor': {'id': monitor}})

        vip_arguments = self.prepare_properties(
            vip_properties,
            '%s.vip' % (self.name,))

        session_p = vip_arguments.get(self.VIP_SESSION_PERSISTENCE)
        if session_p is not None:
            prepared_props = self.prepare_properties(session_p, None)
            vip_arguments['session_persistence'] = prepared_props

        # The vip inherits the pool's protocol.
        vip_arguments['protocol'] = self.properties[self.PROTOCOL]

        # Default the vip subnet to the pool's subnet; otherwise resolve
        # the user-supplied subnet name/id via neutron.
        if vip_arguments.get(self.VIP_SUBNET) is None:
            vip_arguments['subnet_id'] = self.properties[self.SUBNET_ID]
        else:
            vip_arguments[
                'subnet_id'] = neutronV20.find_resourceid_by_name_or_id(
                    self.neutron(),
                    'subnet',
                    vip_arguments.pop(self.VIP_SUBNET))

        vip_arguments['pool_id'] = pool['id']
        vip = client.create_vip({'vip': vip_arguments})['vip']

        # Remember the vip id for attribute resolution and deletion.
        self.metadata = {'vip': vip['id']}

    def _show_resource(self):
        """Return the pool's current attributes from neutron."""
        return self.neutron().show_pool(self.resource_id)['pool']

    def check_create_complete(self, data):
        """Poll pool and vip status until both are ACTIVE.

        Returns False while either is still PENDING_CREATE; raises on
        any other (unexpected) status.
        """
        attributes = self._show_resource()
        if attributes['status'] == 'PENDING_CREATE':
            return False
        elif attributes['status'] == 'ACTIVE':
            vip_attributes = self.neutron().show_vip(
                self.metadata['vip'])['vip']
            if vip_attributes['status'] == 'PENDING_CREATE':
                return False
            elif vip_attributes['status'] == 'ACTIVE':
                return True
            raise exception.Error(
                _('neutron reported unexpected vip resource[%(name)s] '
                  'status[%(status)s]') %
                {'name': vip_attributes['name'],
                 'status': vip_attributes['status']})
        raise exception.Error(
            _('neutron reported unexpected pool resource[%(name)s] '
              'status[%(status)s]') %
            {'name': attributes['name'],
             'status': attributes['status']})

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply property updates: reconcile monitors, then update the pool."""
        if prop_diff:
            client = self.neutron()
            monitors = set(prop_diff.pop(self.MONITORS, []))
            # NOTE(review): if MONITORS is updated to an empty list, the
            # falsy check below skips disassociating the old monitors —
            # confirm whether that is intended.
            if monitors:
                old_monitors = set(self.properties[self.MONITORS])
                for monitor in old_monitors - monitors:
                    client.disassociate_health_monitor(self.resource_id,
                                                       monitor)
                for monitor in monitors - old_monitors:
                    client.associate_health_monitor(
                        self.resource_id, {'health_monitor': {'id': monitor}})

            # Any remaining property changes go straight to update_pool.
            if prop_diff:
                client.update_pool(self.resource_id, {'pool': prop_diff})

    def _resolve_attribute(self, name):
        """Resolve the 'vip' attribute locally; defer the rest to the base."""
        if name == 'vip':
            return self.neutron().show_vip(self.metadata['vip'])['vip']
        return super(Pool, self)._resolve_attribute(name)

    def _confirm_vip_delete(self):
        """Generator task that polls until the vip is gone.

        Run via scheduler.TaskRunner: each step re-checks the vip and
        finishes when neutron reports it not found.
        """
        client = self.neutron()
        while True:
            try:
                yield
                client.show_vip(self.metadata['vip'])
            except NeutronClientException as ex:
                # Not-found terminates the wait; other errors propagate.
                self._handle_not_found_exception(ex)
                break

    def handle_delete(self):
        """Delete the vip (if recorded) and the pool.

        Returns a list of TaskRunner checkers that confirm both deletions
        have completed.
        """
        checkers = []
        if self.metadata:
            try:
                self.neutron().delete_vip(self.metadata['vip'])
            except NeutronClientException as ex:
                self._handle_not_found_exception(ex)
            else:
                checkers.append(scheduler.TaskRunner(self._confirm_vip_delete))
        try:
            self.neutron().delete_pool(self.resource_id)
        except NeutronClientException as ex:
            self._handle_not_found_exception(ex)
        else:
            checkers.append(scheduler.TaskRunner(self._confirm_delete))
        return checkers

    def check_delete_complete(self, checkers):
        '''Push all checkers to completion in list order.'''
        for checker in checkers:
            if not checker.started():
                checker.start()
            if not checker.step():
                return False
        return True