Example 1
        def constraints():
            def get_num(key):
                val = schema_dict.get(key)
                if val is not None:
                    val = Schema.str_to_num(val)
                return val

            if MIN_VALUE in schema_dict or MAX_VALUE in schema_dict:
                yield constr.Range(get_num(MIN_VALUE), get_num(MAX_VALUE))
            if MIN_LENGTH in schema_dict or MAX_LENGTH in schema_dict:
                yield constr.Length(get_num(MIN_LENGTH), get_num(MAX_LENGTH))
            if ALLOWED_VALUES in schema_dict:
                yield constr.AllowedValues(schema_dict[ALLOWED_VALUES])
            if ALLOWED_PATTERN in schema_dict:
                yield constr.AllowedPattern(schema_dict[ALLOWED_PATTERN])
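        # A minimal usage sketch (assumed, not from the original source):
        # given a hypothetical schema_dict such as
        #   {MIN_LENGTH: '1', MAX_LENGTH: '64'}
        # iterating the generator yields the matching constraint objects,
        # e.g. list(constraints()) -> [constr.Length(1, 64)].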
Example 2
        def constraints():
            desc = schema_dict.get(CONSTRAINT_DESCRIPTION)

            if MIN_VALUE in schema_dict or MAX_VALUE in schema_dict:
                yield constr.Range(Schema.get_num(MIN_VALUE, schema_dict),
                                   Schema.get_num(MAX_VALUE, schema_dict),
                                   desc)
            if MIN_LENGTH in schema_dict or MAX_LENGTH in schema_dict:
                yield constr.Length(Schema.get_num(MIN_LENGTH, schema_dict),
                                    Schema.get_num(MAX_LENGTH, schema_dict),
                                    desc)
            if ALLOWED_VALUES in schema_dict:
                yield constr.AllowedValues(schema_dict[ALLOWED_VALUES], desc)
            if ALLOWED_PATTERN in schema_dict:
                yield constr.AllowedPattern(schema_dict[ALLOWED_PATTERN], desc)
Example 3
        def constraints():
            constraints = schema_dict.get(cls.CONSTRAINTS)
            if constraints is None:
                return

            if not isinstance(constraints, list):
                raise exception.InvalidSchemaError(
                    message=_("Invalid parameter constraints for parameter "
                              "%s, expected a list") % param_name)

            for constraint in constraints:
                cls._check_dict(constraint, PARAM_CONSTRAINTS,
                                'parameter constraints')
                desc = constraint.get(DESCRIPTION)
                if RANGE in constraint:
                    cdef = constraint.get(RANGE)
                    cls._check_dict(cdef, RANGE_KEYS, 'range constraint')
                    yield constr.Range(parameters.Schema.get_num(MIN, cdef),
                                       parameters.Schema.get_num(MAX, cdef),
                                       desc)
                elif LENGTH in constraint:
                    cdef = constraint.get(LENGTH)
                    cls._check_dict(cdef, RANGE_KEYS, 'length constraint')
                    yield constr.Length(parameters.Schema.get_num(MIN, cdef),
                                        parameters.Schema.get_num(MAX, cdef),
                                        desc)
                elif ALLOWED_VALUES in constraint:
                    cdef = constraint.get(ALLOWED_VALUES)
                    yield constr.AllowedValues(cdef, desc)
                elif ALLOWED_PATTERN in constraint:
                    cdef = constraint.get(ALLOWED_PATTERN)
                    yield constr.AllowedPattern(cdef, desc)
                elif CUSTOM_CONSTRAINT in constraint:
                    cdef = constraint.get(CUSTOM_CONSTRAINT)
                    yield constr.CustomConstraint(cdef, desc)
                else:
                    raise exception.InvalidSchemaError(
                        message=_("No constraint expressed"))
Example 4
class ResourceGroup(stack_resource.StackResource):
    """Creates one or more identically configured nested resources.

    In addition to the `refs` attribute, this resource implements synthetic
    attributes that mirror those of the resources in the group. When
    getting an attribute from this resource, however, a list of attribute
    values for each resource in the group is returned. To get attribute values
    for a single resource in the group, synthetic attributes of the form
    `resource.{resource index}.{attribute name}` can be used. The resource ID
    of a particular resource in the group can be obtained via the synthetic
    attribute `resource.{resource index}`. Note that if you get an attribute
    without `{resource index}`, e.g. `[resource, {attribute name}]`, you'll get
    a list of that attribute's values for all resources in the group.

    While each resource in the group will be identically configured, this
    resource does allow for some index-based customization of the properties
    of the resources in the group. For example::

      resources:
        my_indexed_group:
          type: OS::Heat::ResourceGroup
          properties:
            count: 3
            resource_def:
              type: OS::Nova::Server
              properties:
                # create a unique name for each server
                # using its index in the group
                name: my_server_%index%
                image: CentOS 6.5
                flavor: 4GB Performance

    would result in a group of three servers having the same image and flavor,
    but names of `my_server_0`, `my_server_1`, and `my_server_2`. The variable
    used for substitution can be customized by using the `index_var` property.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        COUNT,
        INDEX_VAR,
        RESOURCE_DEF,
        REMOVAL_POLICIES,
    ) = (
        'count',
        'index_var',
        'resource_def',
        'removal_policies',
    )

    _RESOURCE_DEF_KEYS = (
        RESOURCE_DEF_TYPE,
        RESOURCE_DEF_PROPERTIES,
        RESOURCE_DEF_METADATA,
    ) = (
        'type',
        'properties',
        'metadata',
    )

    _REMOVAL_POLICIES_KEYS = (REMOVAL_RSRC_LIST, ) = ('resource_list', )

    _ROLLING_UPDATES_SCHEMA_KEYS = (
        MIN_IN_SERVICE,
        MAX_BATCH_SIZE,
        PAUSE_TIME,
    ) = (
        'min_in_service',
        'max_batch_size',
        'pause_time',
    )

    _BATCH_CREATE_SCHEMA_KEYS = (
        MAX_BATCH_SIZE,
        PAUSE_TIME,
    ) = (
        'max_batch_size',
        'pause_time',
    )

    _UPDATE_POLICY_SCHEMA_KEYS = (
        ROLLING_UPDATE,
        BATCH_CREATE,
    ) = (
        'rolling_update',
        'batch_create',
    )

    ATTRIBUTES = (
        REFS,
        ATTR_ATTRIBUTES,
    ) = (
        'refs',
        'attributes',
    )

    properties_schema = {
        COUNT:
        properties.Schema(properties.Schema.INTEGER,
                          _('The number of resources to create.'),
                          default=1,
                          constraints=[
                              constraints.Range(min=0),
                          ],
                          update_allowed=True),
        INDEX_VAR:
        properties.Schema(
            properties.Schema.STRING,
            _('A variable that this resource will replace with the '
              'current index of a given resource in the group. Can be used, '
              'for example, to customize the name property of grouped '
              'servers in order to differentiate them when listed with '
              'nova client.'),
            default="%index%",
            constraints=[constraints.Length(min=3)],
            support_status=support.SupportStatus(version='2014.2')),
        RESOURCE_DEF:
        properties.Schema(
            properties.Schema.MAP,
            _('Resource definition for the resources in the group. The value '
              'of this property is the definition of a resource just as if '
              'it had been declared in the template itself.'),
            schema={
                RESOURCE_DEF_TYPE:
                properties.Schema(properties.Schema.STRING,
                                  _('The type of the resources in the group.'),
                                  required=True),
                RESOURCE_DEF_PROPERTIES:
                properties.Schema(
                    properties.Schema.MAP,
                    _('Property values for the resources in the group.')),
                RESOURCE_DEF_METADATA:
                properties.Schema(
                    properties.Schema.MAP,
                    _('Supplied metadata for the resources in the group.'),
                    support_status=support.SupportStatus(version='5.0.0')),
            },
            required=True,
            update_allowed=True),
        REMOVAL_POLICIES:
        properties.Schema(
            properties.Schema.LIST,
            _('Policies for removal of resources on update.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                _('Policy to be processed when doing an update which '
                  'requires removal of specific resources.'),
                schema={
                    REMOVAL_RSRC_LIST:
                    properties.Schema(
                        properties.Schema.LIST,
                        _("List of resources to be removed "
                          "when doing an update which requires removal of "
                          "specific resources. "
                          "The resource may be specified several ways: "
                          "(1) The resource name, as in the nested stack, "
                          "(2) The resource reference returned from "
                          "get_resource in a template, as available via "
                          "the 'refs' attribute. "
                          "Note this is destructive on update when specified; "
                          "even if the count is not being reduced, and once "
                          "a resource name is removed, it's name is never "
                          "reused in subsequent updates."),
                        default=[]),
                },
            ),
            update_allowed=True,
            default=[],
            support_status=support.SupportStatus(version='2015.1')),
    }
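    # A hedged example of a removal_policies value (assumed, not from the
    # original source): remove the group member named "0" and the member whose
    # reference ID (as reported by the 'refs' attribute) matches:
    #
    #   removal_policies:
    #     - resource_list: ["0", "<refid of a group member>"]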

    attributes_schema = {
        REFS:
        attributes.Schema(
            _("A list of resource IDs for the resources in the group."),
            type=attributes.Schema.LIST),
        ATTR_ATTRIBUTES:
        attributes.Schema(
            _("A map of resource names to the specified attribute of each "
              "individual resource. "
              "Requires heat_template_version: 2014-10-16."),
            support_status=support.SupportStatus(version='2014.2'),
            type=attributes.Schema.MAP),
    }

    rolling_update_schema = {
        MIN_IN_SERVICE:
        properties.Schema(properties.Schema.INTEGER,
                          _('The minimum number of resources in service while '
                            'rolling updates are being executed.'),
                          constraints=[constraints.Range(min=0)],
                          default=0),
        MAX_BATCH_SIZE:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The maximum number of resources to replace at once.'),
            constraints=[constraints.Range(min=1)],
            default=1),
        PAUSE_TIME:
        properties.Schema(properties.Schema.NUMBER,
                          _('The number of seconds to wait between batches of '
                            'updates.'),
                          constraints=[constraints.Range(min=0)],
                          default=0),
    }

    batch_create_schema = {
        MAX_BATCH_SIZE:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The maximum number of resources to create at once.'),
            constraints=[constraints.Range(min=1)],
            default=1),
        PAUSE_TIME:
        properties.Schema(properties.Schema.NUMBER,
                          _('The number of seconds to wait between batches.'),
                          constraints=[constraints.Range(min=0)],
                          default=0),
    }

    update_policy_schema = {
        ROLLING_UPDATE:
        properties.Schema(
            properties.Schema.MAP,
            schema=rolling_update_schema,
            support_status=support.SupportStatus(version='5.0.0')),
        BATCH_CREATE:
        properties.Schema(
            properties.Schema.MAP,
            schema=batch_create_schema,
            support_status=support.SupportStatus(version='5.0.0'))
    }

    def get_size(self):
        return self.properties.get(self.COUNT)

    def validate_nested_stack(self):
        # Only validate the resource definition (which may be a
        # nested template) if count is non-zero, to enable folks
        # to disable features via a zero count if they wish
        if not self.get_size():
            return

        test_tmpl = self._assemble_nested(["0"], include_all=True)
        res_def = next(
            six.itervalues(test_tmpl.resource_definitions(self.stack)))
        # make sure we can resolve the nested resource type
        self.stack.env.get_class_to_instantiate(res_def.resource_type)

        try:
            name = "%s-%s" % (self.stack.name, self.name)
            nested_stack = self._parse_nested_stack(name, test_tmpl,
                                                    self.child_params())
            nested_stack.strict_validate = False
            nested_stack.validate()
        except Exception as ex:
            msg = _("Failed to validate: %s") % six.text_type(ex)
            raise exception.StackValidationFailed(message=msg)

    def _name_blacklist(self):
        """Resolve the remove_policies to names for removal."""

        nested = self.nested()

        # To avoid reusing names after removal, we store a comma-separated
        # blacklist in the resource data
        db_rsrc_names = self.data().get('name_blacklist')
        if db_rsrc_names:
            current_blacklist = db_rsrc_names.split(',')
        else:
            current_blacklist = []

        # Now we iterate over the removal policies, and update the blacklist
        # with any additional names
        rsrc_names = set(current_blacklist)

        if nested:
            for r in self.properties[self.REMOVAL_POLICIES]:
                if self.REMOVAL_RSRC_LIST in r:
                    # Tolerate string or int list values
                    for n in r[self.REMOVAL_RSRC_LIST]:
                        str_n = six.text_type(n)
                        if str_n in nested:
                            rsrc_names.add(str_n)
                            continue
                        rsrc = nested.resource_by_refid(str_n)
                        if rsrc:
                            rsrc_names.add(rsrc.name)

        # If the blacklist has changed, update the resource data
        if rsrc_names != set(current_blacklist):
            self.data_set('name_blacklist', ','.join(rsrc_names))
        return rsrc_names

    def _resource_names(self, size=None):
        name_blacklist = self._name_blacklist()
        if size is None:
            size = self.get_size()

        def is_blacklisted(name):
            return name in name_blacklist

        candidates = six.moves.map(six.text_type, itertools.count())

        return itertools.islice(
            six.moves.filterfalse(is_blacklisted, candidates), size)
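        # Illustration (assumed): with a blacklist of {"1"} and size 3, the
        # generated names would be ["0", "2", "3"]; blacklisted names are
        # skipped and never reused.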

    def _count_black_listed(self):
        """Return the number of current resource names that are blacklisted."""
        existing_members = grouputils.get_member_names(self)
        return len(self._name_blacklist() & set(existing_members))

    def handle_create(self):
        if self.update_policy.get(self.BATCH_CREATE):
            batch_create = self.update_policy[self.BATCH_CREATE]
            max_batch_size = batch_create[self.MAX_BATCH_SIZE]
            pause_sec = batch_create[self.PAUSE_TIME]
            checkers = self._replace(0, max_batch_size, pause_sec)
            checkers[0].start()
            return checkers
        else:
            names = self._resource_names()
            self.create_with_template(self._assemble_nested(names),
                                      self.child_params(),
                                      self.stack.timeout_secs())

    def check_create_complete(self, checkers=None):
        if checkers is None:
            return super(ResourceGroup, self).check_create_complete()
        for checker in checkers:
            if not checker.started():
                checker.start()
            if not checker.step():
                return False
        return True

    def _run_to_completion(self, template, timeout):
        updater = self.update_with_template(template, {}, timeout)

        while not super(ResourceGroup, self).check_update_complete(updater):
            yield

    def _run_update(self, total_capacity, max_updates, timeout):
        template = self._assemble_for_rolling_update(total_capacity,
                                                     max_updates)
        return self._run_to_completion(template, timeout)

    def check_update_complete(self, checkers):
        for checker in checkers:
            if not checker.started():
                checker.start()
            if not checker.step():
                return False
        return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if tmpl_diff:
            # parse update policy
            if rsrc_defn.UPDATE_POLICY in tmpl_diff:
                up = json_snippet.update_policy(self.update_policy_schema,
                                                self.context)
                self.update_policy = up

        checkers = []
        self.properties = json_snippet.properties(self.properties_schema,
                                                  self.context)
        if prop_diff and self.RESOURCE_DEF in prop_diff:
            updaters = self._try_rolling_update()
            if updaters:
                checkers.extend(updaters)

        if not checkers:
            resizer = scheduler.TaskRunner(
                self._run_to_completion,
                self._assemble_nested(self._resource_names()),
                self.stack.timeout_mins)
            checkers.append(resizer)

        checkers[0].start()
        return checkers

    def get_attribute(self, key, *path):
        if key.startswith("resource."):
            return grouputils.get_nested_attrs(self, key, False, *path)

        names = self._resource_names()
        if key == self.REFS:
            vals = [grouputils.get_rsrc_id(self, key, False, n) for n in names]
            return attributes.select_from_attribute(vals, path)
        if key == self.ATTR_ATTRIBUTES:
            if not path:
                raise exception.InvalidTemplateAttribute(resource=self.name,
                                                         key=key)
            return dict(
                (n, grouputils.get_rsrc_attr(self, key, False, n, *path))
                for n in names)

        path = [key] + list(path)
        return [
            grouputils.get_rsrc_attr(self, key, False, n, *path) for n in names
        ]

    def build_resource_definition(self, res_name, res_defn):
        res_def = copy.deepcopy(res_defn)
        props = res_def.get(self.RESOURCE_DEF_PROPERTIES)
        if props:
            repl_props = self._handle_repl_val(res_name, props)
            res_def[self.RESOURCE_DEF_PROPERTIES] = repl_props
        return template.HOTemplate20130523.rsrc_defn_from_snippet(
            res_name, res_def)

    def get_resource_def(self, include_all=False):
        """Returns the resource definition portion of the group.

        :param include_all: if False, only properties for the resource
               definition that are not empty will be included
        :type include_all: bool
        :return: resource definition for the group
        :rtype: dict
        """

        # At this stage, we don't mind whether all of the parameters have
        # values assigned. Pass in a custom resolver to the properties so we
        # do not error when a parameter does not have a user-entered value.
        def ignore_param_resolve(snippet):
            while isinstance(snippet, function.Function):
                try:
                    snippet = snippet.result()
                except exception.UserParameterMissing:
                    return None

            if isinstance(snippet, collections.Mapping):
                return dict(
                    (k, ignore_param_resolve(v)) for k, v in snippet.items())
            elif (not isinstance(snippet, six.string_types)
                  and isinstance(snippet, collections.Iterable)):
                return [ignore_param_resolve(v) for v in snippet]

            return snippet

        self.properties.resolve = ignore_param_resolve

        res_def = self.properties[self.RESOURCE_DEF]
        if not include_all:
            return self._clean_props(res_def)
        return res_def

    def _clean_props(self, res_defn):
        res_def = copy.deepcopy(res_defn)
        props = res_def.get(self.RESOURCE_DEF_PROPERTIES)
        if props:
            clean = dict((k, v) for k, v in props.items() if v is not None)
            props = clean
            res_def[self.RESOURCE_DEF_PROPERTIES] = props
        return res_def

    def _handle_repl_val(self, res_name, val):
        repl_var = self.properties[self.INDEX_VAR]

        def recurse(x):
            return self._handle_repl_val(res_name, x)

        if isinstance(val, six.string_types):
            return val.replace(repl_var, res_name)
        elif isinstance(val, collections.Mapping):
            return {k: recurse(v) for k, v in val.items()}
        elif isinstance(val, collections.Sequence):
            return [recurse(v) for v in val]
        return val
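        # For instance (assumed): with INDEX_VAR left at its default "%index%",
        #   self._handle_repl_val('2', {'name': 'my_server_%index%'})
        # returns {'name': 'my_server_2'}, recursing into nested maps/lists.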

    def _assemble_nested(self,
                         names,
                         include_all=False,
                         template_version=('heat_template_version',
                                           '2015-04-30')):

        def_dict = self.get_resource_def(include_all)
        definitions = [(k, self.build_resource_definition(k, def_dict))
                       for k in names]
        return scl_template.make_template(definitions,
                                          version=template_version)

    def _assemble_for_rolling_update(self,
                                     total_capacity,
                                     max_updates,
                                     include_all=False,
                                     template_version=('heat_template_version',
                                                       '2015-04-30')):
        names = list(self._resource_names(total_capacity))
        name_blacklist = self._name_blacklist()

        valid_resources = [(n, d)
                           for n, d in grouputils.get_member_definitions(self)
                           if n not in name_blacklist]

        targ_cap = self.get_size()

        def replace_priority(res_item):
            name, defn = res_item
            try:
                index = names.index(name)
            except ValueError:
                # High priority - delete immediately
                return 0
            else:
                if index < targ_cap:
                    # Update higher indices first
                    return targ_cap - index
                else:
                    # Low priority - don't update
                    return total_capacity

        old_resources = sorted(valid_resources, key=replace_priority)
        existing_names = set(n for n, d in valid_resources)
        new_names = six.moves.filterfalse(lambda n: n in existing_names, names)
        res_def = self.get_resource_def(include_all)
        definitions = scl_template.member_definitions(
            old_resources, res_def, total_capacity, max_updates,
            lambda: next(new_names), self.build_resource_definition)
        return scl_template.make_template(definitions,
                                          version=template_version)

    def _try_rolling_update(self):
        if self.update_policy[self.ROLLING_UPDATE]:
            policy = self.update_policy[self.ROLLING_UPDATE]
            return self._replace(policy[self.MIN_IN_SERVICE],
                                 policy[self.MAX_BATCH_SIZE],
                                 policy[self.PAUSE_TIME])

    def _update_timeout(self, batch_cnt, pause_sec):
        total_pause_time = pause_sec * max(batch_cnt - 1, 0)
        if total_pause_time >= self.stack.timeout_secs():
            msg = _('The current %s will result in a stack update '
                    'timeout.') % rsrc_defn.UPDATE_POLICY
            raise ValueError(msg)
        return self.stack.timeout_secs() - total_pause_time
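        # Worked example (assumed): with batch_cnt=3 and pause_sec=10, the
        # total pause time is 10 * (3 - 1) = 20 seconds, which is subtracted
        # from the stack timeout; if the pauses alone reach the stack timeout,
        # the update policy is rejected.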

    @staticmethod
    def _get_batches(targ_cap, curr_cap, batch_size, min_in_service):
        updated = 0

        while rolling_update.needs_update(targ_cap, curr_cap, updated):
            new_cap, total_new = rolling_update.next_batch(
                targ_cap, curr_cap, updated, batch_size, min_in_service)

            yield new_cap, total_new

            updated += total_new - max(new_cap - max(curr_cap, targ_cap), 0)
            curr_cap = new_cap

    def _replace(self, min_in_service, batch_size, pause_sec):
        def pause_between_batch(pause_sec):
            duration = timeutils.Duration(pause_sec)
            while not duration.expired():
                yield

        # number of existing resources that are blacklisted
        num_blacklist = self._count_black_listed()

        # current capacity not including existing blacklisted
        curr_cap = len(self.nested()) - num_blacklist if self.nested() else 0

        batches = list(
            self._get_batches(self.get_size(), curr_cap, batch_size,
                              min_in_service))
        update_timeout = self._update_timeout(len(batches), pause_sec)

        def tasks():
            for index, (curr_cap, max_upd) in enumerate(batches):
                yield scheduler.TaskRunner(self._run_update, curr_cap, max_upd,
                                           update_timeout)

                if index < (len(batches) - 1) and pause_sec > 0:
                    yield scheduler.TaskRunner(pause_between_batch, pause_sec)

        return list(tasks())

    def child_template(self):
        names = self._resource_names()
        return self._assemble_nested(names)

    def child_params(self):
        return {}

    def handle_adopt(self, resource_data):
        names = self._resource_names()
        if names:
            return self.create_with_template(self._assemble_nested(names), {},
                                             adopt_data=resource_data)
Example 5
class NetworkGateway(neutron.NeutronResource):
    """Network Gateway resource in Neutron Network Gateway.

    Resource for connecting internal networks with specified devices.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        NAME,
        DEVICES,
        CONNECTIONS,
    ) = (
        'name',
        'devices',
        'connections',
    )

    ATTRIBUTES = (DEFAULT, ) = ('default', )

    _DEVICES_KEYS = (
        ID,
        INTERFACE_NAME,
    ) = (
        'id',
        'interface_name',
    )

    _CONNECTIONS_KEYS = (
        NETWORK_ID,
        NETWORK,
        SEGMENTATION_TYPE,
        SEGMENTATION_ID,
    ) = (
        'network_id',
        'network',
        'segmentation_type',
        'segmentation_id',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          description=_('The name of the network gateway.'),
                          update_allowed=True),
        DEVICES:
        properties.Schema(
            properties.Schema.LIST,
            description=_('Device info for this network gateway.'),
            required=True,
            constraints=[constraints.Length(min=1)],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ID:
                    properties.Schema(properties.Schema.STRING,
                                      description=_(
                                          'The device id for the network '
                                          'gateway.'),
                                      required=True),
                    INTERFACE_NAME:
                    properties.Schema(properties.Schema.STRING,
                                      description=_(
                                          'The interface name for the '
                                          'network gateway.'),
                                      required=True)
                })),
        CONNECTIONS:
        properties.Schema(
            properties.Schema.LIST,
            description=_('Connection info for this network gateway.'),
            default={},
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NETWORK_ID:
                    properties.Schema(
                        properties.Schema.STRING,
                        support_status=support.SupportStatus(
                            status=support.HIDDEN,
                            message=_('Use property %s.') % NETWORK,
                            version='5.0.0',
                            previous_status=support.SupportStatus(
                                status=support.DEPRECATED, version='2014.2')),
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ],
                    ),
                    NETWORK:
                    properties.Schema(
                        properties.Schema.STRING,
                        description=_('The internal network to connect on '
                                      'the network gateway.'),
                        support_status=support.SupportStatus(version='2014.2'),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ],
                    ),
                    SEGMENTATION_TYPE:
                    properties.Schema(
                        properties.Schema.STRING,
                        description=_(
                            'L2 segmentation strategy on the external '
                            'side of the network gateway.'),
                        default='flat',
                        constraints=[
                            constraints.AllowedValues(('flat', 'vlan'))
                        ]),
                    SEGMENTATION_ID:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        description=_(
                            'The id for L2 segment on the external side '
                            'of the network gateway. Must be specified '
                            'when using vlan.'),
                        constraints=[constraints.Range(0, 4094)])
                }))
    }
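    # A minimal template sketch (assumed, not from the original source):
    #
    #   gateway:
    #     type: OS::Neutron::NetworkGateway
    #     properties:
    #       name: my-gateway
    #       devices:
    #         - id: <device ID>            # hypothetical device ID
    #           interface_name: breth1
    #       connections:
    #         - network: internal-net
    #           segmentation_type: vlan
    #           segmentation_id: 101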

    attributes_schema = {
        DEFAULT:
        attributes.Schema(_("A boolean value of default flag."),
                          type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        return [
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.CONNECTIONS, self.NETWORK],
                                        value_name=self.NETWORK_ID),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.CONNECTIONS, self.NETWORK],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='network')
        ]

    def _show_resource(self):
        return self.client().show_network_gateway(
            self.resource_id)['network_gateway']

    def validate(self):
        """Validate any of the provided params."""
        super(NetworkGateway, self).validate()
        connections = self.properties[self.CONNECTIONS]

        for connection in connections:
            segmentation_type = connection[self.SEGMENTATION_TYPE]
            segmentation_id = connection.get(self.SEGMENTATION_ID)

            if segmentation_type == 'vlan' and segmentation_id is None:
                msg = _("segmentation_id must be specified for using vlan")
                raise exception.StackValidationFailed(message=msg)

            if segmentation_type == 'flat' and segmentation_id:
                msg = _("segmentation_id cannot be specified except 0 for "
                        "using flat")
                raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())

        connections = props.pop(self.CONNECTIONS)
        ret = self.client().create_network_gateway({'network_gateway':
                                                    props})['network_gateway']

        self.resource_id_set(ret['id'])

        for connection in connections:
            if self.NETWORK in connection:
                connection['network_id'] = connection.pop(self.NETWORK)
            self.client().connect_network_gateway(ret['id'], connection)

    def handle_delete(self):
        if not self.resource_id:
            return

        connections = self.properties[self.CONNECTIONS]
        for connection in connections:
            with self.client_plugin().ignore_not_found:
                if self.NETWORK in connection:
                    connection['network_id'] = connection.pop(self.NETWORK)
                self.client().disconnect_network_gateway(
                    self.resource_id, connection)
        try:
            self.client().delete_network_gateway(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        connections = None
        if self.CONNECTIONS in prop_diff:
            connections = prop_diff.pop(self.CONNECTIONS)

        if self.DEVICES in prop_diff:
            self.handle_delete()
            self.properties.data.update(prop_diff)
            self.handle_create()
            return

        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_network_gateway(
                self.resource_id, {'network_gateway': prop_diff})

        if connections:
            for connection in self.properties[self.CONNECTIONS]:
                with self.client_plugin().ignore_not_found:
                    if self.NETWORK in connection:
                        connection['network_id'] = connection.pop(self.NETWORK)
                    self.client().disconnect_network_gateway(
                        self.resource_id, connection)
            for connection in connections:
                if self.NETWORK in connection:
                    connection['network_id'] = connection.pop(self.NETWORK)
                self.client().connect_network_gateway(self.resource_id,
                                                      connection)
Example 6
class SoftwareComponent(sc.SoftwareConfig):
    """A resource for describing and storing a software component.

    This resource is similar to OS::Heat::SoftwareConfig. In contrast to
    SoftwareConfig which allows for storing only one configuration (e.g. one
    script), SoftwareComponent allows for storing multiple configurations to
    address handling of all lifecycle hooks (CREATE, UPDATE, SUSPEND, RESUME,
    DELETE) for a software component in one place.

    This resource is backed by the persistence layer and the API of the
    SoftwareConfig resource, and only adds handling for the additional
    'configs' property and attribute.
    """

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        CONFIGS,
        INPUTS,
        OUTPUTS,
        OPTIONS,
    ) = ('configs', 'inputs', 'outputs', 'options')

    CONFIG_PROPERTIES = (
        CONFIG_ACTIONS,
        CONFIG_CONFIG,
        CONFIG_TOOL,
    ) = (
        'actions',
        'config',
        'tool',
    )

    ATTRIBUTES = (CONFIGS_ATTR, ) = ('configs', )

    # properties schema for one entry in the 'configs' list
    config_schema = properties.Schema(
        properties.Schema.MAP,
        schema={
            CONFIG_ACTIONS:
            properties.Schema(
                # Note: This properties schema allows for custom actions to be
                # specified, which will however require special handling in
                # in-instance hooks. By default, only the standard actions
                # stated below will be handled.
                properties.Schema.LIST,
                _('Lifecycle actions to which the configuration applies. '
                  'The string values provided for this property can include '
                  'the standard resource actions CREATE, DELETE, UPDATE, '
                  'SUSPEND and RESUME supported by Heat.'),
                default=[resource.Resource.CREATE, resource.Resource.UPDATE],
                schema=properties.Schema(properties.Schema.STRING),
                constraints=[
                    constr.Length(min=1),
                ]),
            CONFIG_CONFIG:
            sc.SoftwareConfig.properties_schema[sc.SoftwareConfig.CONFIG],
            CONFIG_TOOL:
            properties.Schema(
                properties.Schema.STRING,
                _('The configuration tool used to actually apply the '
                  'configuration on a server. This string property has '
                  'to be understood by in-instance tools running inside '
                  'deployed servers.'),
                required=True)
        })

    properties_schema = {
        CONFIGS:
        properties.Schema(
            properties.Schema.LIST,
            _('The list of configurations for the different lifecycle actions '
              'of the represented software component.'),
            schema=config_schema,
            constraints=[constr.Length(min=1)],
            required=True),
        INPUTS:
        sc.SoftwareConfig.properties_schema[sc.SoftwareConfig.INPUTS],
        OUTPUTS:
        sc.SoftwareConfig.properties_schema[sc.SoftwareConfig.OUTPUTS],
        OPTIONS:
        sc.SoftwareConfig.properties_schema[sc.SoftwareConfig.OPTIONS],
    }

    def handle_create(self):
        props = dict(self.properties)
        props[self.NAME] = self.physical_resource_name()
        # use config property of SoftwareConfig to store configs list
        configs = self.properties[self.CONFIGS]
        props[self.CONFIG] = {self.CONFIGS: configs}
        # set 'group' to enable component processing by in-instance hook
        props[self.GROUP] = 'component'
        del props['configs']

        sc = self.rpc_client().create_software_config(self.context, **props)
        self.resource_id_set(sc[rpc_api.SOFTWARE_CONFIG_ID])

    def _resolve_attribute(self, name):
        """Retrieve attributes of the SoftwareComponent resource.

        'configs' returns the list of configurations for the software
        component's lifecycle actions. If the attribute does not exist,
        an empty list is returned.
        """
        if name == self.CONFIGS_ATTR and self.resource_id:
            try:
                sc = self.rpc_client().show_software_config(
                    self.context, self.resource_id)
                # configs list is stored in 'config' property of parent class
                # (see handle_create)
                return sc[rpc_api.SOFTWARE_CONFIG_CONFIG].get(self.CONFIGS)
            except Exception as ex:
                self.rpc_client().ignore_error_named(ex, 'NotFound')

    def validate(self):
        """Validate SoftwareComponent properties consistency."""
        super(SoftwareComponent, self).validate()

        # One lifecycle action (e.g. CREATE) can only be associated with one
        # config; otherwise a way to define ordering would be required.
        configs = self.properties.get(self.CONFIGS, [])
        config_actions = set()
        for config in configs:
            actions = config.get(self.CONFIG_ACTIONS)
            if any(action in config_actions for action in actions):
                msg = _('Defining more than one configuration for the same '
                        'action in SoftwareComponent "%s" is not allowed.'
                        ) % self.name
                raise exception.StackValidationFailed(message=msg)
            config_actions.update(actions)
Example 7
class SubnetPool(neutron.NeutronResource):
    """A resource that implements neutron subnet pool.

    This resource can be used to create a subnet pool with a large block
    of addresses and create subnets from it.
    """

    support_status = support.SupportStatus(version='6.0.0')

    required_service_extension = 'subnet_allocation'

    PROPERTIES = (
        NAME,
        PREFIXES,
        ADDRESS_SCOPE,
        DEFAULT_QUOTA,
        DEFAULT_PREFIXLEN,
        MIN_PREFIXLEN,
        MAX_PREFIXLEN,
        IS_DEFAULT,
        TENANT_ID,
        SHARED,
    ) = (
        'name',
        'prefixes',
        'address_scope',
        'default_quota',
        'default_prefixlen',
        'min_prefixlen',
        'max_prefixlen',
        'is_default',
        'tenant_id',
        'shared',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the subnet pool.'),
                          update_allowed=True),
        PREFIXES:
        properties.Schema(
            properties.Schema.LIST,
            _('List of subnet prefixes to assign.'),
            schema=properties.Schema(
                properties.Schema.STRING,
                constraints=[
                    constraints.CustomConstraint('net_cidr'),
                ],
            ),
            constraints=[constraints.Length(min=1)],
            required=True,
            update_allowed=True,
        ),
        ADDRESS_SCOPE:
        properties.Schema(
            properties.Schema.STRING,
            _('An address scope ID to assign to the subnet pool.'),
            constraints=[
                constraints.CustomConstraint('neutron.address_scope')
            ],
            update_allowed=True,
        ),
        DEFAULT_QUOTA:
        properties.Schema(
            properties.Schema.INTEGER,
            _('A per-tenant quota on the prefix space that can be allocated '
              'from the subnet pool for tenant subnets.'),
            constraints=[constraints.Range(min=0)],
            update_allowed=True,
        ),
        DEFAULT_PREFIXLEN:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The size of the prefix to allocate when the cidr or '
              'prefixlen attributes are not specified while creating '
              'a subnet.'),
            constraints=[constraints.Range(min=0)],
            update_allowed=True,
        ),
        MIN_PREFIXLEN:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Smallest prefix size that can be allocated '
              'from the subnet pool.'),
            constraints=[constraints.Range(min=0)],
            update_allowed=True,
        ),
        MAX_PREFIXLEN:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum prefix size that can be allocated '
              'from the subnet pool.'),
            constraints=[constraints.Range(min=0)],
            update_allowed=True,
        ),
        IS_DEFAULT:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether this is the default IPv4/IPv6 subnet pool. '
              'There can only be one default subnet pool for each IP family. '
              'Note that the default policy setting allows only '
              'administrative users to set this to True.'),
            default=False,
            update_allowed=True,
        ),
        TENANT_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID of the tenant who owns the subnet pool. Only '
              'administrative users can specify a tenant ID '
              'other than their own.')),
        SHARED:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether the subnet pool will be shared across all tenants. '
              'Note that the default policy setting restricts usage of this '
              'attribute to administrative users only.'),
            default=False,
        ),
    }

    def validate(self):
        super(SubnetPool, self).validate()
        self._validate_prefix_bounds()

    def _validate_prefix_bounds(self):
        min_prefixlen = self.properties[self.MIN_PREFIXLEN]
        default_prefixlen = self.properties[self.DEFAULT_PREFIXLEN]
        max_prefixlen = self.properties[self.MAX_PREFIXLEN]
        msg_fmt = _('Illegal prefix bounds: %(key1)s=%(value1)s, '
                    '%(key2)s=%(value2)s.')
        # min_prefixlen can not be greater than max_prefixlen
        if min_prefixlen and max_prefixlen and min_prefixlen > max_prefixlen:
            msg = msg_fmt % dict(key1=self.MAX_PREFIXLEN,
                                 value1=max_prefixlen,
                                 key2=self.MIN_PREFIXLEN,
                                 value2=min_prefixlen)
            raise exception.StackValidationFailed(message=msg)

        if default_prefixlen:
            # default_prefixlen can not be greater than max_prefixlen
            if max_prefixlen and default_prefixlen > max_prefixlen:
                msg = msg_fmt % dict(key1=self.MAX_PREFIXLEN,
                                     value1=max_prefixlen,
                                     key2=self.DEFAULT_PREFIXLEN,
                                     value2=default_prefixlen)
                raise exception.StackValidationFailed(message=msg)
            # min_prefixlen can not be greater than default_prefixlen
            if min_prefixlen and min_prefixlen > default_prefixlen:
                msg = msg_fmt % dict(key1=self.MIN_PREFIXLEN,
                                     value1=min_prefixlen,
                                     key2=self.DEFAULT_PREFIXLEN,
                                     value2=default_prefixlen)
                raise exception.StackValidationFailed(message=msg)

    def _validate_prefixes_for_update(self, prop_diff):
        old_prefixes = self.properties[self.PREFIXES]
        new_prefixes = prop_diff[self.PREFIXES]
        # check new_prefixes is a superset of old_prefixes
        if not netutils.is_prefix_subset(old_prefixes, new_prefixes):
            msg = (_('Property %(key)s updated value %(new)s should '
                     'be a superset of the existing value '
                     '%(old)s.') % dict(key=self.PREFIXES,
                                        new=sorted(new_prefixes),
                                        old=sorted(old_prefixes)))
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        if self.ADDRESS_SCOPE in props and props[self.ADDRESS_SCOPE]:
            props['address_scope_id'] = self.client_plugin(
            ).find_resourceid_by_name_or_id('address_scope',
                                            props.pop(self.ADDRESS_SCOPE))
        subnetpool = self.client().create_subnetpool({'subnetpool':
                                                      props})['subnetpool']
        self.resource_id_set(subnetpool['id'])

    def handle_delete(self):
        if self.resource_id is not None:
            with self.client_plugin().ignore_not_found:
                self.client().delete_subnetpool(self.resource_id)

    def _show_resource(self):
        return self.client().show_subnetpool(self.resource_id)['subnetpool']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        # check that new prefixes are superset of existing prefixes
        if self.PREFIXES in prop_diff:
            self._validate_prefixes_for_update(prop_diff)
        if self.ADDRESS_SCOPE in prop_diff:
            if prop_diff[self.ADDRESS_SCOPE]:
                prop_diff['address_scope_id'] = self.client_plugin(
                ).find_resourceid_by_name_or_id(
                    self.client(), 'address_scope',
                    prop_diff.pop(self.ADDRESS_SCOPE))
            else:
                prop_diff['address_scope_id'] = prop_diff.pop(
                    self.ADDRESS_SCOPE)
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_subnetpool(self.resource_id,
                                            {'subnetpool': prop_diff})
Example 8
class KeyPair(resource.Resource):
    """A resource for creating Nova key pairs.

    A key pair is an SSH key that can be injected into a server on launch.

    **Note** that if a new key is generated setting `save_private_key` to
    `True` results in the system saving the private key which can then be
    retrieved via the `private_key` attribute of this resource.

    Setting the `public_key` property means that the `private_key` attribute
    of this resource will always return an empty string regardless of the
    `save_private_key` setting since there will be no private key data to
    save.
    """

    support_status = support.SupportStatus(version='2014.1')

    required_service_extension = 'os-keypairs'

    PROPERTIES = (
        NAME,
        SAVE_PRIVATE_KEY,
        PUBLIC_KEY,
    ) = (
        'name',
        'save_private_key',
        'public_key',
    )

    ATTRIBUTES = (
        PUBLIC_KEY_ATTR,
        PRIVATE_KEY_ATTR,
    ) = (
        'public_key',
        'private_key',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('The name of the key pair.'),
                          required=True,
                          constraints=[constraints.Length(min=1, max=255)]),
        SAVE_PRIVATE_KEY:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('True if the system should remember a generated private key; '
              'False otherwise.'),
            default=False),
        PUBLIC_KEY:
        properties.Schema(
            properties.Schema.STRING,
            _('The optional public key. This allows users to supply the '
              'public key from a pre-existing key pair. If not supplied, a '
              'new key pair will be generated.')),
    }

    attributes_schema = {
        PUBLIC_KEY_ATTR:
        attributes.Schema(_('The public key.'), type=attributes.Schema.STRING),
        PRIVATE_KEY_ATTR:
        attributes.Schema(_('The private key if it has been saved.'),
                          cache_mode=attributes.Schema.CACHE_NONE,
                          type=attributes.Schema.STRING),
    }

    default_client_name = 'nova'

    entity = 'keypairs'

    def __init__(self, name, json_snippet, stack):
        super(KeyPair, self).__init__(name, json_snippet, stack)
        self._public_key = None

    @property
    def private_key(self):
        """Return the private SSH key for the resource."""
        if self.properties[self.SAVE_PRIVATE_KEY]:
            return self.data().get('private_key', '')
        else:
            return ''

    @property
    def public_key(self):
        """Return the public SSH key for the resource."""
        if not self._public_key:
            if self.properties[self.PUBLIC_KEY]:
                self._public_key = self.properties[self.PUBLIC_KEY]
            elif self.resource_id:
                nova_key = self.client_plugin().get_keypair(self.resource_id)
                self._public_key = nova_key.public_key
        return self._public_key

    def handle_create(self):
        pub_key = self.properties[self.PUBLIC_KEY] or None
        new_keypair = self.client().keypairs.create(self.properties[self.NAME],
                                                    public_key=pub_key)
        if (self.properties[self.SAVE_PRIVATE_KEY]
                and hasattr(new_keypair, 'private_key')):
            self.data_set('private_key', new_keypair.private_key, True)
        self.resource_id_set(new_keypair.id)

    def handle_check(self):
        self.client().keypairs.get(self.resource_id)

    def _resolve_attribute(self, key):
        attr_fn = {
            self.PRIVATE_KEY_ATTR: self.private_key,
            self.PUBLIC_KEY_ATTR: self.public_key
        }
        return six.text_type(attr_fn[key])

    def get_reference_id(self):
        return self.resource_id
Example 9
class Member(elb_res_base.ElbBaseResource):
    """A resource for member .

    Member resource for Elastic Load Balance Service.
    """

    MEMBER_KEYS = (
        SERVER_ID,
        ADDRESS,
    ) = (
        'server_id',
        'address',
    )
    PROPERTIES = (
        LISTENER_ID,
        MEMBERS,
    ) = (
        'listener_id',
        'members',
    )

    properties_schema = {
        LISTENER_ID:
        properties.Schema(properties.Schema.STRING,
                          _('The ID of listener associated.'),
                          required=True,
                          constraints=[constraints.CustomConstraint('elb.ls')
                                       ]),
        MEMBERS:
        properties.Schema(
            properties.Schema.LIST,
            _('The servers to add as members.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    SERVER_ID:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('ID of the server to add.'),
                        constraints=[
                            constraints.CustomConstraint('nova.server')
                        ],
                        required=True,
                    ),
                    ADDRESS:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The private address of the server to add.'),
                        required=True,
                    )
                }),
            required=True,
            constraints=[constraints.Length(min=1, max=6)],
            update_allowed=True),
    }

    def validate(self):
        super(Member, self).validate()
        members = self.properties[self.MEMBERS]
        server_ids = [m['server_id'] for m in members]
        if len(server_ids) != len(set(server_ids)):
            msg = (_('The %(sid)s must be different in property %(mem)s.') % {
                'sid': self.SERVER_ID,
                'mem': self.MEMBERS
            })
            raise exception.StackValidationFailed(message=msg)

    def _parse_members_entities(self, members_info):
        base_members_info = []
        member_ids = []
        for m in members_info:
            m_id = m['id']
            member_ids.append(m_id)
            base_info = None
            if m.get('server_id'):
                base_info = '.'.join([m_id, m.get('server_id')])
            else:
                base_info = m_id
            base_members_info.append(base_info)

        return member_ids, base_members_info
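        # Illustration (assumed): given
        #   members_info = [{'id': 'm1', 'server_id': 's1'}, {'id': 'm2'}]
        # this returns (['m1', 'm2'], ['m1.s1', 'm2']), i.e. the raw member ids
        # plus a "member_id.server_id" pair whenever a server_id is present.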

    def _handle_job_success(self, operate='create', entities=None):
        if entities:
            members_info = entities.get('members', [])
            mem_ids, base_info = self._parse_members_entities(members_info)
            if operate == 'create':
                if mem_ids:
                    # set 'mem_id1, mem_id2, mem_id3...' as resource_id
                    self.resource_id_set(','.join(mem_ids))
                if base_info:
                    # set resource_data to:
                    # {'members': 'mid1.sid1, mid2.sid2'}
                    self.data_set('members', ','.join(base_info))
                return
            old_member_ids = self.resource_id.split(',')
            old_base_info = self.data().get('members')
            old_base_list = old_base_info.split(',')

            if operate == 'update_add':
                member_ids = old_member_ids + mem_ids
                self.resource_id_set(','.join(set(member_ids)))
                self.data_set('members', ','.join(old_base_list + base_info))
            elif operate == 'update_remove':
                member_ids = set(old_member_ids) - set(mem_ids)
                self.resource_id_set(','.join(member_ids))
                ms_infos = copy.deepcopy(old_base_list)
                for info in base_info:
                    for m in old_base_list:
                        # the remove_member response entities carry no
                        # server_id, so match on member_id only
                        if info.split('.')[0] == m.split('.')[0]:
                            ms_infos.remove(m)
                            break

                self.data_set('members', ','.join(ms_infos))

    def _get_vpc_tag(self):
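        """Return the vpc_id of the listener's load balancer, used as a tag."""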
        ls_id = self.properties[self.LISTENER_ID]
        lb_id = self.client().listener.get(ls_id).loadbalancer_id
        vpc_id = self.client().loadbalancer.get(lb_id).vpc_id

        return vpc_id

    @retrying.retry(stop_max_attempt_number=60,
                    wait_fixed=2000,
                    retry_on_result=utils.retry_if_result_is_false)
    def _match_servers_tag(self, tag, ids):
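        """Return True once every server in ids is listed under the tag.

        Retried every 2 seconds, up to 60 attempts, while the result is
        False.
        """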
        servers = self.client('nova').servers.list(search_opts={'tag': tag})
        tagged = []
        for m in ids:
            for s in servers:
                if s.id == m:
                    tagged.append(m)
                    break
        return len(tagged) == len(ids)

    def _tag_check(self, tag, ids):
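        """Log whether the servers carry the expected tag; never raise."""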
        try:
            if self._match_servers_tag(tag, ids):
                LOG.info(_LI('Check tags success!'))
        except retrying.RetryError:
            # just log that the servers' tag check failed
            LOG.info(_LI('Check tags failed!'))

    def handle_create(self):
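        """Add all members to the listener and return the ELB job id."""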
        vpc_id = self._get_vpc_tag()
        ids = [m[self.SERVER_ID] for m in self.properties[self.MEMBERS]]
        self._tag_check(vpc_id, ids)

        props = self._prepare_properties(self.properties)
        job_id = self.client().listener.add_member(**props)['job_id']
        job_info = {'job_id': job_id, 'action': self.action}
        self._set_job(job_info)
        return job_id

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
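        """Add new members and remove dropped ones.

        Returns a tuple of (add_job_id, remove_job_id); either may be
        None when there is nothing to add or remove.
        """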
        if prop_diff:
            new_members = prop_diff.get(self.MEMBERS)
            old_members = self.properties[self.MEMBERS]
            add_members = [m for m in new_members if m not in old_members]
            remove_members = [m for m in old_members if m not in new_members]
            remove_ids = []
            stored_base_info = copy.deepcopy(
                self.data().get('members')).split(',')

            for rm in remove_members:
                for info in stored_base_info:
                    ms = info.split('.')
                    if rm['server_id'] == ms[1]:
                        remove_ids.append({'id': ms[0]})
                        break

            add_job_id = None
            remove_job_id = None
            self.add_job_success = True
            self.remove_job_success = True
            if add_members:
                vpc_id = self._get_vpc_tag()
                ids = [m[self.SERVER_ID] for m in add_members]
                self._tag_check(vpc_id, ids)
                add_job_id = self.client().listener.add_member(
                    listener_id=self.properties[self.LISTENER_ID],
                    members=add_members)['job_id']
                self.add_job_success = False
            if remove_ids:
                remove_job_id = self.client().listener.remove_member(
                    listener_id=self.properties[self.LISTENER_ID],
                    removeMember=remove_ids)['job_id']
                self.remove_job_success = False
            return add_job_id, remove_job_id

        return None, None

    def handle_delete(self):
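        """Remove all members from the listener.

        When resource_id is not set yet, try to recover the member ids
        from the pending create job before giving up.
        """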
        ls_id = self.properties[self.LISTENER_ID]
        # if there is no ls_id, the listener resource of the
        # backup stack may not have been created yet; in that
        # case, don't call remove_member with an invalid
        # listener id
        if not ls_id:
            return

        if not self.resource_id:
            job_info = self._get_job()
            job_id = job_info.get('job_id')
            if not job_id:
                return

            try:
                job_status, entities, error_code = self._get_job_info(job_id)
            except Exception as e:
                if self.client_plugin().is_not_found(e):
                    LOG.info('job %s not found', job_id)
                    return
                raise

            if job_status == utils.SUCCESS:
                members_info = entities.get('members', [])
                member_ids, base_info = self._parse_members_entities(
                    members_info)
                self.resource_id_set(','.join(set(member_ids)))
            return
        member_id_list = self.resource_id.split(',')
        remove_members = [{'id': m} for m in member_id_list]
        job_id = self.client().listener.remove_member(
            listener_id=ls_id, removeMember=remove_members)['job_id']

        return job_id

    def check_create_complete(self, job_id):
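        """Poll the create job; record the members and finish on success."""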
        job_status, entities, error_code = self._get_job_info(job_id)
        if job_status == utils.FAIL:
            self._set_job({})
            raise exception.ResourceUnknownStatus(
                result=(_('Job %(job)s failed: %(error_code)s, '
                          '%(entities)s') % {
                              'job': job_id,
                              'error_code': error_code,
                              'entities': entities
                          }),
                resource_status='Unknown')
        if job_status == utils.SUCCESS:
            self._handle_job_success(entities=entities)
            self._set_job({})
            return True

    def check_update_complete(self, job_ids):
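        """Poll the add and remove jobs until both have succeeded."""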
        add_job_id, remove_job_id = job_ids
        if not add_job_id and not remove_job_id:
            return True
        # check add job
        if add_job_id and not self.add_job_success:
            job_status, entities, error_code = self._get_job_info(add_job_id)
            if job_status == utils.FAIL:
                raise exception.ResourceUnknownStatus(
                    result=(_('Job %(job)s failed: %(error_code)s, '
                              '%(entities)s') % {
                                  'job': add_job_id,
                                  'error_code': error_code,
                                  'entities': entities
                              }),
                    resource_status='Unknown')
            if job_status == utils.SUCCESS:
                self.add_job_success = True
                self._handle_job_success(operate='update_add',
                                         entities=entities)
        if remove_job_id and not self.remove_job_success:
            job_status, entities, error_code =\
                self._get_job_info(remove_job_id)
            if job_status == utils.FAIL:
                raise exception.ResourceUnknownStatus(
                    result=(_('Job %(job)s failed: %(error_code)s, '
                              '%(entities)s') % {
                                  'job': remove_job_id,
                                  'error_code': error_code,
                                  'entities': entities
                              }),
                    resource_status='Unknown')
            if job_status == utils.SUCCESS:
                self.remove_job_success = True
                self._handle_job_success(operate='update_remove',
                                         entities=entities)

        return self.add_job_success and self.remove_job_success

    def check_delete_complete(self, job_id):
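        """Poll the remove job; no job id means there is nothing to wait for."""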
        if not job_id:
            return True
        return self._check_job_success(job_id, ignore_not_found=True)
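
# A minimal, standalone sketch (an illustration, not part of the resource
# above) of the bookkeeping format used by _handle_job_success:
# resource_id holds a comma-separated list of member ids, while the
# 'members' resource data holds 'member_id.server_id' pairs. The helper
# names below are hypothetical and only show how entries would be added
# and removed under that assumption.

def add_entries(old_base, new_pairs):
    # 'old_base' is the stored 'mid.sid,mid.sid' string; 'new_pairs' are
    # freshly parsed 'mid.sid' entries from the job entities.
    return ','.join(old_base.split(',') + new_pairs)


def remove_entries(old_base, removed_ids):
    # Drop every stored pair whose member id is in 'removed_ids'; the
    # remove_member response carries no server_id, so only the id part
    # before the '.' is compared, mirroring _handle_job_success.
    kept = [p for p in old_base.split(',')
            if p.split('.')[0] not in removed_ids]
    return ','.join(kept)


# Example:
#   add_entries('m1.s1', ['m2.s2'])         -> 'm1.s1,m2.s2'
#   remove_entries('m1.s1,m2.s2', {'m1'})   -> 'm2.s2'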