# Example #1
class SoftwareDeployment(signal_responder.SignalResponder):
    """This resource associates a server with some configuration.

    The configuration is to be deployed to that server.

    A deployment allows input values to be specified which map to the inputs
    schema defined in the config resource. These input values are interpreted
    by the configuration tool in a tool-specific manner.

    Whenever this resource goes to an IN_PROGRESS state, it creates an
    ephemeral config that includes the inputs values plus a number of extra
    inputs which have names prefixed with deploy_. The extra inputs relate
    to the current state of the stack, along with the information and
    credentials required to signal back the deployment results.

    Unless signal_transport=NO_SIGNAL, this resource will remain in an
    IN_PROGRESS state until the server signals it with the output values
    for that deployment. Those output values are then available as resource
    attributes, along with the default attributes deploy_stdout,
    deploy_stderr and deploy_status_code.

    Specifying actions other than the default CREATE and UPDATE will result
    in the deployment being triggered in those actions. For example this would
    allow cleanup configuration to be performed during actions SUSPEND and
    DELETE. A config could be designed to only work with some specific
    actions, or a config can read the value of the deploy_action input to
    allow conditional logic to perform different configuration for different
    actions.
    """

    support_status = support.SupportStatus(version='2014.1')

    # Property names exposed in the template schema.
    PROPERTIES = (
        CONFIG, SERVER, INPUT_VALUES,
        DEPLOY_ACTIONS, NAME, SIGNAL_TRANSPORT
    ) = (
        'config', 'server', 'input_values',
        'actions', 'name', 'signal_transport'
    )

    # Lifecycle actions that may be listed in the 'actions' property.
    ALLOWED_DEPLOY_ACTIONS = (
        resource.Resource.CREATE,
        resource.Resource.UPDATE,
        resource.Resource.DELETE,
        resource.Resource.SUSPEND,
        resource.Resource.RESUME,
    )

    # Attributes always available, in addition to any config-declared outputs.
    ATTRIBUTES = (
        STDOUT, STDERR, STATUS_CODE
    ) = (
        'deploy_stdout', 'deploy_stderr', 'deploy_status_code'
    )

    # Extra inputs injected into the derived config; see class docstring.
    DERIVED_CONFIG_INPUTS = (
        DEPLOY_SERVER_ID, DEPLOY_ACTION,
        DEPLOY_SIGNAL_ID, DEPLOY_STACK_ID,
        DEPLOY_RESOURCE_NAME, DEPLOY_AUTH_URL,
        DEPLOY_USERNAME, DEPLOY_PASSWORD,
        DEPLOY_PROJECT_ID, DEPLOY_USER_ID,
        DEPLOY_SIGNAL_VERB, DEPLOY_SIGNAL_TRANSPORT,
        DEPLOY_QUEUE_ID
    ) = (
        'deploy_server_id', 'deploy_action',
        'deploy_signal_id', 'deploy_stack_id',
        'deploy_resource_name', 'deploy_auth_url',
        'deploy_username', 'deploy_password',
        'deploy_project_id', 'deploy_user_id',
        'deploy_signal_verb', 'deploy_signal_transport',
        'deploy_queue_id'
    )

    SIGNAL_TRANSPORTS = (
        CFN_SIGNAL, TEMP_URL_SIGNAL, HEAT_SIGNAL, NO_SIGNAL,
        ZAQAR_SIGNAL
    ) = (
        'CFN_SIGNAL', 'TEMP_URL_SIGNAL', 'HEAT_SIGNAL', 'NO_SIGNAL',
        'ZAQAR_SIGNAL'
    )

    properties_schema = {
        CONFIG: properties.Schema(
            properties.Schema.STRING,
            _('ID of software configuration resource to execute when '
              'applying to the server.'),
            update_allowed=True
        ),
        SERVER: properties.Schema(
            properties.Schema.STRING,
            _('ID of resource to apply configuration to. '
              'Normally this should be a Nova server ID.'),
            required=True,
        ),
        INPUT_VALUES: properties.Schema(
            properties.Schema.MAP,
            _('Input values to apply to the software configuration on this '
              'server.'),
            update_allowed=True
        ),
        DEPLOY_ACTIONS: properties.Schema(
            properties.Schema.LIST,
            _('Which lifecycle actions of the deployment resource will result '
              'in this deployment being triggered.'),
            update_allowed=True,
            default=[resource.Resource.CREATE, resource.Resource.UPDATE],
            constraints=[constraints.AllowedValues(ALLOWED_DEPLOY_ACTIONS)]
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the derived config associated with this deployment. '
              'This is used to apply a sort order to the list of '
              'configurations currently deployed to a server.'),
            update_allowed=True
        ),
        SIGNAL_TRANSPORT: properties.Schema(
            properties.Schema.STRING,
            _('How the server should signal to heat with the deployment '
              'output values. CFN_SIGNAL will allow an HTTP POST to a CFN '
              'keypair signed URL. TEMP_URL_SIGNAL will create a '
              'Swift TempURL to be signaled via HTTP PUT. HEAT_SIGNAL '
              'will allow calls to the Heat API resource-signal using the '
              'provided keystone credentials. ZAQAR_SIGNAL will create a '
              'dedicated zaqar queue to be signaled using the provided '
              'keystone credentials. NO_SIGNAL will result in the resource '
              'going to the COMPLETE state without waiting for any signal.'),
            default=cfg.CONF.default_deployment_signal_transport,
            constraints=[
                constraints.AllowedValues(SIGNAL_TRANSPORTS),
            ]
        ),
    }

    attributes_schema = {
        STDOUT: attributes.Schema(
            _("Captured stdout from the configuration execution."),
            type=attributes.Schema.STRING
        ),
        STDERR: attributes.Schema(
            _("Captured stderr from the configuration execution."),
            type=attributes.Schema.STRING
        ),
        STATUS_CODE: attributes.Schema(
            _("Returned status code from the configuration execution."),
            type=attributes.Schema.STRING
        ),
    }

    default_client_name = 'heat'

    no_signal_actions = ()

    # No need to make metadata_update() calls since deployments have a
    # dedicated API for changing state on signals
    signal_needs_metadata_updates = False

    def _build_properties(self, config_id, action):
        """Build the deployment properties for a create/update RPC call.

        With signal_transport=NO_SIGNAL the deployment is marked COMPLETE
        immediately; otherwise it stays IN_PROGRESS until signalled.
        """
        props = {
            'config_id': config_id,
            'action': action,
            'input_values': self.properties.get(self.INPUT_VALUES)
        }

        if self._signal_transport_none():
            props['status'] = SoftwareDeployment.COMPLETE
            props['status_reason'] = _('Not waiting for outputs signal')
        else:
            props['status'] = SoftwareDeployment.IN_PROGRESS
            props['status_reason'] = _('Deploy data available')
        return props

    def _delete_derived_config(self, derived_config_id):
        """Delete the given derived config, ignoring NotFound errors."""
        try:
            self.rpc_client().delete_software_config(
                self.context, derived_config_id)
        except Exception as ex:
            self.rpc_client().ignore_error_named(ex, 'NotFound')

    def _get_derived_config(self, action, source_config):
        """Create the derived config for this action and return its ID."""
        derived_params = self._build_derived_config_params(
            action, source_config)
        derived_config = self.rpc_client().create_software_config(
            self.context, **derived_params)
        return derived_config[rpc_api.SOFTWARE_CONFIG_ID]

    def _load_config(self):
        """Fetch the source config and normalize its inputs/outputs.

        Returns an empty config dict when no config property is set; the
        inputs/outputs lists are wrapped in swc_io config objects either way.
        """
        if self.properties.get(self.CONFIG):
            config = self.rpc_client().show_software_config(
                self.context, self.properties.get(self.CONFIG))
        else:
            config = {}

        config[rpc_api.SOFTWARE_CONFIG_INPUTS] = [
            swc_io.InputConfig(**i)
            for i in config.get(rpc_api.SOFTWARE_CONFIG_INPUTS, [])
        ]
        config[rpc_api.SOFTWARE_CONFIG_OUTPUTS] = [
            swc_io.OutputConfig(**o)
            for o in config.get(rpc_api.SOFTWARE_CONFIG_OUTPUTS, [])
        ]

        return config

    def _handle_action(self, action, config=None):
        """Create or update the deployment for the given lifecycle action.

        Returns None if the action does not trigger a deployment (or no
        signal is expected); otherwise returns the deployment dict used by
        the check_*_complete() polling methods.
        """
        if config is None:
            config = self._load_config()

        # For 'component' group configs the valid actions come from the
        # individual component configs rather than the 'actions' property.
        if config.get(rpc_api.SOFTWARE_CONFIG_GROUP) == 'component':
            valid_actions = set()
            for conf in config[rpc_api.SOFTWARE_CONFIG_CONFIG]['configs']:
                valid_actions.update(conf['actions'])
            if action not in valid_actions:
                return
        elif action not in self.properties[self.DEPLOY_ACTIONS]:
            return

        props = self._build_properties(
            self._get_derived_config(action, config),
            action)

        if self.resource_id is None:
            resource_id = str(uuid.uuid4())
            self.resource_id_set(resource_id)
            sd = self.rpc_client().create_software_deployment(
                self.context,
                deployment_id=resource_id,
                server_id=self.properties[SoftwareDeployment.SERVER],
                stack_user_project_id=self.stack.stack_user_project_id,
                **props)
        else:
            sd = self.rpc_client().show_software_deployment(
                self.context, self.resource_id)
            prev_derived_config = sd[rpc_api.SOFTWARE_DEPLOYMENT_CONFIG_ID]
            sd = self.rpc_client().update_software_deployment(
                self.context,
                deployment_id=self.resource_id,
                **props)
            # The previous ephemeral derived config is no longer referenced.
            if prev_derived_config:
                self._delete_derived_config(prev_derived_config)
        if not self._signal_transport_none():
            # NOTE(pshchelo): sd is a simple dict, easy to serialize,
            # does not need fixing re LP bug #1393268
            return sd

    def _check_complete(self):
        """Poll the deployment status.

        Returns True when COMPLETE, raises exception.Error when FAILED,
        and returns None (keep polling) while still IN_PROGRESS.
        """
        sd = self.rpc_client().show_software_deployment(
            self.context, self.resource_id)
        status = sd[rpc_api.SOFTWARE_DEPLOYMENT_STATUS]
        if status == SoftwareDeployment.COMPLETE:
            return True
        elif status == SoftwareDeployment.FAILED:
            status_reason = sd[rpc_api.SOFTWARE_DEPLOYMENT_STATUS_REASON]
            message = _("Deployment to server failed: %s") % status_reason
            LOG.info(message)
            raise exception.Error(message)

    def _server_exists(self, sd):
        """Returns whether or not the deployment's server exists."""
        nova_client = self.client_plugin('nova')

        try:
            nova_client.get_server(sd['server_id'])
            return True
        except exception.EntityNotFound:
            return False

    def empty_config(self):
        """Return the config value to use when no derived config exists."""
        return ''

    def _build_derived_config_params(self, action, source):
        """Assemble the kwargs for creating the derived config."""
        derived_inputs = self._build_derived_inputs(action, source)
        derived_options = self._build_derived_options(action, source)
        derived_config = self._build_derived_config(
            action, source, derived_inputs, derived_options)
        derived_name = (self.properties.get(self.NAME) or
                        source.get(rpc_api.SOFTWARE_CONFIG_NAME))
        return {
            rpc_api.SOFTWARE_CONFIG_GROUP:
                source.get(rpc_api.SOFTWARE_CONFIG_GROUP) or 'Heat::Ungrouped',
            rpc_api.SOFTWARE_CONFIG_CONFIG:
                derived_config or self.empty_config(),
            rpc_api.SOFTWARE_CONFIG_OPTIONS: derived_options,
            rpc_api.SOFTWARE_CONFIG_INPUTS:
                [i.as_dict() for i in derived_inputs],
            rpc_api.SOFTWARE_CONFIG_OUTPUTS:
                [o.as_dict() for o in source[rpc_api.SOFTWARE_CONFIG_OUTPUTS]],
            rpc_api.SOFTWARE_CONFIG_NAME:
                derived_name or self.physical_resource_name()
        }

    def _build_derived_config(self, action, source,
                              derived_inputs, derived_options):
        """Return the config body for the derived config (unmodified here)."""
        return source.get(rpc_api.SOFTWARE_CONFIG_CONFIG)

    def _build_derived_options(self, action, source):
        """Return the options for the derived config (unmodified here)."""
        return source.get(rpc_api.SOFTWARE_CONFIG_OPTIONS)

    def _build_derived_inputs(self, action, source):
        """Build the full input list for the derived config.

        Combines the source config's declared inputs with the supplied
        input_values, then appends the deploy_* inputs describing the
        current action, stack and signalling mechanism.
        """
        inputs = source[rpc_api.SOFTWARE_CONFIG_INPUTS]
        input_values = dict(self.properties[self.INPUT_VALUES] or {})

        def derive_inputs():
            for input_config in inputs:
                value = input_values.pop(input_config.name(),
                                         input_config.default())
                yield swc_io.InputConfig(value=value, **input_config.as_dict())

            # for any input values that do not have a declared input, add
            # a derived declared input so that they can be used as config
            # inputs
            for inpk, inpv in input_values.items():
                yield swc_io.InputConfig(name=inpk, value=inpv)

            yield swc_io.InputConfig(
                name=self.DEPLOY_SERVER_ID, value=self.properties[self.SERVER],
                description=_('ID of the server being deployed to'))
            yield swc_io.InputConfig(
                name=self.DEPLOY_ACTION, value=action,
                description=_('Name of the current action being deployed'))
            yield swc_io.InputConfig(
                name=self.DEPLOY_STACK_ID,
                value=self.stack.identifier().stack_path(),
                description=_('ID of the stack this deployment belongs to'))
            yield swc_io.InputConfig(
                name=self.DEPLOY_RESOURCE_NAME, value=self.name,
                description=_('Name of this deployment resource in the stack'))
            yield swc_io.InputConfig(
                name=self.DEPLOY_SIGNAL_TRANSPORT,
                value=self.properties[self.SIGNAL_TRANSPORT],
                description=_('How the server should signal to heat with '
                              'the deployment output values.'))

            if self._signal_transport_cfn():
                yield swc_io.InputConfig(
                    name=self.DEPLOY_SIGNAL_ID,
                    value=self._get_ec2_signed_url(),
                    description=_('ID of signal to use for signaling output '
                                  'values'))
                yield swc_io.InputConfig(
                    name=self.DEPLOY_SIGNAL_VERB, value='POST',
                    description=_('HTTP verb to use for signaling output '
                                  'values'))

            elif self._signal_transport_temp_url():
                yield swc_io.InputConfig(
                    name=self.DEPLOY_SIGNAL_ID,
                    value=self._get_swift_signal_url(),
                    description=_('ID of signal to use for signaling output '
                                  'values'))
                yield swc_io.InputConfig(
                    name=self.DEPLOY_SIGNAL_VERB, value='PUT',
                    description=_('HTTP verb to use for signaling output '
                                  'values'))

            elif (self._signal_transport_heat() or
                  self._signal_transport_zaqar()):
                creds = self._get_heat_signal_credentials()
                yield swc_io.InputConfig(
                    name=self.DEPLOY_AUTH_URL, value=creds['auth_url'],
                    description=_('URL for API authentication'))
                yield swc_io.InputConfig(
                    name=self.DEPLOY_USERNAME, value=creds['username'],
                    description=_('Username for API authentication'))
                yield swc_io.InputConfig(
                    name=self.DEPLOY_USER_ID, value=creds['user_id'],
                    description=_('User ID for API authentication'))
                yield swc_io.InputConfig(
                    name=self.DEPLOY_PASSWORD, value=creds['password'],
                    description=_('Password for API authentication'))
                yield swc_io.InputConfig(
                    name=self.DEPLOY_PROJECT_ID, value=creds['project_id'],
                    description=_('ID of project for API authentication'))

            if self._signal_transport_zaqar():
                yield swc_io.InputConfig(
                    name=self.DEPLOY_QUEUE_ID,
                    value=self._get_zaqar_signal_queue_id(),
                    description=_('ID of queue to use for signaling output '
                                  'values'))

        return list(derive_inputs())

    def handle_create(self):
        """Trigger the deployment for the CREATE action."""
        return self._handle_action(self.CREATE)

    def check_create_complete(self, sd):
        """Poll until the CREATE deployment is complete (or not needed)."""
        if not sd:
            return True
        return self._check_complete()

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Trigger the deployment for the UPDATE action.

        Raises UpdateReplace when any derived input marked replace_on_change
        has a changed value.
        """
        old_config_id = self.properties.get(self.CONFIG)
        config = self._load_config()
        old_inputs = {i.name(): i
                      for i in self._build_derived_inputs(self.UPDATE, config)}

        self.properties = json_snippet.properties(self.properties_schema,
                                                  self.context)

        new_config_id = self.properties.get(self.CONFIG)
        if old_config_id != new_config_id:
            config = self._load_config()
        new_inputs = {i.name(): i
                      for i in self._build_derived_inputs(self.UPDATE, config)}

        for name, inp in six.iteritems(new_inputs):
            if inp.replace_on_change() and name in old_inputs:
                if inp.input_data() != old_inputs[name].input_data():
                    LOG.debug('Replacing SW Deployment due to change in '
                              'input "%s"', name)
                    raise exception.UpdateReplace

        return self._handle_action(self.UPDATE, config=config)

    def check_update_complete(self, sd):
        """Poll until the UPDATE deployment is complete (or not needed)."""
        if not sd:
            return True
        return self._check_complete()

    def handle_delete(self):
        """Trigger the deployment for the DELETE action, ignoring NotFound."""
        try:
            return self._handle_action(self.DELETE)
        except Exception as ex:
            self.rpc_client().ignore_error_named(ex, 'NotFound')

    def check_delete_complete(self, sd=None):
        """Delete the backing resources once no DELETE signal is pending."""
        if not sd or not self._server_exists(sd) or self._check_complete():
            self._delete_resource()
            return True

    def _delete_resource(self):
        """Remove signals, the stack user, the deployment and its config."""
        self._delete_signals()
        self._delete_user()

        derived_config_id = None
        if self.resource_id is not None:
            try:
                sd = self.rpc_client().show_software_deployment(
                    self.context, self.resource_id)
                derived_config_id = sd[rpc_api.SOFTWARE_DEPLOYMENT_CONFIG_ID]
                self.rpc_client().delete_software_deployment(
                    self.context, self.resource_id)
            except Exception as ex:
                self.rpc_client().ignore_error_named(ex, 'NotFound')

        if derived_config_id:
            self._delete_derived_config(derived_config_id)

    def handle_suspend(self):
        """Trigger the deployment for the SUSPEND action."""
        return self._handle_action(self.SUSPEND)

    def check_suspend_complete(self, sd):
        """Poll until the SUSPEND deployment is complete (or not needed)."""
        if not sd:
            return True
        return self._check_complete()

    def handle_resume(self):
        """Trigger the deployment for the RESUME action."""
        return self._handle_action(self.RESUME)

    def check_resume_complete(self, sd):
        """Poll until the RESUME deployment is complete (or not needed)."""
        if not sd:
            return True
        return self._check_complete()

    def handle_signal(self, details):
        """Forward a received signal to the deployment API."""
        return self.rpc_client().signal_software_deployment(
            self.context, self.resource_id, details,
            timeutils.utcnow().isoformat())

    def get_attribute(self, key, *path):
        """Resource attributes map to deployment outputs values."""
        sd = self.rpc_client().show_software_deployment(
            self.context, self.resource_id)
        ov = sd[rpc_api.SOFTWARE_DEPLOYMENT_OUTPUT_VALUES] or {}
        if key in ov:
            attribute = ov.get(key)
            return attributes.select_from_attribute(attribute, path)

        # Since there is no value for this key yet, check the output schemas
        # to find out if the key is valid
        sc = self.rpc_client().show_software_config(
            self.context, self.properties[self.CONFIG])
        outputs = sc[rpc_api.SOFTWARE_CONFIG_OUTPUTS] or []
        output_keys = [output['name'] for output in outputs]
        if key not in output_keys and key not in self.ATTRIBUTES:
            raise exception.InvalidTemplateAttribute(resource=self.name,
                                                     key=key)
        return None

    def validate(self):
        """Validate any of the provided params.

        :raises StackValidationFailed: if any property failed validation.
        """
        super(SoftwareDeployment, self).validate()
        server = self.properties[self.SERVER]
        if server:
            res = self.stack.resource_by_refid(server)
            if res:
                if not (res.properties.get('user_data_format') ==
                        'SOFTWARE_CONFIG'):
                    raise exception.StackValidationFailed(message=_(
                        "Resource %s's property user_data_format should be "
                        "set to SOFTWARE_CONFIG since there are software "
                        "deployments on it.") % server)
# Example #2
class CinderEncryptedVolumeType(resource.Resource):
    """A resource for encrypting a cinder volume type.

    Note that default cinder security policy usage of this resource
    is limited to being used by administrators only.
    """

    support_status = support.SupportStatus(version='5.0.0')

    default_client_name = 'cinder'

    # Entity name used by the base Resource for default show/delete handling.
    entity = 'volume_encryption_types'

    PROPERTIES = (
        PROVIDER, CONTROL_LOCATION, CIPHER, KEY_SIZE, VOLUME_TYPE
    ) = (
        'provider', 'control_location', 'cipher', 'key_size', 'volume_type'
    )

    properties_schema = {
        PROVIDER: properties.Schema(
            properties.Schema.STRING,
            _('The class that provides encryption support. '
              'For example, nova.volume.encryptors.luks.LuksEncryptor.'),
            required=True,
            update_allowed=True
        ),
        CONTROL_LOCATION: properties.Schema(
            properties.Schema.STRING,
            _('Notional service where encryption is performed '
              'For example, front-end. For Nova.'),
            constraints=[
                constraints.AllowedValues(['front-end', 'back-end'])
            ],
            default='front-end',
            update_allowed=True
        ),
        CIPHER: properties.Schema(
            properties.Schema.STRING,
            _('The encryption algorithm or mode. '
              'For example, aes-xts-plain64.'),
            constraints=[
                constraints.AllowedValues(
                    ['aes-xts-plain64', 'aes-cbc-essiv']
                )
            ],
            default=None,
            update_allowed=True
        ),
        KEY_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Size of encryption key, in bits. '
              'For example, 128 or 256.'),
            default=None,
            update_allowed=True
        ),
        VOLUME_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Name or id of volume type (OS::Cinder::VolumeType).'),
            required=True,
            constraints=[constraints.CustomConstraint('cinder.vtype')]
        ),
    }

    def _get_vol_type_id(self, volume_type):
        """Resolve a volume type name or ID to the volume type ID."""
        # Return directly rather than binding to a local named ``id``,
        # which would shadow the builtin.
        return self.client_plugin().get_volume_type(volume_type)

    def handle_create(self):
        """Create the encryption type for the referenced volume type."""
        body = {
            'provider': self.properties[self.PROVIDER],
            'cipher': self.properties[self.CIPHER],
            'key_size': self.properties[self.KEY_SIZE],
            'control_location': self.properties[self.CONTROL_LOCATION]
        }

        vol_type_id = self._get_vol_type_id(self.properties[self.VOLUME_TYPE])

        encrypted_vol_type = self.client().volume_encryption_types.create(
            volume_type=vol_type_id, specs=body
        )
        # The encryption type has no ID of its own; it is keyed by the
        # volume type it encrypts.
        self.resource_id_set(encrypted_vol_type.volume_type_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update the encryption specs with only the changed properties."""
        if prop_diff:
            self.client().volume_encryption_types.update(
                volume_type=self.resource_id, specs=prop_diff
            )
# Example #3
class HealthMonitor(neutron.NeutronResource):
    """A resource for managing health monitors for load balancers in Neutron.
    """

    PROPERTIES = (
        DELAY, TYPE, MAX_RETRIES, TIMEOUT, ADMIN_STATE_UP,
        HTTP_METHOD, EXPECTED_CODES, URL_PATH,
    ) = (
        'delay', 'type', 'max_retries', 'timeout', 'admin_state_up',
        'http_method', 'expected_codes', 'url_path',
    )

    properties_schema = {
        DELAY: properties.Schema(
            properties.Schema.INTEGER,
            _('The minimum time in seconds between regular connections of '
              'the member.'),
            required=True,
            update_allowed=True
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('One of predefined health monitor types.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['PING', 'TCP', 'HTTP', 'HTTPS']),
            ]
        ),
        MAX_RETRIES: properties.Schema(
            properties.Schema.INTEGER,
            _('Number of permissible connection failures before changing the '
              'member status to INACTIVE.'),
            required=True,
            update_allowed=True
        ),
        TIMEOUT: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of seconds for a monitor to wait for a '
              'connection to be established before it times out.'),
            required=True,
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the health monitor.'),
            default=True,
            update_allowed=True
        ),
        HTTP_METHOD: properties.Schema(
            properties.Schema.STRING,
            _('The HTTP method used for requests by the monitor of type '
              'HTTP.'),
            update_allowed=True
        ),
        EXPECTED_CODES: properties.Schema(
            properties.Schema.STRING,
            _('The list of HTTP status codes expected in response from the '
              'member to declare it healthy.'),
            update_allowed=True
        ),
        URL_PATH: properties.Schema(
            properties.Schema.STRING,
            _('The HTTP path used in the HTTP request used by the monitor to '
              'test a member health.'),
            update_allowed=True
        ),
    }

    update_allowed_keys = ('Properties',)

    # NOTE(review): old-style attributes_schema (plain description strings
    # rather than attributes.Schema objects), kept as-is for compatibility.
    attributes_schema = {
        'admin_state_up': _('The administrative state of this health '
                            'monitor.'),
        'delay': _('The minimum time in seconds between regular connections '
                   'of the member.'),
        'expected_codes': _('The list of HTTP status codes expected in '
                            'response from the member to declare it healthy.'),
        'http_method': _('The HTTP method used for requests by the monitor of '
                         'type HTTP.'),
        'max_retries': _('Number of permissible connection failures before '
                         'changing the member status to INACTIVE.'),
        'timeout': _('Maximum number of seconds for a monitor to wait for a '
                     'connection to be established before it times out.'),
        'type': _('One of predefined health monitor types.'),
        'url_path': _('The HTTP path used in the HTTP request used by the '
                      'monitor to test a member health.'),
        'tenant_id': _('Tenant owning the health monitor.'),
        'show': _('All attributes.'),
    }

    def handle_create(self):
        """Create the health monitor and record its ID as the resource ID."""
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        health_monitor = self.neutron().create_health_monitor(
            {'health_monitor': properties})['health_monitor']
        self.resource_id_set(health_monitor['id'])

    def _show_resource(self):
        """Return the current health monitor data from Neutron."""
        return self.neutron().show_health_monitor(
            self.resource_id)['health_monitor']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed properties to Neutron."""
        if prop_diff:
            self.neutron().update_health_monitor(
                self.resource_id, {'health_monitor': prop_diff})

    def handle_delete(self):
        """Delete the health monitor, tolerating an already-missing one."""
        try:
            self.neutron().delete_health_monitor(self.resource_id)
        except NeutronClientException as ex:
            self._handle_not_found_exception(ex)
        else:
            return self._delete_task()
# Example #4
 def test_allowed_values_schema(self):
     """An AllowedValues constraint serializes to its schema dict."""
     expected = {'allowed_values': ['foo', 'bar'],
                 'description': 'allowed values'}
     constraint = constraints.AllowedValues(['foo', 'bar'],
                                            description='allowed values')
     self.assertEqual(expected, dict(constraint))
# Example #5
class SecurityGroup(neutron.NeutronResource):
    """A Neutron security group together with its managed rule set.

    The resource creates the security group itself and then every rule
    listed in the ``rules`` property. As soon as the template supplies its
    own egress rule, Neutron's default allow-all egress rules are deleted;
    on update they are restored before the new rule list is applied.
    """

    # Top-level property names exposed to templates.
    PROPERTIES = (
        NAME,
        DESCRIPTION,
        RULES,
    ) = (
        'name',
        'description',
        'rules',
    )

    # Keys accepted inside each entry of the ``rules`` list.
    _RULE_KEYS = (
        RULE_DIRECTION,
        RULE_ETHERTYPE,
        RULE_PORT_RANGE_MIN,
        RULE_PORT_RANGE_MAX,
        RULE_PROTOCOL,
        RULE_REMOTE_MODE,
        RULE_REMOTE_GROUP_ID,
        RULE_REMOTE_IP_PREFIX,
    ) = (
        'direction',
        'ethertype',
        'port_range_min',
        'port_range_max',
        'protocol',
        'remote_mode',
        'remote_group_id',
        'remote_ip_prefix',
    )

    # Schema applied to each map in the ``rules`` list property.
    _rule_schema = {
        RULE_DIRECTION:
        properties.Schema(
            properties.Schema.STRING,
            _('The direction in which the security group rule is applied. '
              'For a compute instance, an ingress security group rule '
              'matches traffic that is incoming (ingress) for that '
              'instance. An egress rule is applied to traffic leaving '
              'the instance.'),
            default='ingress',
            constraints=[
                constraints.AllowedValues(['ingress', 'egress']),
            ]),
        RULE_ETHERTYPE:
        properties.Schema(properties.Schema.STRING,
                          _('Ethertype of the traffic.'),
                          default='IPv4',
                          constraints=[
                              constraints.AllowedValues(['IPv4', 'IPv6']),
                          ]),
        RULE_PORT_RANGE_MIN:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The minimum port number in the range that is matched by the '
              'security group rule. If the protocol is TCP or UDP, this '
              'value must be less than or equal to the value of the '
              'port_range_max attribute. If the protocol is ICMP, this '
              'value must be an ICMP type.')),
        RULE_PORT_RANGE_MAX:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The maximum port number in the range that is matched by the '
              'security group rule. The port_range_min attribute constrains '
              'the port_range_max attribute. If the protocol is ICMP, this '
              'value must be an ICMP type.')),
        RULE_PROTOCOL:
        properties.Schema(
            properties.Schema.STRING,
            _('The protocol that is matched by the security group rule. '
              'Valid values include tcp, udp, and icmp.')),
        RULE_REMOTE_MODE:
        properties.Schema(
            properties.Schema.STRING,
            _('Whether to specify a remote group or a remote IP prefix.'),
            default='remote_ip_prefix',
            constraints=[
                constraints.AllowedValues(
                    ['remote_ip_prefix', 'remote_group_id']),
            ]),
        RULE_REMOTE_GROUP_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The remote group ID to be associated with this security group '
              'rule. If no value is specified then this rule will use this '
              'security group for the remote_group_id.')),
        RULE_REMOTE_IP_PREFIX:
        properties.Schema(
            properties.Schema.STRING,
            _('The remote IP prefix (CIDR) to be associated with this '
              'security group rule.')),
    }

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('A string specifying a symbolic name for the security group, '
              'which is not required to be unique.'),
            update_allowed=True),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description of the security group.'),
                          update_allowed=True),
        RULES:
        properties.Schema(properties.Schema.LIST,
                          _('List of security group rules.'),
                          default=[],
                          schema=properties.Schema(properties.Schema.MAP,
                                                   schema=_rule_schema),
                          update_allowed=True),
    }

    # Template-shaped equivalents of the allow-all egress rules that
    # Neutron adds to every new group; used to restore the default
    # behaviour on update (see handle_update).
    default_egress_rules = [{
        "direction": "egress",
        "ethertype": "IPv4"
    }, {
        "direction": "egress",
        "ethertype": "IPv6"
    }]

    def validate(self):
        """Reject the reserved Neutron group name "default"."""
        super(SecurityGroup, self).validate()
        if self.properties.get(self.NAME) == 'default':
            msg = _('Security groups cannot be assigned the name "default".')
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the security group, then create each listed rule."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        # Rules are created one by one after the group exists; they are
        # not part of the create_security_group payload.
        rules = props.pop(self.RULES, [])

        sec = self.neutron().create_security_group({'security_group':
                                                    props})['security_group']

        self.resource_id_set(sec['id'])
        self._create_rules(rules)

    def _format_rule(self, r):
        """Translate a template rule map into a Neutron rule payload.

        Resolves the remote_mode pseudo-property into the mutually
        exclusive remote_group_id / remote_ip_prefix fields and
        stringifies port-range values.
        """
        rule = dict(r)
        rule['security_group_id'] = self.resource_id

        if 'remote_mode' in rule:
            remote_mode = rule.get(self.RULE_REMOTE_MODE)
            # remote_mode is Heat-only; Neutron must never see it.
            del (rule[self.RULE_REMOTE_MODE])

            if remote_mode == self.RULE_REMOTE_GROUP_ID:
                rule[self.RULE_REMOTE_IP_PREFIX] = None
                if not rule.get(self.RULE_REMOTE_GROUP_ID):
                    # if remote group is not specified then make this
                    # a self-referencing rule
                    rule[self.RULE_REMOTE_GROUP_ID] = self.resource_id
            else:
                rule[self.RULE_REMOTE_GROUP_ID] = None

        # Neutron accepts port range bounds as strings.
        for key in (self.RULE_PORT_RANGE_MIN, self.RULE_PORT_RANGE_MAX):
            if rule.get(key) is not None:
                rule[key] = str(rule[key])
        return rule

    def _create_rules(self, rules):
        """Create the given rules, dropping default egress rules first.

        The first egress rule encountered triggers deletion of the
        allow-all egress rules Neutron created with the group, so the
        template's egress rules fully define egress behaviour.
        """
        egress_deleted = False

        for i in rules:
            if i[self.RULE_DIRECTION] == 'egress' and not egress_deleted:
                # There is at least one egress rule, so delete the default
                # rules which allow all egress traffic
                egress_deleted = True

                def is_egress(rule):
                    return rule[self.RULE_DIRECTION] == 'egress'

                self._delete_rules(is_egress)

            rule = self._format_rule(i)

            try:
                self.neutron().create_security_group_rule(
                    {'security_group_rule': rule})
            except neutron_exp.NeutronClientException as ex:
                # ignore error if rule already exists
                if ex.status_code != 409:
                    raise

    def _delete_rules(self, to_delete=None):
        """Delete the group's rules, optionally filtered by a predicate.

        :param to_delete: optional callable taking a rule dict; when
            given, only rules for which it returns true are deleted.
        """
        try:
            sec = self.neutron().show_security_group(
                self.resource_id)['security_group']
        except neutron_exp.NeutronClientException as ex:
            self._handle_not_found_exception(ex)
        else:
            for rule in sec['security_group_rules']:
                if to_delete is None or to_delete(rule):
                    try:
                        self.neutron().delete_security_group_rule(rule['id'])
                    except neutron_exp.NeutronClientException as ex:
                        self._handle_not_found_exception(ex)

    def handle_delete(self):
        """Delete all rules and then the group itself (idempotent)."""

        if self.resource_id is None:
            return

        self._delete_rules()
        try:
            self.neutron().delete_security_group(self.resource_id)
        except neutron_exp.NeutronClientException as ex:
            self._handle_not_found_exception(ex)
        self.resource_id_set(None)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update group attributes and rebuild the rule set from scratch."""
        props = self.prepare_update_properties(json_snippet)
        rules = props.pop(self.RULES, [])

        self.neutron().update_security_group(self.resource_id,
                                             {'security_group': props})

        # handle rules changes by:
        # * deleting all rules
        # * restoring the default egress rules
        # * creating new rules
        self._delete_rules()
        self._create_rules(self.default_egress_rules)
        if rules:
            self._create_rules(rules)
Пример #6
0
class MultipartMime(software_config.SoftwareConfig):
    """A resource which assembles a collection of software configurations
    as a multi-part mime message.

    Parts in the message can be populated with inline configuration or
    references to other config resources. If the referenced resource is itself
    a valid multi-part mime message, that will be broken into parts and
    those parts appended to this message.

    The resulting multi-part mime message will be stored by the configs API
    and can be referenced in properties such as OS::Nova::Server user_data.

    This resource is generally used to build a list of cloud-init
    configuration elements including scripts and cloud-config. Since
    cloud-init is boot-only configuration, any changes to the definition
    will result in the replacement of all servers which reference it.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (PARTS, CONFIG, FILENAME, TYPE,
                  SUBTYPE) = ('parts', 'config', 'filename', 'type', 'subtype')

    # Allowed values for each part's ``type`` key.
    TYPES = (TEXT, MULTIPART) = ('text', 'multipart')

    properties_schema = {
        PARTS:
        properties.Schema(
            properties.Schema.LIST,
            _('Parts belonging to this message.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CONFIG:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Content of part to attach, either inline or by '
                          'referencing the ID of another software config '
                          'resource'),
                        required=True),
                    FILENAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Optional filename to associate with part.')),
                    TYPE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Whether the part content is text or multipart.'),
                        default=TEXT,
                        constraints=[constraints.AllowedValues(TYPES)]),
                    SUBTYPE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Optional subtype to specify with the type.')),
                }))
    }

    # Cached rendered multipart message; built lazily by get_message().
    message = None

    def handle_create(self):
        """Store the assembled message as an ungrouped software config."""
        props = {
            self.NAME: self.physical_resource_name(),
            self.CONFIG: self.get_message(),
            self.GROUP: 'Heat::Ungrouped'
        }
        sc = self.rpc_client().create_software_config(self.context, **props)
        self.resource_id_set(sc[rpc_api.SOFTWARE_CONFIG_ID])

    def get_message(self):
        """Assemble and cache the multipart MIME message string.

        Each part's ``config`` value is first tried as a software config
        ID via the RPC API; if that lookup raises NotFound the value is
        used as inline content. Parts typed ``multipart`` are exploded
        into their sub-parts, everything else is attached as text.
        """
        if self.message:
            return self.message

        subparts = []
        for item in self.properties.get(self.PARTS):
            config = item.get(self.CONFIG)
            part_type = item.get(self.TYPE, self.TEXT)
            part = config

            try:
                sc = self.rpc_client().show_software_config(
                    self.context, config)
            except Exception as ex:
                # Not a config ID: keep the literal value as the part body.
                self.rpc_client().ignore_error_named(ex, 'NotFound')
            else:
                part = sc[rpc_api.SOFTWARE_CONFIG_CONFIG]

            if part_type == self.MULTIPART:
                self._append_multiparts(subparts, part)
            else:
                filename = item.get(self.FILENAME, '')
                subtype = item.get(self.SUBTYPE, '')
                self._append_part(subparts, part, subtype, filename)

        mime_blob = multipart.MIMEMultipart(_subparts=subparts)
        self.message = mime_blob.as_string()
        return self.message

    @staticmethod
    def _append_multiparts(subparts, multi_part):
        """Explode a multipart message string and append its parts.

        Silently ignores content that does not parse to a multipart
        message.
        """
        multi_parts = email.message_from_string(multi_part)
        if not multi_parts or not multi_parts.is_multipart():
            return

        for part in multi_parts.get_payload():
            MultipartMime._append_part(subparts, part.get_payload(),
                                       part.get_content_subtype(),
                                       part.get_filename())

    @staticmethod
    def _append_part(subparts, part, subtype, filename):
        """Append one text part, deriving a subtype from the filename."""
        if not subtype and filename:
            # e.g. "foo.sh" -> subtype "foo"; only the stem is used.
            subtype = os.path.splitext(filename)[0]

        msg = MultipartMime._create_message(part, subtype, filename)
        subparts.append(msg)

    @staticmethod
    def _create_message(part, subtype, filename):
        """Build a MIMEText message, attaching a filename header if given."""
        msg = (text.MIMEText(part, _subtype=subtype)
               if subtype else text.MIMEText(part))
        if filename:
            msg.add_header('Content-Disposition',
                           'attachment',
                           filename=filename)
        return msg
class GridMember(resource.Resource):
    """A resource which represents an Infoblox Grid Member.

    This is used to provision new grid members on an existing grid. See the
    Grid Master resource to create a new grid.
    """

    PROPERTIES = (
        NAME,
        MODEL,
        LICENSES,
        TEMP_LICENSES,
        REMOTE_CONSOLE,
        ADMIN_PASSWORD,
        MGMT_PORT,
        LAN1_PORT,
        LAN2_PORT,
        HA_PORT,
        CONFIG_ADDR_TYPE,
        GM_IP,
        GM_CERTIFICATE,
        NAT_IP,
        # only 'enable' supported for now
        DNS_SETTINGS,
        DNS_ENABLE,
        DNS_RECURSIVE_RESOLVER,
        DNS_PORTS,
        DNS_ENABLE_FIXED_RRSET_ORDER_FQDNS,
        DNS_FIXED_RRSET_ORDER_FQDNS,
        DNS_USE_FIXED_RRSET_ORDER_FQDNS,
        DNS_DTC_HEALTH_SOURCE,
        DNS_DTC_HEALTH_SOURCE_ADDRESS,
        DNS_RPZ_QNAME_WAIT_RECURSE,
        DNS_USE_RPZ_QNAME_WAIT_RECURSE,
        DNS_LOG_DTC_GSLB,
        DNS_LOG_DTC_HEALTH,
        DNS_UNBOUND_LOGGING_LEVEL,
        HA_PAIR,
        VIP_PORT,
        USE_IPV4_VIP,
        VIRTUAL_ROUTER_ID,
        LAN2_VIRTUAL_ROUTER_ID,
        NODE2_MGMT_PORT,
        NODE2_LAN1_PORT,
        NODE2_LAN2_PORT,
        NODE2_HA_PORT,
        VIP_VLAN_ID,
        VIP6_VLAN_ID,
        UPDATE_ALLOWED_ADDRESS_PAIRS) = (
            'name', 'model', 'licenses', 'temp_licenses',
            'remote_console_enabled', 'admin_password', 'MGMT', 'LAN1', 'LAN2',
            'HA', 'config_addr_type', 'gm_ip', 'gm_certificate', 'nat_ip',
            'dns', 'enable', 'recursive_resolver', 'ports',
            'enable_fixed_rrset_order_fqdns', 'fixed_rrset_order_fqdns',
            'use_fixed_rrset_order_fqdns', 'dtc_health_source',
            'dtc_health_source_address', 'rpz_qname_wait_recurse',
            'use_rpz_qname_wait_recurse', 'log_dtc_glsb', 'log_dtc_health',
            'unbound_logging_level', 'ha_pair', 'VIP', 'use_ipv4_vip',
            'virtual_router_id', 'lan2_virtual_router_id', 'node2_MGMT',
            'node2_LAN1', 'node2_LAN2', 'node2_HA', 'vip_vlan_id',
            'vip6_vlan_id', 'update_allowed_address_pairs')

    ATTRIBUTES = (USER_DATA, NODE2_USER_DATA, NAME_ATTR,
                  DNS_UNBOUND_CAPABLE) = ('user_data', 'node2_user_data',
                                          'name', 'is_unbound_capable')

    ALLOWED_MODELS = ('CP-V1400', 'CP-V2200', 'CP-V800', 'IB-VM-100',
                      'IB-VM-1410', 'IB-VM-1420', 'IB-VM-2210', 'IB-VM-2220',
                      'IB-VM-4010', 'IB-VM-810', 'IB-VM-820', 'IB-VM-RSP',
                      'Rev1', 'Rev2')

    ALLOWED_LICENSES_PRE_PROVISION = ('cloud_api', 'dhcp', 'dns', 'dtc',
                                      'enterprise', 'fireeye', 'ms_management',
                                      'rpz', 'vnios')

    ALLOWED_LICENSES_TEMP = ('dns', 'rpz', 'cloud', 'cloud_api', 'enterprise',
                             'ipam', 'vnios', 'reporting')

    ALLOWED_CONFIG_ADDR_TYPES = ('IPV4', 'IPV6', 'BOTH')

    support_status = support.SupportStatus(
        support.UNSUPPORTED, _('See support.infoblox.com for support.'))

    properties_schema = {
        constants.CONNECTION:
        resource_utils.connection_schema(constants.DDI),
        NAME:
        properties.Schema(properties.Schema.STRING, _('Member name.')),
        MODEL:
        properties.Schema(
            properties.Schema.STRING,
            _('Infoblox model name.'),
            constraints=[constraints.AllowedValues(ALLOWED_MODELS)]),
        LICENSES:
        properties.Schema(
            properties.Schema.LIST,
            _('List of licenses to pre-provision.'),
            schema=properties.Schema(properties.Schema.STRING),
            constraints=[
                constraints.AllowedValues(ALLOWED_LICENSES_PRE_PROVISION)
            ]),
        TEMP_LICENSES:
        properties.Schema(
            properties.Schema.LIST,
            _('List of temporary licenses to apply to the member.'),
            schema=properties.Schema(properties.Schema.STRING),
            constraints=[constraints.AllowedValues(ALLOWED_LICENSES_TEMP)]),
        REMOTE_CONSOLE:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('Enable the remote console.')),
        ADMIN_PASSWORD:
        properties.Schema(properties.Schema.STRING,
                          _('The password to use for the admin user.')),
        GM_IP:
        properties.Schema(properties.Schema.STRING,
                          _('The Gridmaster IP address.'),
                          required=True),
        GM_CERTIFICATE:
        properties.Schema(
            properties.Schema.STRING,
            _('The Gridmaster SSL certificate for verification.'),
            required=False),
        NAT_IP:
        properties.Schema(
            properties.Schema.STRING,
            _('If the GM will see this member as a NATed address, enter that '
              'address here.'),
            required=False),
        MGMT_PORT:
        resource_utils.port_schema(MGMT_PORT, False),
        LAN1_PORT:
        resource_utils.port_schema(LAN1_PORT, True),
        LAN2_PORT:
        resource_utils.port_schema(LAN2_PORT, False),
        HA_PORT:
        resource_utils.port_schema(HA_PORT, False),
        CONFIG_ADDR_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('Address configuration types.'),
            constraints=[constraints.AllowedValues(ALLOWED_CONFIG_ADDR_TYPES)],
            default='IPV4'),
        DNS_SETTINGS:
        properties.Schema(properties.Schema.MAP,
                          _('The DNS settings for this member.'),
                          required=False,
                          schema={
                              DNS_ENABLE:
                              properties.Schema(
                                  properties.Schema.BOOLEAN,
                                  _('If true, enable DNS on this member.'),
                                  default=False),
                          }),
        HA_PAIR:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('"True" if member should be configured as HA pair.'),
            required=False,
            default=False),
        VIP_PORT:
        resource_utils.port_schema(VIP_PORT, False),
        USE_IPV4_VIP:
        properties.Schema(properties.Schema.BOOLEAN,
                          required=False,
                          default=True),
        VIRTUAL_ROUTER_ID:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Virtual Router ID. '
              'Warning: Must be unique on the local network.'),
            required=False,
        ),
        LAN2_VIRTUAL_ROUTER_ID:
        properties.Schema(
            properties.Schema.INTEGER,
            _('LAN2 Virtual Router ID. '
              'Should set if configured a LAN2 address.'),
            required=False,
        ),
        NODE2_MGMT_PORT:
        resource_utils.port_schema(NODE2_MGMT_PORT, False),
        NODE2_LAN1_PORT:
        resource_utils.port_schema(NODE2_LAN1_PORT, False),
        NODE2_LAN2_PORT:
        resource_utils.port_schema(NODE2_LAN2_PORT, False),
        NODE2_HA_PORT:
        resource_utils.port_schema(NODE2_HA_PORT, False),
        VIP_VLAN_ID:
        properties.Schema(
            properties.Schema.INTEGER,
            required=False,
        ),
        VIP6_VLAN_ID:
        properties.Schema(
            properties.Schema.INTEGER,
            required=False,
        ),
        UPDATE_ALLOWED_ADDRESS_PAIRS:
        properties.Schema(properties.Schema.BOOLEAN,
                          required=False,
                          default=True),
    }

    attributes_schema = {
        USER_DATA:
        attributes.Schema(_('User data for the Nova boot process.'),
                          attributes.Schema.STRING),
        NODE2_USER_DATA:
        attributes.Schema(_('Node 2 user data for the Nova boot process.'),
                          attributes.Schema.STRING),
        NAME_ATTR:
        attributes.Schema(_('The member name.'), attributes.Schema.STRING)
    }

    def _make_network_settings(self, ip):
        """Build the IPv4 VIP settings for one Neutron fixed IP.

        :param ip: a fixed_ips entry with 'ip_address' and 'subnet_id'.
        :returns: a (settings_dict, subnet_dict) tuple.
        """
        subnet = self.client('neutron').show_subnet(ip['subnet_id'])['subnet']
        ipnet = netaddr.IPNetwork(subnet['cidr'])
        vip = {
            'address': ip['ip_address'],
            'subnet_mask': str(ipnet.netmask),
            'gateway': subnet['gateway_ip']
        }
        # BUGFIX: the vlan_id must be set on the settings dict before the
        # (settings, subnet) pair is formed; the previous code created the
        # tuple first and then indexed it with a string key, which raised
        # TypeError whenever vip_vlan_id was supplied.
        if self.properties[self.VIP_VLAN_ID]:
            vip['vlan_id'] = self.properties[self.VIP_VLAN_ID]
        return vip, subnet

    def _make_ipv6_settings(self, ip):
        """Build the IPv6 VIP settings for one Neutron fixed IP.

        :param ip: a fixed_ips entry with 'ip_address' and 'subnet_id'.
        :returns: a (settings_dict, subnet_dict) tuple.
        """
        subnet = self.client('neutron').show_subnet(ip['subnet_id'])['subnet']
        prefix = netaddr.IPNetwork(subnet['cidr'])
        # Router advertisement config is auto-enabled only for SLAAC subnets.
        autocfg = subnet['ipv6_ra_mode'] == "slaac"
        vip6 = {
            'virtual_ip': ip['ip_address'],
            'cidr_prefix': int(prefix.prefixlen),
            'gateway': subnet['gateway_ip'],
            'enabled': True,
            'auto_router_config_enabled': autocfg
        }
        # BUGFIX: same tuple-before-assignment defect as in
        # _make_network_settings; assign vlan_id on the dict, then pair.
        if self.properties[self.VIP6_VLAN_ID]:
            vip6['vlan_id'] = self.properties[self.VIP6_VLAN_ID]
        return vip6, subnet

    def infoblox(self):
        """Return (lazily creating) the Infoblox connection object."""
        if not getattr(self, 'infoblox_object', None):
            conn = self.properties[constants.CONNECTION]
            self.infoblox_object = resource_utils.connect_to_infoblox(conn)
        return self.infoblox_object

    def _make_port_network_settings(self, port_name, return_subnets=False):
        """Resolve a port property into IPv4/IPv6 VIP settings.

        :param port_name: name of the property holding a Neutron port ID.
        :param return_subnets: when true, also include the subnet dicts.
        :returns: dict with 'ipv4'/'ipv6' settings (and optionally
            'ipv4_subnet'/'ipv6_subnet'), or None if no port is set.
        """
        if self.properties[port_name] is None:
            return None

        port = self.client('neutron').show_port(
            self.properties[port_name])['port']

        if port is None:
            return None

        ipv4 = None
        ipv6 = None
        ipv4_subnet = None
        ipv6_subnet = None
        # Only the first address of each family is used.
        for ip in port['fixed_ips']:
            if ':' in ip['ip_address'] and ipv6 is None:
                ipv6, ipv6_subnet = self._make_ipv6_settings(ip)
            else:
                if ipv4 is None:
                    ipv4, ipv4_subnet = self._make_network_settings(ip)
        result = {'ipv4': ipv4, 'ipv6': ipv6}
        if return_subnets:
            result['ipv4_subnet'] = ipv4_subnet
            result['ipv6_subnet'] = ipv6_subnet
        return result

    def handle_create(self):
        """Create and pre-provision the member (single node or HA pair)."""
        mgmt = self._make_port_network_settings(self.MGMT_PORT)
        lan1 = self._make_port_network_settings(self.LAN1_PORT)
        lan2 = self._make_port_network_settings(self.LAN2_PORT)

        name = self.properties[self.NAME]
        nat = self.properties[self.NAT_IP]

        ha_pair = self.properties[self.HA_PAIR]
        if ha_pair:
            vrid = self.properties[self.VIRTUAL_ROUTER_ID]
            lan2_vrid = self.properties[self.LAN2_VIRTUAL_ROUTER_ID]
            vip = self._make_port_network_settings(self.VIP_PORT)
            node1_ha = self._make_port_network_settings(self.HA_PORT)
            node2_ha = self._make_port_network_settings(self.NODE2_HA_PORT)
            node2_lan1 = self._make_port_network_settings(self.NODE2_LAN1_PORT)
            node2_mgmt = self._make_port_network_settings(self.NODE2_MGMT_PORT)
            use_ipv4_vip = self.properties[self.USE_IPV4_VIP]
            config_addr_type = self.properties[self.CONFIG_ADDR_TYPE]
            if self.properties[self.UPDATE_ALLOWED_ADDRESS_PAIRS]:
                # Add 'allowed_address_pairs' to HA ports.
                resource_utils.fix_ha_ports_mac(
                    self.client('neutron'), vip, vrid, use_ipv4_vip,
                    (self.properties[self.HA_PORT],
                     self.properties[self.NODE2_HA_PORT]))
            # Create infoblox HA pair member
            self.infoblox().create_member(name=name,
                                          config_addr_type=config_addr_type,
                                          mgmt=mgmt,
                                          vip=vip,
                                          lan2=lan2,
                                          nat_ip=nat,
                                          ha_pair=ha_pair,
                                          use_v4_vrrp=use_ipv4_vip,
                                          node1_ha=node1_ha,
                                          node2_ha=node2_ha,
                                          node1_lan1=lan1,
                                          node2_lan1=node2_lan1,
                                          node2_mgmt=node2_mgmt,
                                          vrid=vrid,
                                          lan2_vrid=lan2_vrid)
        else:
            # Single-node member: LAN1 doubles as the VIP.
            self.infoblox().create_member(name=name,
                                          mgmt=mgmt,
                                          vip=lan1,
                                          lan2=lan2,
                                          nat_ip=nat)

        self.infoblox().pre_provision_member(
            name,
            hwmodel=self.properties[self.MODEL],
            hwtype='IB-VNIOS',
            licenses=self.properties[self.LICENSES],
            ha_pair=ha_pair)

        dns = self.properties[self.DNS_SETTINGS]
        if dns:
            self.infoblox().configure_member_dns(name,
                                                 enable_dns=dns['enable'])

        # The member name doubles as the resource ID.
        self.resource_id_set(name)

    def _remove_from_all_ns_groups(self):
        """Strip this member from every NS group it participates in."""
        # This is a workaround needed because Juno Heat does not honor
        # dependencies in nested autoscale group stacks.
        fields = {'name', 'grid_primary', 'grid_secondaries'}
        # Inter-process lock: concurrent member deletes would otherwise
        # race on read-modify-write of the shared NS group lists.
        with lockutils.lock(self.resource_id,
                            external=True,
                            lock_file_prefix='infoblox-ns_group-update'):
            groups = self.infoblox().get_all_ns_groups(return_fields=fields)
            for group in groups:
                new_list = {}
                changed = False
                for field in ('grid_primary', 'grid_secondaries'):
                    new_list[field] = []
                    for member in group[field]:
                        if member['name'] != self.resource_id:
                            new_list[field].append(member)
                        else:
                            changed = True
                if changed:
                    self.infoblox().update_ns_group(group['name'], new_list)

    def handle_delete(self):
        """Detach the member from NS groups, then delete it from the grid."""
        if self.resource_id is not None:
            self._remove_from_all_ns_groups()
            self.infoblox().delete_member(self.resource_id)

    def _get_dhcp_status_for_port(self, port_settings):
        """Return per-family DHCP enablement for a port's subnets."""
        status = {'ipv4': False, 'ipv6': False}

        if port_settings['ipv4'] and port_settings['ipv4_subnet']:
            status['ipv4'] = port_settings['ipv4_subnet']['enable_dhcp']

        if port_settings['ipv6'] and port_settings['ipv6_subnet']:
            status['ipv6'] = port_settings['ipv6_subnet']['enable_dhcp']
        return status

    def _make_user_data(self, member, token, node=0):
        """Render the #infoblox-config user data for one member node.

        :param member: member object with vip_setting/ipv6_setting/etc.
        :param token: list of per-node join tokens (may be empty).
        :param node: node index (0 for node 1, 1 for node 2 of an HA pair).
        """
        user_data = '#infoblox-config\n\n'

        temp_licenses = self.properties[self.TEMP_LICENSES]
        if temp_licenses and len(temp_licenses) > 0:
            user_data += 'temp_license: %s\n' % ','.join(temp_licenses)

        remote_console = self.properties[self.REMOTE_CONSOLE]
        if remote_console is not None:
            user_data += 'remote_console_enabled: %s\n' % remote_console

        admin_password = self.properties[self.ADMIN_PASSWORD]
        if admin_password is not None:
            user_data += 'default_admin_password: %s\n' % admin_password

        vip = member.get('vip_setting', None)
        ipv6 = member.get('ipv6_setting', None)
        enable_ha = member.get('enable_ha', False)
        if ipv6 and not ipv6.get('enabled', False):
            ipv6 = None

        lan1 = self._make_port_network_settings(self.LAN1_PORT,
                                                return_subnets=True)
        dhcp_status = self._get_dhcp_status_for_port(lan1)
        # Do not generate userdata for port if dhcp is enabled in subnet
        need_vip = vip and not dhcp_status.get('ipv4')
        need_ipv6 = ipv6 and not dhcp_status.get('ipv6')

        if need_vip or need_ipv6:
            user_data += 'lan1:\n'

        if need_vip:
            if enable_ha:
                # HA nodes boot with their own LAN/HA address, not the VIP.
                node_info = member.get('node_info')
                user_data += '  v4_addr: %s\n' % node_info[node][
                    'lan_ha_port_setting']['mgmt_lan']
            else:
                user_data += '  v4_addr: %s\n' % vip['address']
            user_data += '  v4_netmask: %s\n' % vip['subnet_mask']
            user_data += '  v4_gw: %s\n' % vip['gateway']

        if need_ipv6:
            user_data += '  v6_addr: %s\n' % ipv6['virtual_ip']
            user_data += '  v6_cidr: %s\n' % ipv6['cidr_prefix']
            if not ipv6['auto_router_config_enabled']:
                user_data += '  v6_gw: %s\n' % ipv6['gateway']

        if token and len(token) > 0:
            user_data += 'gridmaster:\n'
            user_data += '  token: %s\n' % token[node]['token']
            user_data += '  ip_addr: %s\n' % self.properties[self.GM_IP]
            # NOTE(review): assumes GM_CERTIFICATE was supplied whenever a
            # join token exists; a None certificate would raise
            # AttributeError here -- confirm against deployment templates.
            user_data += '  certificate: |\n    %s\n' % self.properties[
                self.GM_CERTIFICATE].replace('\n', '\n    ')

        LOG.debug('user_data: %s' % user_data)

        return user_data

    def _get_member_tokens(self, member):
        """Read the member's join tokens, creating them on first use."""
        token = self.infoblox().connector.call_func('read_token',
                                                    member['_ref'],
                                                    {})['pnode_tokens']
        if len(token) == 0:
            # create_token's return value is not usable directly; a fresh
            # read is required to obtain the per-node token list.
            self.infoblox().connector.call_func('create_token', member['_ref'],
                                                {})
            token = self.infoblox().connector.call_func(
                'read_token', member['_ref'], {})['pnode_tokens']
        return token

    def _resolve_attribute(self, name):
        """Resolve user_data/node2_user_data/name attributes for the member."""
        member_name = self.resource_id
        member = self.infoblox().get_member_obj(member_name,
                                                fail_if_no_member=True,
                                                return_fields=[
                                                    'host_name', 'vip_setting',
                                                    'ipv6_setting',
                                                    'enable_ha', 'node_info'
                                                ])
        LOG.debug("MEMBER for %s = %s" % (name, member))
        if name == self.USER_DATA:
            token = self._get_member_tokens(member)
            return self._make_user_data(member, token, 0)
        if name == self.NODE2_USER_DATA:
            token = self._get_member_tokens(member)
            return self._make_user_data(member, token, 1)
        if name == self.NAME_ATTR:
            return member['host_name']
        return None
Пример #8
0
class DnsRecord(object):
    """Nested-object schema container for a DNS record.

    Declares the Heat property schemas for every supported field of a DNS
    record (fqdn, type, ttl and the various rdata forms), together with the
    PROPERTIES list, the properties_schema mapping, and the
    field_references/unique_keys tables consumed by the surrounding
    resource machinery (see the get_avi_uuid_by_name note below).
    """

    # all schemas
    fqdn_item_schema = properties.Schema(
        properties.Schema.STRING,
        _("Fully Qualified Domain Name"),
        required=True,
        update_allowed=False,
    )
    fqdn_schema = properties.Schema(
        properties.Schema.LIST,
        _("Fully Qualified Domain Name"),
        schema=fqdn_item_schema,
        required=False,
        update_allowed=True,
    )
    type_schema = properties.Schema(
        properties.Schema.STRING,
        _("DNS record type"),
        required=True,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['DNS_RECORD_A', 'DNS_RECORD_AAAA', 'DNS_RECORD_ANY', 'DNS_RECORD_AXFR', 'DNS_RECORD_CNAME', 'DNS_RECORD_DNSKEY', 'DNS_RECORD_HINFO', 'DNS_RECORD_MX', 'DNS_RECORD_NS', 'DNS_RECORD_OPT', 'DNS_RECORD_OTHER', 'DNS_RECORD_PTR', 'DNS_RECORD_RP', 'DNS_RECORD_RRSIG', 'DNS_RECORD_SOA', 'DNS_RECORD_SRV', 'DNS_RECORD_TXT']),
        ],
    )
    ttl_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Time To Live for this DNS record"),
        required=False,
        update_allowed=True,
    )
    ip_address_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("IP address in A record"),
        schema=DnsARdata.properties_schema,
        required=True,
        update_allowed=False,
    )
    ip_address_schema = properties.Schema(
        properties.Schema.LIST,
        _("IP address in A record"),
        schema=ip_address_item_schema,
        required=False,
        update_allowed=True,
    )
    service_locator_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("Service locator info in SRV record"),
        schema=DnsSrvRdata.properties_schema,
        required=True,
        update_allowed=False,
    )
    service_locator_schema = properties.Schema(
        properties.Schema.LIST,
        _("Service locator info in SRV record"),
        schema=service_locator_item_schema,
        required=False,
        update_allowed=True,
    )
    cname_schema = properties.Schema(
        properties.Schema.MAP,
        _("Canonical name in CNAME record"),
        schema=DnsCnameRdata.properties_schema,
        required=False,
        update_allowed=True,
    )
    ns_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 17.1.1) Name Server information in NS record"),
        schema=DnsNsRdata.properties_schema,
        required=True,
        update_allowed=False,
    )
    ns_schema = properties.Schema(
        properties.Schema.LIST,
        _("(Introduced in: 17.1.1) Name Server information in NS record"),
        schema=ns_item_schema,
        required=False,
        update_allowed=True,
    )
    num_records_in_response_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("(Introduced in: 17.1.1) Specifies the number of records returned by the DNS service. Enter 0 to return all records. Default is 0 (Default: 0)"),
        required=False,
        update_allowed=True,
    )
    algorithm_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) Specifies the algorithm to pick the IP address(es) to be returned, when multiple entries are configured. This does not apply if num_records_in_response is 0. Default is round-robin. (Default: DNS_RECORD_RESPONSE_ROUND_ROBIN)"),
        required=False,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['DNS_RECORD_RESPONSE_CONSISTENT_HASH', 'DNS_RECORD_RESPONSE_ROUND_ROBIN']),
        ],
    )
    wildcard_match_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("(Introduced in: 17.1.1) Enable wild-card match of fqdn: if an exact match is not found in the DNS table, the longest match is chosen by wild-carding the fqdn in the DNS request. Default is false. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    delegated_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("(Introduced in: 17.1.2) Configured FQDNs are delegated domains (i.e. they represent a zone cut). (Default: False)"),
        required=False,
        update_allowed=True,
    )
    ip6_address_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 18.1.1) IPv6 address in AAAA record"),
        schema=DnsAAAARdata.properties_schema,
        required=True,
        update_allowed=False,
    )
    ip6_address_schema = properties.Schema(
        properties.Schema.LIST,
        _("(Introduced in: 18.1.1) IPv6 address in AAAA record"),
        schema=ip6_address_item_schema,
        required=False,
        update_allowed=True,
    )
    description_schema = properties.Schema(
        properties.Schema.STRING,
        _("Details of DNS record"),
        required=False,
        update_allowed=True,
    )

    # properties list
    PROPERTIES = (
        'fqdn',
        'type',
        'ttl',
        'ip_address',
        'service_locator',
        'cname',
        'ns',
        'num_records_in_response',
        'algorithm',
        'wildcard_match',
        'delegated',
        'ip6_address',
        'description',
    )

    # mapping of properties to their schemas
    properties_schema = {
        'fqdn': fqdn_schema,
        'type': type_schema,
        'ttl': ttl_schema,
        'ip_address': ip_address_schema,
        'service_locator': service_locator_schema,
        'cname': cname_schema,
        'ns': ns_schema,
        'num_records_in_response': num_records_in_response_schema,
        'algorithm': algorithm_schema,
        'wildcard_match': wildcard_match_schema,
        'delegated': delegated_schema,
        'ip6_address': ip6_address_schema,
        'description': description_schema,
    }

    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'ns': getattr(DnsNsRdata, 'field_references', {}),
        'ip6_address': getattr(DnsAAAARdata, 'field_references', {}),
        'cname': getattr(DnsCnameRdata, 'field_references', {}),
        'ip_address': getattr(DnsARdata, 'field_references', {}),
        'service_locator': getattr(DnsSrvRdata, 'field_references', {}),
    }

    # per-field unique-key tables delegated to the nested rdata types
    unique_keys = {
        'ns': getattr(DnsNsRdata, 'unique_keys', {}),
        'ip6_address': getattr(DnsAAAARdata, 'unique_keys', {}),
        'cname': getattr(DnsCnameRdata, 'unique_keys', {}),
        'ip_address': getattr(DnsARdata, 'unique_keys', {}),
        'service_locator': getattr(DnsSrvRdata, 'unique_keys', {}),
    }
# Example #9
class AccessKey(resource.Resource):
    """AWS-compatible IAM access key resource.

    Creates an EC2-style keypair owned by the referenced User resource,
    stores the access key id as the physical resource id, and exposes the
    secret key (kept encrypted in resource data) as an attribute.
    """

    PROPERTIES = (
        SERIAL,
        USER_NAME,
        STATUS,
    ) = (
        'Serial',
        'UserName',
        'Status',
    )

    ATTRIBUTES = (
        USER_NAME,
        SECRET_ACCESS_KEY,
    ) = (
        'UserName',
        'SecretAccessKey',
    )

    properties_schema = {
        SERIAL:
        properties.Schema(properties.Schema.INTEGER,
                          _('Not Implemented.'),
                          implemented=False),
        USER_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('The name of the user that the new key will belong to.'),
            required=True),
        STATUS:
        properties.Schema(properties.Schema.STRING,
                          _('Not Implemented.'),
                          constraints=[
                              constraints.AllowedValues(['Active',
                                                         'Inactive']),
                          ],
                          implemented=False),
    }

    attributes_schema = {
        USER_NAME:
        attributes.Schema(_('Username associated with the AccessKey.'),
                          cache_mode=attributes.Schema.CACHE_NONE),
        SECRET_ACCESS_KEY:
        attributes.Schema(_('Keypair secret key.'),
                          cache_mode=attributes.Schema.CACHE_NONE),
    }

    def __init__(self, name, json_snippet, stack):
        super(AccessKey, self).__init__(name, json_snippet, stack)
        # Cached secret key; populated on create or fetched lazily.
        self._secret = None
        # If the key already exists (e.g. resource reloaded from the DB),
        # re-register its access-allowed handler with the stack.
        if self.resource_id:
            self._register_access_key()

    def _get_user(self):
        """
        Helper function to derive the keystone userid, which is stored in the
        resource_id of the User associated with this key.  We want to avoid
        looking the name up via listing keystone users, as this requires admin
        rights in keystone, so FnGetAtt which calls _secret_accesskey won't
        work for normal non-admin users
        """
        # Lookup User resource by intrinsic reference (which is what is passed
        # into the UserName parameter.  Would be cleaner to just make the User
        # resource return resource_id for FnGetRefId but the AWS definition of
        # user does say it returns a user name not ID
        return self.stack.resource_by_refid(self.properties[self.USER_NAME])

    def handle_create(self):
        """Create the keypair via the owning User resource and persist it.

        Raises NotFound if the referenced User resource cannot be resolved
        in this stack.
        """
        user = self._get_user()
        if user is None:
            raise exception.NotFound(
                _('could not find user %s') % self.properties[self.USER_NAME])
        # The keypair is actually created and owned by the User resource
        kp = user._create_keypair()
        self.resource_id_set(kp.access)
        self._secret = kp.secret
        self._register_access_key()

        # Store the secret key, encrypted, in the DB so we don't have lookup
        # the user every time someone requests the SecretAccessKey attribute
        self.data_set('secret_key', kp.secret, redact=True)
        self.data_set('credential_id', kp.id, redact=True)

    def handle_delete(self):
        """Delete the keypair via the owning User resource (best effort)."""
        self._secret = None
        if self.resource_id is None:
            return

        user = self._get_user()
        if user is None:
            # Best effort: the user may already be gone; don't fail delete.
            LOG.debug('Error deleting %s - user not found' % str(self))
            return
        user._delete_keypair()

    def _secret_accesskey(self):
        """
        Return the user's access key, fetching it from keystone if necessary
        """
        if self._secret is None:
            if not self.resource_id:
                LOG.info(
                    _LI('could not get secret for %(username)s '
                        'Error:%(msg)s'), {
                            'username': self.properties[self.USER_NAME],
                            'msg': "resource_id not yet set"
                        })
            else:
                # First try to retrieve the secret from resource_data, but
                # for backwards compatibility, fall back to requesting from
                # keystone
                self._secret = self.data().get('secret_key')
                if self._secret is None:
                    try:
                        user_id = self._get_user().resource_id
                        kp = self.keystone().get_ec2_keypair(
                            user_id=user_id, access=self.resource_id)
                        self._secret = kp.secret
                        # Store the key in resource_data
                        self.data_set('secret_key', kp.secret, redact=True)
                        # And the ID of the v3 credential
                        self.data_set('credential_id', kp.id, redact=True)
                    except Exception as ex:
                        LOG.info(
                            _LI('could not get secret for %(username)s '
                                'Error:%(msg)s'),
                            {
                                'username': self.properties[self.USER_NAME],
                                'msg': ex
                            })

        # '000-000-000' is returned as a placeholder when no secret could
        # be obtained, rather than failing attribute resolution.
        return self._secret or '000-000-000'

    def _resolve_attribute(self, name):
        """Resolve the UserName and SecretAccessKey attributes."""
        if name == self.USER_NAME:
            return self.properties[self.USER_NAME]
        elif name == self.SECRET_ACCESS_KEY:
            return self._secret_accesskey()

    def _register_access_key(self):
        """Register a handler so the stack can authorize this access key."""
        def access_allowed(resource_name):
            # Delegate the access decision to the owning User resource.
            return self._get_user().access_allowed(resource_name)

        self.stack.register_access_allowed_handler(self.resource_id,
                                                   access_allowed)
# Example #10
class DnsRrSet(object):
    """Nested-object schema container for a DNS resource record set.

    Declares the Heat property schemas (fqdn, type, ttl and the rdata list
    forms), the PROPERTIES list, the properties_schema mapping, and the
    field_references/unique_keys tables used by the surrounding resource
    machinery (see the get_avi_uuid_by_name note below).
    """

    # all schemas
    fqdn_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 18.1.2) Fully Qualified Domain Name"),
        required=True,
        update_allowed=True,
    )
    type_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 18.1.2) DNS record type"),
        required=True,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['DNS_RECORD_A', 'DNS_RECORD_AAAA', 'DNS_RECORD_ANY', 'DNS_RECORD_AXFR', 'DNS_RECORD_CNAME', 'DNS_RECORD_DNSKEY', 'DNS_RECORD_HINFO', 'DNS_RECORD_MX', 'DNS_RECORD_NS', 'DNS_RECORD_OPT', 'DNS_RECORD_OTHER', 'DNS_RECORD_PTR', 'DNS_RECORD_RP', 'DNS_RECORD_RRSIG', 'DNS_RECORD_SOA', 'DNS_RECORD_SRV', 'DNS_RECORD_TXT']),
        ],
    )
    ttl_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("(Introduced in: 18.1.2) Time To Live for this DNS record"),
        required=True,
        update_allowed=True,
    )
    ip_addresses_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 18.1.2) IP address in A record"),
        schema=DnsARdata.properties_schema,
        required=True,
        update_allowed=False,
    )
    ip_addresses_schema = properties.Schema(
        properties.Schema.LIST,
        _("(Introduced in: 18.1.2) IP address in A record"),
        schema=ip_addresses_item_schema,
        required=False,
        update_allowed=True,
    )
    cname_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 18.1.2) Canonical name in CNAME record"),
        schema=DnsCnameRdata.properties_schema,
        required=False,
        update_allowed=True,
    )
    nses_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 18.1.2) Name Server information in NS record"),
        schema=DnsNsRdata.properties_schema,
        required=True,
        update_allowed=False,
    )
    nses_schema = properties.Schema(
        properties.Schema.LIST,
        _("(Introduced in: 18.1.2) Name Server information in NS record"),
        schema=nses_item_schema,
        required=False,
        update_allowed=True,
    )
    ip6_addresses_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 18.1.2) IPv6 address in AAAA record"),
        schema=DnsAAAARdata.properties_schema,
        required=True,
        update_allowed=False,
    )
    ip6_addresses_schema = properties.Schema(
        properties.Schema.LIST,
        _("(Introduced in: 18.1.2) IPv6 address in AAAA record"),
        schema=ip6_addresses_item_schema,
        required=False,
        update_allowed=True,
    )

    # properties list
    PROPERTIES = (
        'fqdn',
        'type',
        'ttl',
        'ip_addresses',
        'cname',
        'nses',
        'ip6_addresses',
    )

    # mapping of properties to their schemas
    properties_schema = {
        'fqdn': fqdn_schema,
        'type': type_schema,
        'ttl': ttl_schema,
        'ip_addresses': ip_addresses_schema,
        'cname': cname_schema,
        'nses': nses_schema,
        'ip6_addresses': ip6_addresses_schema,
    }

    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'ip_addresses': getattr(DnsARdata, 'field_references', {}),
        'cname': getattr(DnsCnameRdata, 'field_references', {}),
        'nses': getattr(DnsNsRdata, 'field_references', {}),
        'ip6_addresses': getattr(DnsAAAARdata, 'field_references', {}),
    }

    # per-field unique-key tables delegated to the nested rdata types
    unique_keys = {
        'ip_addresses': getattr(DnsARdata, 'unique_keys', {}),
        'cname': getattr(DnsCnameRdata, 'unique_keys', {}),
        'nses': getattr(DnsNsRdata, 'unique_keys', {}),
        'ip6_addresses': getattr(DnsAAAARdata, 'unique_keys', {}),
    }
# Example #11
class DnsInfo(object):
    """Nested-object schema container for DNS info attached to a VIP/FQDN.

    Declares the Heat property schemas (fqdn, ttl, type, response tuning,
    cname, metadata), the PROPERTIES list, the properties_schema mapping,
    and the field_references/unique_keys tables used by the surrounding
    resource machinery (see the get_avi_uuid_by_name note below).
    """

    # all schemas
    fqdn_schema = properties.Schema(
        properties.Schema.STRING,
        _("Fully qualified domain name."),
        required=False,
        update_allowed=True,
    )
    ttl_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Time to live for fqdn record. Default value is chosen from DNS profile for this cloud if no value provided."),
        required=False,
        update_allowed=True,
    )
    type_schema = properties.Schema(
        properties.Schema.STRING,
        _("DNS record type (Default: DNS_RECORD_A)"),
        required=False,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['DNS_RECORD_A', 'DNS_RECORD_AAAA', 'DNS_RECORD_ANY', 'DNS_RECORD_AXFR', 'DNS_RECORD_CNAME', 'DNS_RECORD_DNSKEY', 'DNS_RECORD_HINFO', 'DNS_RECORD_MX', 'DNS_RECORD_NS', 'DNS_RECORD_OPT', 'DNS_RECORD_OTHER', 'DNS_RECORD_PTR', 'DNS_RECORD_RP', 'DNS_RECORD_RRSIG', 'DNS_RECORD_SOA', 'DNS_RECORD_SRV', 'DNS_RECORD_TXT']),
        ],
    )
    num_records_in_response_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("(Introduced in: 17.1.1) Specifies the number of records returned for this FQDN. Enter 0 to return all records. Default is 0 (Default: 1)"),
        required=False,
        update_allowed=True,
    )
    algorithm_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) Specifies the algorithm to pick the IP address(es) to be returned, when multiple entries are configured. This does not apply if num_records_in_response is 0. Default is consistent hash. (Default: DNS_RECORD_RESPONSE_CONSISTENT_HASH)"),
        required=False,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['DNS_RECORD_RESPONSE_CONSISTENT_HASH', 'DNS_RECORD_RESPONSE_ROUND_ROBIN']),
        ],
    )
    cname_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 17.2.1) Canonical name in CNAME record."),
        schema=DnsCnameRdata.properties_schema,
        required=False,
        update_allowed=True,
    )
    metadata_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.2.2) Any metadata associated with this record"),
        required=False,
        update_allowed=False,
    )

    # properties list
    PROPERTIES = (
        'fqdn',
        'ttl',
        'type',
        'num_records_in_response',
        'algorithm',
        'cname',
        'metadata',
    )

    # mapping of properties to their schemas
    properties_schema = {
        'fqdn': fqdn_schema,
        'ttl': ttl_schema,
        'type': type_schema,
        'num_records_in_response': num_records_in_response_schema,
        'algorithm': algorithm_schema,
        'cname': cname_schema,
        'metadata': metadata_schema,
    }

    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'cname': getattr(DnsCnameRdata, 'field_references', {}),
    }

    unique_keys = {
        'cname': getattr(DnsCnameRdata, 'unique_keys', {}),
        # NOTE(review): unlike the other entries (which delegate to nested
        # rdata types), 'my_key' maps directly to a property name.  This
        # looks intentional (marking 'fqdn' as this object's unique key)
        # but differs from the sibling classes - confirm with the consumer
        # of unique_keys.
        'my_key': 'fqdn',
    }
# Example #12
class Port(neutron.NeutronResource):
    """A resource for managing Neutron ports.

    A port represents a virtual switch port on a logical network switch.
    The resource creates the port via the Neutron API, resolves network,
    subnet and security-group references, and supports in-place update or
    full replacement depending on the replacement_policy property.
    """

    PROPERTIES = (
        NETWORK_ID,
        NETWORK,
        NAME,
        VALUE_SPECS,
        ADMIN_STATE_UP,
        FIXED_IPS,
        MAC_ADDRESS,
        DEVICE_ID,
        SECURITY_GROUPS,
        ALLOWED_ADDRESS_PAIRS,
        DEVICE_OWNER,
        REPLACEMENT_POLICY,
        VNIC_TYPE,
    ) = (
        'network_id',
        'network',
        'name',
        'value_specs',
        'admin_state_up',
        'fixed_ips',
        'mac_address',
        'device_id',
        'security_groups',
        'allowed_address_pairs',
        'device_owner',
        'replacement_policy',
        'binding:vnic_type',
    )

    _FIXED_IP_KEYS = (
        FIXED_IP_SUBNET_ID,
        FIXED_IP_SUBNET,
        FIXED_IP_IP_ADDRESS,
    ) = (
        'subnet_id',
        'subnet',
        'ip_address',
    )

    _ALLOWED_ADDRESS_PAIR_KEYS = (
        ALLOWED_ADDRESS_PAIR_MAC_ADDRESS,
        ALLOWED_ADDRESS_PAIR_IP_ADDRESS,
    ) = (
        'mac_address',
        'ip_address',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR,
        DEVICE_ID_ATTR,
        DEVICE_OWNER_ATTR,
        FIXED_IPS_ATTR,
        MAC_ADDRESS_ATTR,
        NAME_ATTR,
        NETWORK_ID_ATTR,
        SECURITY_GROUPS_ATTR,
        STATUS,
        TENANT_ID,
        ALLOWED_ADDRESS_PAIRS_ATTR,
        SHOW,
        SUBNETS_ATTR,
    ) = (
        'admin_state_up',
        'device_id',
        'device_owner',
        'fixed_ips',
        'mac_address',
        'name',
        'network_id',
        'security_groups',
        'status',
        'tenant_id',
        'allowed_address_pairs',
        'show',
        'subnets',
    )

    properties_schema = {
        NETWORK_ID:
        properties.Schema(properties.Schema.STRING,
                          support_status=support.SupportStatus(
                              support.DEPRECATED,
                              _('Use property %s.') % NETWORK)),
        NETWORK:
        properties.Schema(
            properties.Schema.STRING,
            _('Network this port belongs to.'),
            support_status=support.SupportStatus(version='2014.2')),
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('A symbolic name for this port.'),
                          update_allowed=True),
        VALUE_SPECS:
        properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the "port" object in the '
              'creation request.'),
            default={}),
        ADMIN_STATE_UP:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('The administrative state of this port.'),
                          default=True,
                          update_allowed=True),
        FIXED_IPS:
        properties.Schema(
            properties.Schema.LIST,
            _('Desired IPs for this port.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    FIXED_IP_SUBNET_ID:
                    properties.Schema(properties.Schema.STRING,
                                      support_status=support.SupportStatus(
                                          support.DEPRECATED,
                                          _('Use property %s.') %
                                          FIXED_IP_SUBNET)),
                    FIXED_IP_SUBNET:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Subnet in which to allocate the IP address for '
                          'this port.'),
                        support_status=support.SupportStatus(
                            version='2014.2')),
                    FIXED_IP_IP_ADDRESS:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('IP address desired in the subnet for this port.')),
                },
            ),
            update_allowed=True),
        MAC_ADDRESS:
        properties.Schema(properties.Schema.STRING,
                          _('MAC address to give to this port.')),
        DEVICE_ID:
        properties.Schema(properties.Schema.STRING,
                          _('Device ID of this port.'),
                          update_allowed=True),
        SECURITY_GROUPS:
        properties.Schema(properties.Schema.LIST,
                          _('Security group IDs to associate with this port.'),
                          update_allowed=True),
        ALLOWED_ADDRESS_PAIRS:
        properties.Schema(
            properties.Schema.LIST,
            _('Additional MAC/IP address pairs allowed to pass through the '
              'port.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ALLOWED_ADDRESS_PAIR_MAC_ADDRESS:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('MAC address to allow through this port.')),
                    ALLOWED_ADDRESS_PAIR_IP_ADDRESS:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('IP address to allow through this port.'),
                        required=True),
                },
            )),
        DEVICE_OWNER:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the network owning the port. '
                            'The value is typically network:floatingip '
                            'or network:router_interface or network:dhcp'),
                          update_allowed=True),
        REPLACEMENT_POLICY:
        properties.Schema(
            properties.Schema.STRING,
            _('Policy on how to respond to a stack-update for this resource. '
              'REPLACE_ALWAYS will replace the port regardless of any '
              'property changes. AUTO will update the existing port for any '
              'changed update-allowed property.'),
            default='REPLACE_ALWAYS',
            constraints=[
                constraints.AllowedValues(['REPLACE_ALWAYS', 'AUTO']),
            ],
            update_allowed=True),
        VNIC_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('The vnic type to be bound on the neutron port. '
              'To support SR-IOV PCI passthrough networking, you can request '
              'that the neutron port to be realized as normal (virtual nic), '
              'direct (pci passthrough), or macvtap '
              '(virtual interface with a tap-like software interface). Note'
              ' that this only works for Neutron deployments that support '
              'the bindings extension.'),
            constraints=[
                constraints.AllowedValues(['normal', 'direct', 'macvtap']),
            ],
            update_allowed=True),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR:
        attributes.Schema(_("The administrative state of this port.")),
        DEVICE_ID_ATTR:
        attributes.Schema(_("Unique identifier for the device.")),
        # NOTE(review): DEVICE_OWNER (the property key) is used here rather
        # than DEVICE_OWNER_ATTR; both resolve to 'device_owner' so behavior
        # is unaffected.
        DEVICE_OWNER:
        attributes.Schema(_("Name of the network owning the port.")),
        FIXED_IPS_ATTR:
        attributes.Schema(_("Fixed IP addresses.")),
        MAC_ADDRESS_ATTR:
        attributes.Schema(_("MAC address of the port.")),
        NAME_ATTR:
        attributes.Schema(_("Friendly name of the port.")),
        NETWORK_ID_ATTR:
        attributes.Schema(
            _("Unique identifier for the network owning the port.")),
        SECURITY_GROUPS_ATTR:
        attributes.Schema(_("A list of security groups for the port.")),
        STATUS:
        attributes.Schema(_("The status of the port.")),
        TENANT_ID:
        attributes.Schema(_("Tenant owning the port.")),
        ALLOWED_ADDRESS_PAIRS_ATTR:
        attributes.Schema(
            _("Additional MAC/IP address pairs allowed to pass through "
              "a port.")),
        SHOW:
        attributes.Schema(_("All attributes.")),
        SUBNETS_ATTR:
        attributes.Schema(_("A list of all subnet attributes for the port.")),
    }

    def validate(self):
        """Validate the template, requiring one of network/network_id."""
        super(Port, self).validate()
        self._validate_depr_property_required(self.properties, self.NETWORK,
                                              self.NETWORK_ID)

    def add_dependencies(self, deps):
        """Make this port depend on all subnets of its network."""
        super(Port, self).add_dependencies(deps)
        # Depend on any Subnet in this template with the same
        # network_id as this network_id.
        # It is not known which subnet a port might be assigned
        # to so all subnets in a network should be created before
        # the ports in that network.
        for res in self.stack.itervalues():
            if res.has_interface('OS::Neutron::Subnet'):
                dep_network = res.properties.get(
                    subnet.Subnet.NETWORK) or res.properties.get(
                        subnet.Subnet.NETWORK_ID)
                network = self.properties.get(
                    self.NETWORK) or self.properties.get(self.NETWORK_ID)
                if dep_network == network:
                    deps += (self, res)

    def handle_create(self):
        """Create the port in Neutron and record its id."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        self.client_plugin().resolve_network(props, self.NETWORK, 'network_id')
        self._prepare_port_properties(props)

        port = self.neutron().create_port({'port': props})['port']
        self.resource_id_set(port['id'])

    def _prepare_port_properties(self, props, prepare_for_update=False):
        """Normalize props in place into the shape the Neutron API expects.

        Resolves subnet names, strips None values and Heat-only keys, and
        converts security group names to UUIDs.
        """
        for fixed_ip in props.get(self.FIXED_IPS, []):
            # NOTE(review): popping keys while iterating .items() is safe on
            # Python 2 (items() returns a list) but would raise on Python 3.
            for key, value in fixed_ip.items():
                if value is None:
                    fixed_ip.pop(key)
            if fixed_ip.get(self.FIXED_IP_SUBNET):
                self.client_plugin().resolve_subnet(fixed_ip,
                                                    self.FIXED_IP_SUBNET,
                                                    'subnet_id')
        # delete empty MAC addresses so that Neutron validation code
        # doesn't fail, as it does not accept None values
        for pair in props.get(self.ALLOWED_ADDRESS_PAIRS, []):
            if (self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS in pair
                    and pair[self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS] is None):
                del pair[self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS]

        # if without 'security_groups', don't set the 'security_groups'
        # property when creating, neutron will create the port with the
        # 'default' securityGroup. If has the 'security_groups' and the
        # value is [], which means to create the port without securityGroup.
        if props.get(self.SECURITY_GROUPS) is not None:
            props[self.SECURITY_GROUPS] = self.client_plugin(
            ).get_secgroup_uuids(props.get(self.SECURITY_GROUPS))
        else:
            # And the update should has the same behavior.
            if prepare_for_update:
                props[self.SECURITY_GROUPS] = self.client_plugin(
                ).get_secgroup_uuids(['default'])

        if not props[self.FIXED_IPS]:
            del (props[self.FIXED_IPS])

        # replacement_policy is a Heat-only control; never send it to Neutron.
        del (props[self.REPLACEMENT_POLICY])

    def _show_resource(self):
        """Return the current port dict from Neutron."""
        return self.neutron().show_port(self.resource_id)['port']

    def check_create_complete(self, *args):
        """Poll Neutron until the port reports as built."""
        attributes = self._show_resource()
        return self.is_built(attributes)

    def handle_delete(self):
        """Delete the port, ignoring not-found errors."""
        client = self.neutron()
        try:
            client.delete_port(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def _resolve_attribute(self, name):
        """Resolve attributes; 'subnets' is assembled from fixed IPs."""
        if name == self.SUBNETS_ATTR:
            subnets = []
            try:
                fixed_ips = self._show_resource().get('fixed_ips', [])
                for fixed_ip in fixed_ips:
                    subnet_id = fixed_ip.get('subnet_id')
                    if subnet_id:
                        subnets.append(
                            self.neutron().show_subnet(subnet_id)['subnet'])
            except Exception as ex:
                # Best effort: log and return None rather than failing the
                # whole attribute resolution.
                LOG.warn(_LW("Failed to fetch resource attributes: %s"), ex)
                return
            return subnets
        return super(Port, self)._resolve_attribute(name)

    def _needs_update(self, after, before, after_props, before_props,
                      prev_resource):
        """Force replacement when replacement_policy is REPLACE_ALWAYS."""
        if after_props.get(self.REPLACEMENT_POLICY) == 'REPLACE_ALWAYS':
            raise resource.UpdateReplace(self.name)

        return super(Port, self)._needs_update(after, before, after_props,
                                               before_props, prev_resource)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update the existing port in place via the Neutron API."""
        props = self.prepare_update_properties(json_snippet)

        self._prepare_port_properties(props, prepare_for_update=True)
        LOG.debug('updating port with %s' % props)
        self.neutron().update_port(self.resource_id, {'port': props})

    def check_update_complete(self, *args):
        """Poll Neutron until the updated port reports as built."""
        attributes = self._show_resource()
        return self.is_built(attributes)
# Example #13
class DesignateRecordSet(resource.Resource):
    """Heat Template Resource for Designate RecordSet.

    Designate provides DNS-as-a-Service services for OpenStack. RecordSet
    helps to add more than one records.
    """

    support_status = support.SupportStatus(version='8.0.0')

    PROPERTIES = (NAME, TTL, DESCRIPTION, TYPE, RECORDS,
                  ZONE) = ('name', 'ttl', 'description', 'type', 'records',
                           'zone')

    _ALLOWED_TYPES = (A, AAAA, CNAME, MX, SRV, TXT, SPF, NS, PTR, SSHFP,
                      SOA) = ('A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF',
                              'NS', 'PTR', 'SSHFP', 'SOA')

    properties_schema = {
        # Based on RFC 1035, length of name is set to max of 255
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('RecordSet name.'),
                          constraints=[constraints.Length(max=255)]),
        # Based on RFC 1035, range for ttl is set to 1 to signed 32 bit number
        TTL:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Time To Live (Seconds).'),
            update_allowed=True,
            constraints=[constraints.Range(min=1, max=2147483647)]),
        # designate mandates to the max length of 160 for description
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description of RecordSet.'),
                          update_allowed=True,
                          constraints=[constraints.Length(max=160)]),
        TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('DNS RecordSet type.'),
            required=True,
            constraints=[constraints.AllowedValues(_ALLOWED_TYPES)]),
        RECORDS:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of data for this RecordSet. Each item will be a '
              'separate record in Designate These items should conform to the '
              'DNS spec for the record type - e.g. A records must be IPv4 '
              'addresses, CNAME records must be a hostname. DNS record data '
              'varies based on the type of record. For more details, please '
              'refer rfc 1035.'),
            update_allowed=True,
            required=True),
        ZONE:
        properties.Schema(
            properties.Schema.STRING,
            _('DNS Zone id or name.'),
            required=True,
            constraints=[constraints.CustomConstraint('designate.zone')]),
    }

    default_client_name = 'designate'

    entity = 'recordsets'

    def client(self):
        """Return a Designate client pinned to the v2 API."""
        return super(DesignateRecordSet,
                     self).client(version=self.client_plugin().V2)

    def handle_create(self):
        """Create the recordset, defaulting the name when not supplied."""
        # Only pass properties that were explicitly set (truthy values).
        args = {k: v for k, v in six.iteritems(self.properties) if v}
        # The client keyword is 'type_' to avoid shadowing the builtin.
        args['type_'] = args.pop(self.TYPE)
        if not args.get(self.NAME):
            args[self.NAME] = self.physical_resource_name()

        rs = self.client().recordsets.create(**args)

        self.resource_id_set(rs['id'])

    def _check_status_complete(self):
        """Poll the recordset status.

        Raises ResourceInError on ERROR; returns True once the recordset
        is no longer PENDING.
        """
        recordset = self.client().recordsets.get(
            recordset=self.resource_id, zone=self.properties[self.ZONE])

        if recordset['status'] == 'ERROR':
            raise exception.ResourceInError(
                resource_status=recordset['status'],
                status_reason=_('Error in RecordSet'))

        return recordset['status'] != 'PENDING'

    def check_create_complete(self, handler_data=None):
        return self._check_status_complete()

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed, update-allowed properties to Designate."""
        args = {}

        for prp in (self.TTL, self.DESCRIPTION, self.RECORDS):
            if prop_diff.get(prp):
                args[prp] = prop_diff.get(prp)

        if prop_diff.get(self.TYPE):
            args['type_'] = prop_diff.get(self.TYPE)

        # Skip the API call entirely when nothing relevant changed.
        if args:
            self.client().recordsets.update(recordset=self.resource_id,
                                            zone=self.properties[self.ZONE],
                                            values=args)

    def check_update_complete(self, handler_data=None):
        return self._check_status_complete()

    def handle_delete(self):
        """Delete the recordset, ignoring an already-missing resource."""
        if self.resource_id is not None:
            with self.client_plugin().ignore_not_found:
                self.client().recordsets.delete(
                    recordset=self.resource_id,
                    zone=self.properties[self.ZONE])

    def check_delete_complete(self, handler_data=None):
        if self.resource_id is not None:
            with self.client_plugin().ignore_not_found:
                return self._check_status_complete()

        return True

    def _show_resource(self):
        return self.client().recordsets.get(recordset=self.resource_id,
                                            zone=self.properties[self.ZONE])
# Example #14 — file: port.py, project: sdake/heat (aggregator separator)
class Port(neutron.NeutronResource):
    """A resource for managing Neutron ports.

    A port represents a virtual switch port on a logical network switch.
    Virtual instances attach their interfaces into ports. The logical port also
    defines the MAC address and the IP address(es) to be assigned to the
    interfaces plugged into them. When IP addresses are associated to a port,
    this also implies the port is associated with a subnet, as the IP address
    was taken from the allocation pool for a specific subnet.
    """

    entity = 'port'

    PROPERTIES = (
        NAME, NETWORK_ID, NETWORK, FIXED_IPS, SECURITY_GROUPS,
        REPLACEMENT_POLICY, DEVICE_ID, DEVICE_OWNER, DNS_NAME,
        TAGS,
    ) = (
        'name', 'network_id', 'network', 'fixed_ips', 'security_groups',
        'replacement_policy', 'device_id', 'device_owner', 'dns_name',
        'tags',
    )

    EXTRA_PROPERTIES = (
        VALUE_SPECS, ADMIN_STATE_UP, MAC_ADDRESS,
        ALLOWED_ADDRESS_PAIRS, VNIC_TYPE, QOS_POLICY,
        PORT_SECURITY_ENABLED,
    ) = (
        'value_specs', 'admin_state_up', 'mac_address',
        'allowed_address_pairs', 'binding:vnic_type', 'qos_policy',
        'port_security_enabled',
    )

    _FIXED_IP_KEYS = (
        FIXED_IP_SUBNET_ID, FIXED_IP_SUBNET, FIXED_IP_IP_ADDRESS,
    ) = (
        'subnet_id', 'subnet', 'ip_address',
    )

    _ALLOWED_ADDRESS_PAIR_KEYS = (
        ALLOWED_ADDRESS_PAIR_MAC_ADDRESS, ALLOWED_ADDRESS_PAIR_IP_ADDRESS,
    ) = (
        'mac_address', 'ip_address',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, DEVICE_ID_ATTR, DEVICE_OWNER_ATTR, FIXED_IPS_ATTR,
        MAC_ADDRESS_ATTR, NAME_ATTR, NETWORK_ID_ATTR, SECURITY_GROUPS_ATTR,
        STATUS, TENANT_ID, ALLOWED_ADDRESS_PAIRS_ATTR, SUBNETS_ATTR,
        PORT_SECURITY_ENABLED_ATTR, QOS_POLICY_ATTR, DNS_ASSIGNMENT,
    ) = (
        'admin_state_up', 'device_id', 'device_owner', 'fixed_ips',
        'mac_address', 'name', 'network_id', 'security_groups',
        'status', 'tenant_id', 'allowed_address_pairs', 'subnets',
        'port_security_enabled', 'qos_policy_id', 'dns_assignment',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('A symbolic name for this port.'),
            update_allowed=True
        ),
        NETWORK_ID: properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                message=_('Use property %s.') % NETWORK,
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='2014.2'
                )
            ),
            constraints=[
                constraints.CustomConstraint('neutron.network')
            ],
        ),

        NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('Network this port belongs to. If you plan to use current port '
              'to assign Floating IP, you should specify %(fixed_ips)s '
              'with %(subnet)s. Note if this changes to a different network '
              'update, the port will be replaced.') %
            {'fixed_ips': FIXED_IPS, 'subnet': FIXED_IP_SUBNET},
            support_status=support.SupportStatus(version='2014.2'),
            required=True,
            constraints=[
                constraints.CustomConstraint('neutron.network')
            ],
        ),
        DEVICE_ID: properties.Schema(
            properties.Schema.STRING,
            _('Device ID of this port.'),
            update_allowed=True
        ),
        DEVICE_OWNER: properties.Schema(
            properties.Schema.STRING,
            _('Name of the network owning the port. '
              'The value is typically network:floatingip '
              'or network:router_interface or network:dhcp.'),
            update_allowed=True
        ),
        FIXED_IPS: properties.Schema(
            properties.Schema.LIST,
            _('Desired IPs for this port.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    FIXED_IP_SUBNET_ID: properties.Schema(
                        properties.Schema.STRING,
                        support_status=support.SupportStatus(
                            status=support.HIDDEN,
                            version='5.0.0',
                            message=_('Use property %s.') % FIXED_IP_SUBNET,
                            previous_status=support.SupportStatus(
                                status=support.DEPRECATED,
                                version='2014.2 '
                            )
                        ),
                        constraints=[
                            constraints.CustomConstraint('neutron.subnet')
                        ]
                    ),
                    FIXED_IP_SUBNET: properties.Schema(
                        properties.Schema.STRING,
                        _('Subnet in which to allocate the IP address for '
                          'this port.'),
                        support_status=support.SupportStatus(version='2014.2'),
                        constraints=[
                            constraints.CustomConstraint('neutron.subnet')
                        ]
                    ),
                    FIXED_IP_IP_ADDRESS: properties.Schema(
                        properties.Schema.STRING,
                        _('IP address desired in the subnet for this port.'),
                        constraints=[
                            constraints.CustomConstraint('ip_addr')
                        ]
                    ),
                },
            ),
            update_allowed=True
        ),
        SECURITY_GROUPS: properties.Schema(
            properties.Schema.LIST,
            _('Security group IDs to associate with this port.'),
            update_allowed=True
        ),
        REPLACEMENT_POLICY: properties.Schema(
            properties.Schema.STRING,
            _('Policy on how to respond to a stack-update for this resource. '
              'REPLACE_ALWAYS will replace the port regardless of any '
              'property changes. AUTO will update the existing port for any '
              'changed update-allowed property.'),
            default='AUTO',
            constraints=[
                constraints.AllowedValues(['REPLACE_ALWAYS', 'AUTO']),
            ],
            update_allowed=True,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='9.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    version='6.0.0',
                    message=_('Replacement policy used to work around flawed '
                              'nova/neutron port interaction which has been '
                              'fixed since Liberty.'),
                    previous_status=support.SupportStatus(version='2014.2')
                )
            )
        ),
        DNS_NAME: properties.Schema(
            properties.Schema.STRING,
            _('DNS name associated with the port.'),
            update_allowed=True,
            constraints=[
                constraints.CustomConstraint('dns_name')
            ],
            support_status=support.SupportStatus(version='7.0.0'),
        ),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            _('The tags to be added to the port.'),
            schema=properties.Schema(properties.Schema.STRING),
            update_allowed=True,
            support_status=support.SupportStatus(version='9.0.0')
        ),
    }

    # NOTE(prazumovsky): properties_schema has been separated because some
    # properties used in server for creating internal port.
    extra_properties_schema = {
        VALUE_SPECS: properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the request.'),
            default={},
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of this port.'),
            default=True,
            update_allowed=True
        ),
        MAC_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('MAC address to give to this port. The default update policy '
              'of this property in neutron is that allow admin role only.'),
            constraints=[
                constraints.CustomConstraint('mac_addr')
            ],
            update_allowed=True,
        ),
        ALLOWED_ADDRESS_PAIRS: properties.Schema(
            properties.Schema.LIST,
            _('Additional MAC/IP address pairs allowed to pass through the '
              'port.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ALLOWED_ADDRESS_PAIR_MAC_ADDRESS: properties.Schema(
                        properties.Schema.STRING,
                        _('MAC address to allow through this port.'),
                        constraints=[
                            constraints.CustomConstraint('mac_addr')
                        ]
                    ),
                    ALLOWED_ADDRESS_PAIR_IP_ADDRESS: properties.Schema(
                        properties.Schema.STRING,
                        _('IP address to allow through this port.'),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('net_cidr')
                        ]
                    ),
                },
            ),
            update_allowed=True,
        ),
        VNIC_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('The vnic type to be bound on the neutron port. '
              'To support SR-IOV PCI passthrough networking, you can request '
              'that the neutron port to be realized as normal (virtual nic), '
              'direct (pci passthrough), or macvtap '
              '(virtual interface with a tap-like software interface). Note '
              'that this only works for Neutron deployments that support '
              'the bindings extension.'),
            constraints=[
                constraints.AllowedValues(['normal', 'direct', 'macvtap',
                                           'direct-physical', 'baremetal']),
            ],
            support_status=support.SupportStatus(version='2015.1'),
            update_allowed=True
        ),
        PORT_SECURITY_ENABLED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Flag to enable/disable port security on the port. '
              'When disable this feature(set it to False), there will be no '
              'packages filtering, like security-group and address-pairs.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='5.0.0')
        ),
        QOS_POLICY: properties.Schema(
            properties.Schema.STRING,
            _('The name or ID of QoS policy to attach to this port.'),
            constraints=[
                constraints.CustomConstraint('neutron.qos_policy')
            ],
            update_allowed=True,
            support_status=support.SupportStatus(version='6.0.0')
        ),
    }

    # Need to update properties_schema with other properties before
    # initialisation, because resource should contain all properties before
    # creating. Also, documentation should correctly resolves resource
    # properties schema.
    properties_schema.update(extra_properties_schema)

    attributes_schema = {
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _("The administrative state of this port."),
            type=attributes.Schema.STRING
        ),
        DEVICE_ID_ATTR: attributes.Schema(
            _("Unique identifier for the device."),
            type=attributes.Schema.STRING
        ),
        DEVICE_OWNER: attributes.Schema(
            _("Name of the network owning the port."),
            type=attributes.Schema.STRING
        ),
        FIXED_IPS_ATTR: attributes.Schema(
            _("Fixed IP addresses."),
            type=attributes.Schema.LIST
        ),
        MAC_ADDRESS_ATTR: attributes.Schema(
            _("MAC address of the port."),
            type=attributes.Schema.STRING
        ),
        NAME_ATTR: attributes.Schema(
            _("Friendly name of the port."),
            type=attributes.Schema.STRING
        ),
        NETWORK_ID_ATTR: attributes.Schema(
            _("Unique identifier for the network owning the port."),
            type=attributes.Schema.STRING
        ),
        SECURITY_GROUPS_ATTR: attributes.Schema(
            _("A list of security groups for the port."),
            type=attributes.Schema.LIST
        ),
        STATUS: attributes.Schema(
            _("The status of the port."),
            type=attributes.Schema.STRING
        ),
        TENANT_ID: attributes.Schema(
            _("Tenant owning the port."),
            type=attributes.Schema.STRING
        ),
        ALLOWED_ADDRESS_PAIRS_ATTR: attributes.Schema(
            _("Additional MAC/IP address pairs allowed to pass through "
              "a port."),
            type=attributes.Schema.LIST
        ),
        SUBNETS_ATTR: attributes.Schema(
            _("A list of all subnet attributes for the port."),
            type=attributes.Schema.LIST
        ),
        PORT_SECURITY_ENABLED_ATTR: attributes.Schema(
            _("Port security enabled of the port."),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.BOOLEAN
        ),
        QOS_POLICY_ATTR: attributes.Schema(
            _("The QoS policy ID attached to this port."),
            type=attributes.Schema.STRING,
            support_status=support.SupportStatus(version='6.0.0'),
        ),
        DNS_ASSIGNMENT: attributes.Schema(
            _("The DNS assigned to this port."),
            type=attributes.Schema.MAP,
            support_status=support.SupportStatus(version='7.0.0'),
        ),
    }

    def translation_rules(self, props):
        """Map deprecated property names and resolve names to IDs.

        network_id -> network and fixed_ips/subnet_id -> fixed_ips/subnet,
        then resolve network and subnet names to their Neutron IDs.
        """
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.NETWORK],
                value_path=[self.NETWORK_ID]
            ),
            translation.TranslationRule(
                props,
                translation.TranslationRule.REPLACE,
                [self.FIXED_IPS, self.FIXED_IP_SUBNET],
                value_name=self.FIXED_IP_SUBNET_ID
            ),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.NETWORK],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='network'
            ),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.FIXED_IPS, self.FIXED_IP_SUBNET],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='subnet'
            )
        ]

    def add_dependencies(self, deps):
        """Add implicit dependencies on subnets sharing this port's network."""
        super(Port, self).add_dependencies(deps)
        # Depend on any Subnet in this template with the same
        # network_id as this network_id.
        # It is not known which subnet a port might be assigned
        # to so all subnets in a network should be created before
        # the ports in that network.
        for res in six.itervalues(self.stack):
            if res.has_interface('OS::Neutron::Subnet'):
                dep_network = res.properties.get(subnet.Subnet.NETWORK)
                network = self.properties[self.NETWORK]
                if dep_network == network:
                    deps += (self, res)

    def handle_create(self):
        """Create the Neutron port and record its ID."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        # Neutron's API expects 'network_id', not the template's 'network'.
        props['network_id'] = props.pop(self.NETWORK)
        self._prepare_port_properties(props)
        # QoS policy and tags are applied via separate API mechanisms,
        # so pull them out of the create body.
        qos_policy = props.pop(self.QOS_POLICY, None)
        tags = props.pop(self.TAGS, [])
        if qos_policy:
            props['qos_policy_id'] = self.client_plugin().get_qos_policy_id(
                qos_policy)

        port = self.client().create_port({'port': props})['port']
        self.resource_id_set(port['id'])

        if tags:
            self.set_tags(tags)

    def _prepare_port_properties(self, props, prepare_for_update=False):
        """Normalize template properties into a Neutron request body.

        Mutates props in place: cleans fixed_ips, allowed_address_pairs
        and security_groups, and strips the Heat-only replacement_policy.
        """
        if self.FIXED_IPS in props:
            fixed_ips = props[self.FIXED_IPS]
            if fixed_ips:
                for fixed_ip in fixed_ips:
                    # Drop unset keys; Neutron rejects explicit Nones.
                    for key, value in list(fixed_ip.items()):
                        if value is None:
                            fixed_ip.pop(key)
                    if self.FIXED_IP_SUBNET in fixed_ip:
                        fixed_ip[
                            'subnet_id'] = fixed_ip.pop(self.FIXED_IP_SUBNET)
            else:
                # Passing empty list would have created a port without
                # fixed_ips during CREATE and released the existing
                # fixed_ips during UPDATE (default neutron behaviour).
                # However, for backward compatibility we will let neutron
                # assign ip for CREATE and leave the assigned ips during
                # UPDATE by not passing it. ref bug #1538473.
                del props[self.FIXED_IPS]
        # delete empty MAC addresses so that Neutron validation code
        # wouldn't fail as it not accepts Nones
        if self.ALLOWED_ADDRESS_PAIRS in props:
            address_pairs = props[self.ALLOWED_ADDRESS_PAIRS]
            if address_pairs:
                for pair in address_pairs:
                    if (self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS in pair
                        and pair[
                            self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS] is None):
                        del pair[self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS]
            else:
                props[self.ALLOWED_ADDRESS_PAIRS] = []
        # if without 'security_groups', don't set the 'security_groups'
        # property when creating, neutron will create the port with the
        # 'default' securityGroup. If has the 'security_groups' and the
        # value is [], which means to create the port without securityGroup.
        if self.SECURITY_GROUPS in props:
            if props.get(self.SECURITY_GROUPS) is not None:
                props[self.SECURITY_GROUPS] = self.client_plugin(
                ).get_secgroup_uuids(props.get(self.SECURITY_GROUPS))
            else:
                # And the update should has the same behavior.
                if prepare_for_update:
                    props[self.SECURITY_GROUPS] = self.client_plugin(
                    ).get_secgroup_uuids(['default'])

        # replacement_policy is consumed by Heat only; never send it
        # to Neutron.
        if self.REPLACEMENT_POLICY in props:
            del(props[self.REPLACEMENT_POLICY])

    def _store_config_default_properties(self, attrs):
        """A method for storing properties default values.

        A method allows to store properties default values, which cannot be
        defined in schema in case of specifying in config file.
        """
        super(Port, self)._store_config_default_properties(attrs)
        if self.VNIC_TYPE in attrs:
            self.data_set(self.VNIC_TYPE, attrs[self.VNIC_TYPE])

    def check_create_complete(self, *args):
        """Poll the port; True once Neutron reports it built."""
        attributes = self._show_resource()
        self._store_config_default_properties(attributes)
        return self.is_built(attributes)

    def handle_delete(self):
        """Delete the port, treating "not found" as success."""
        try:
            self.client().delete_port(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Convert live Neutron port data back into template properties."""
        result = super(Port, self).parse_live_resource_data(
            resource_properties, resource_data)
        result[self.QOS_POLICY] = resource_data.get('qos_policy_id')
        # mac_address is dropped: its update is admin-only in Neutron.
        result.pop(self.MAC_ADDRESS)
        fixed_ips = resource_data.get(self.FIXED_IPS) or []
        if fixed_ips:
            result.update({self.FIXED_IPS: []})
            for fixed_ip in fixed_ips:
                result[self.FIXED_IPS].append(
                    {self.FIXED_IP_SUBNET: fixed_ip.get('subnet_id'),
                     self.FIXED_IP_IP_ADDRESS: fixed_ip.get('ip_address')})
        return result

    def _resolve_attribute(self, name):
        """Resolve port attributes; 'subnets' is expanded to full bodies."""
        if self.resource_id is None:
            return
        if name == self.SUBNETS_ATTR:
            subnets = []
            try:
                fixed_ips = self._show_resource().get('fixed_ips', [])
                for fixed_ip in fixed_ips:
                    subnet_id = fixed_ip.get('subnet_id')
                    if subnet_id:
                        subnets.append(self.client().show_subnet(
                            subnet_id)['subnet'])
            except Exception as ex:
                # Best-effort: attribute resolution must not fail the stack.
                LOG.warning("Failed to fetch resource attributes: %s", ex)
                return
            return subnets
        return super(Port, self)._resolve_attribute(name)

    def needs_replace(self, after_props):
        """Mandatory replace based on props."""
        return after_props.get(self.REPLACEMENT_POLICY) == 'REPLACE_ALWAYS'

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply only the changed properties to the existing port."""
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            if self.QOS_POLICY in prop_diff:
                qos_policy = prop_diff.pop(self.QOS_POLICY)
                prop_diff['qos_policy_id'] = self.client_plugin(
                    ).get_qos_policy_id(qos_policy) if qos_policy else None

            if self.TAGS in prop_diff:
                tags = prop_diff.pop(self.TAGS)
                self.set_tags(tags)

            self._prepare_port_properties(prop_diff, prepare_for_update=True)
            LOG.debug('updating port with %s', prop_diff)
            self.client().update_port(self.resource_id, {'port': prop_diff})

    def check_update_complete(self, *args):
        """Poll the port; True once the update leaves it built."""
        attributes = self._show_resource()
        return self.is_built(attributes)

    def prepare_for_replace(self):
        """Detach fixed IPs so the replacement port can claim them."""
        # if the port has not been created yet, return directly
        if self.resource_id is None:
            return
        # store port fixed_ips for restoring after failed update
        fixed_ips = self._show_resource().get('fixed_ips', [])
        self.data_set('port_fip', jsonutils.dumps(fixed_ips))
        # reset fixed_ips for this port by setting fixed_ips to []
        props = {'fixed_ips': []}
        self.client().update_port(self.resource_id, {'port': props})

    def restore_prev_rsrc(self, convergence=False):
        """On rollback, give the saved fixed IPs back to the old port."""
        # In case of convergence, during rollback, the previous rsrc is
        # already selected and is being acted upon.
        if convergence:
            prev_port = self
            existing_port, rsrc_owning_stack, stack = resource.Resource.load(
                prev_port.context, prev_port.replaced_by,
                prev_port.stack.current_traversal, True,
                prev_port.stack.defn._resource_data
            )
            existing_port_id = existing_port.resource_id
        else:
            backup_stack = self.stack._backup_stack()
            prev_port = backup_stack.resources.get(self.name)
            existing_port_id = self.resource_id

        if existing_port_id:
            # reset fixed_ips to [] for new resource
            props = {'fixed_ips': []}
            self.client().update_port(existing_port_id, {'port': props})

        fixed_ips = prev_port.data().get('port_fip', [])
        if fixed_ips and prev_port.resource_id:
            # restore ip for old port
            prev_port_props = {'fixed_ips': jsonutils.loads(fixed_ips)}
            self.client().update_port(prev_port.resource_id,
                                      {'port': prev_port_props})
# Example #15 (aggregator separator)
class FirewallRule(neutron.NeutronResource):
    """A resource for the FirewallRule resource in Neutron FWaaS."""

    PROPERTIES = (
        NAME,
        DESCRIPTION,
        SHARED,
        PROTOCOL,
        IP_VERSION,
        SOURCE_IP_ADDRESS,
        DESTINATION_IP_ADDRESS,
        SOURCE_PORT,
        DESTINATION_PORT,
        ACTION,
        ENABLED,
    ) = (
        'name',
        'description',
        'shared',
        'protocol',
        'ip_version',
        'source_ip_address',
        'destination_ip_address',
        'source_port',
        'destination_port',
        'action',
        'enabled',
    )

    ATTRIBUTES = (
        NAME_ATTR,
        DESCRIPTION_ATTR,
        FIREWALL_POLICY_ID,
        SHARED_ATTR,
        PROTOCOL_ATTR,
        IP_VERSION_ATTR,
        SOURCE_IP_ADDRESS_ATTR,
        DESTINATION_IP_ADDRESS_ATTR,
        SOURCE_PORT_ATTR,
        DESTINATION_PORT_ATTR,
        ACTION_ATTR,
        ENABLED_ATTR,
        POSITION,
        TENANT_ID,
    ) = (
        'name',
        'description',
        'firewall_policy_id',
        'shared',
        'protocol',
        'ip_version',
        'source_ip_address',
        'destination_ip_address',
        'source_port',
        'destination_port',
        'action',
        'enabled',
        'position',
        'tenant_id',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name for the firewall rule.'),
                          update_allowed=True),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description for the firewall rule.'),
                          update_allowed=True),
        SHARED:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether this rule should be shared across all tenants.'),
            default=False,
            update_allowed=True),
        PROTOCOL:
        properties.Schema(
            properties.Schema.STRING,
            _('Protocol for the firewall rule.'),
            constraints=[
                constraints.AllowedValues(['tcp', 'udp', 'icmp', 'any']),
            ],
            default='any',
            update_allowed=True,
        ),
        IP_VERSION:
        properties.Schema(properties.Schema.STRING,
                          _('Internet protocol version.'),
                          default='4',
                          constraints=[
                              constraints.AllowedValues(['4', '6']),
                          ],
                          update_allowed=True),
        SOURCE_IP_ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('Source IP address or CIDR.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('net_cidr')]),
        DESTINATION_IP_ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('Destination IP address or CIDR.'),
            update_allowed=True,
            constraints=[constraints.CustomConstraint('net_cidr')]),
        SOURCE_PORT:
        properties.Schema(properties.Schema.STRING,
                          _('Source port number or a range.'),
                          update_allowed=True),
        DESTINATION_PORT:
        properties.Schema(properties.Schema.STRING,
                          _('Destination port number or a range.'),
                          update_allowed=True),
        ACTION:
        properties.Schema(
            properties.Schema.STRING,
            _('Action to be performed on the traffic matching the rule.'),
            default='deny',
            constraints=[
                constraints.AllowedValues(['allow', 'deny']),
            ],
            update_allowed=True),
        ENABLED:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('Whether this rule should be enabled.'),
                          default=True,
                          update_allowed=True),
    }

    attributes_schema = {
        NAME_ATTR:
        attributes.Schema(_('Name for the firewall rule.'),
                          type=attributes.Schema.STRING),
        DESCRIPTION_ATTR:
        attributes.Schema(_('Description of the firewall rule.'),
                          type=attributes.Schema.STRING),
        FIREWALL_POLICY_ID:
        attributes.Schema(_(
            'Unique identifier of the firewall policy to which this '
            'firewall rule belongs.'),
                          type=attributes.Schema.STRING),
        SHARED_ATTR:
        attributes.Schema(_('Shared status of this firewall rule.'),
                          type=attributes.Schema.STRING),
        PROTOCOL_ATTR:
        attributes.Schema(_('Protocol value for this firewall rule.'),
                          type=attributes.Schema.STRING),
        IP_VERSION_ATTR:
        attributes.Schema(_('Ip_version for this firewall rule.'),
                          type=attributes.Schema.STRING),
        SOURCE_IP_ADDRESS_ATTR:
        attributes.Schema(_('Source ip_address for this firewall rule.'),
                          type=attributes.Schema.STRING),
        DESTINATION_IP_ADDRESS_ATTR:
        attributes.Schema(_('Destination ip_address for this firewall rule.'),
                          type=attributes.Schema.STRING),
        SOURCE_PORT_ATTR:
        attributes.Schema(_('Source port range for this firewall rule.'),
                          type=attributes.Schema.STRING),
        DESTINATION_PORT_ATTR:
        attributes.Schema(_('Destination port range for this firewall rule.'),
                          type=attributes.Schema.STRING),
        ACTION_ATTR:
        attributes.Schema(_('Allow or deny action for this firewall rule.'),
                          type=attributes.Schema.STRING),
        ENABLED_ATTR:
        attributes.Schema(
            _('Indicates whether this firewall rule is enabled or not.'),
            type=attributes.Schema.STRING),
        POSITION:
        attributes.Schema(
            _('Position of the rule within the firewall policy.'),
            type=attributes.Schema.STRING),
        TENANT_ID:
        attributes.Schema(_('Id of the tenant owning the firewall.'),
                          type=attributes.Schema.STRING),
    }

    def _show_resource(self):
        """Return the firewall rule dict as reported by Neutron."""
        return self.client().show_firewall_rule(
            self.resource_id)['firewall_rule']

    def handle_create(self):
        """Create the firewall rule and record its ID.

        The template-level protocol value 'any' is translated to None,
        which is how Neutron FWaaS expresses "match any protocol".
        """
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        if props.get(self.PROTOCOL) == 'any':
            props[self.PROTOCOL] = None
        firewall_rule = self.client().create_firewall_rule(
            {'firewall_rule': props})['firewall_rule']
        self.resource_id_set(firewall_rule['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed properties to Neutron, mapping 'any' to None."""
        if prop_diff:
            if prop_diff.get(self.PROTOCOL) == 'any':
                prop_diff[self.PROTOCOL] = None
            self.client().update_firewall_rule(self.resource_id,
                                               {'firewall_rule': prop_diff})

    def handle_delete(self):
        """Delete the rule, tolerating an already-missing resource."""
        try:
            self.client().delete_firewall_rule(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            # NOTE(review): True appears to signal that deletion was
            # issued and should be polled for completion — confirm
            # against the base resource's delete contract.
            return True
Пример #16
0
class Order(resource.Resource):
    """A resource allowing for the generation secret material by Barbican.

    The resource allows to generate some secret material. It can be, for
    example, some key or certificate. The order encapsulates the workflow
    and history for the creation of a secret. The time to generate a secret can
    vary depending on the type of secret.
    """

    support_status = support.SupportStatus(version='2014.2')

    default_client_name = 'barbican'

    entity = 'orders'

    PROPERTIES = (NAME, PAYLOAD_CONTENT_TYPE, MODE, EXPIRATION, ALGORITHM,
                  BIT_LENGTH, TYPE, REQUEST_TYPE, SUBJECT_DN,
                  SOURCE_CONTAINER_REF, CA_ID, PROFILE, REQUEST_DATA,
                  PASS_PHRASE) = ('name', 'payload_content_type', 'mode',
                                  'expiration', 'algorithm', 'bit_length',
                                  'type', 'request_type', 'subject_dn',
                                  'source_container_ref', 'ca_id', 'profile',
                                  'request_data', 'pass_phrase')

    ATTRIBUTES = (STATUS, ORDER_REF, SECRET_REF, PUBLIC_KEY, PRIVATE_KEY,
                  CERTIFICATE, INTERMEDIATES,
                  CONTAINER_REF) = ('status', 'order_ref', 'secret_ref',
                                    'public_key', 'private_key', 'certificate',
                                    'intermediates', 'container_ref')

    ORDER_TYPES = (KEY, ASYMMETRIC, CERTIFICATE) = ('key', 'asymmetric',
                                                    'certificate')

    # full-cmc is declared but not yet supported in barbican
    REQUEST_TYPES = (STORED_KEY, SIMPLE_CMC, CUSTOM) = ('stored-key',
                                                        'simple-cmc', 'custom')

    # Whitelist of properties accepted per order type; validate() rejects
    # anything outside the list for the declared type.
    ALLOWED_PROPERTIES_FOR_TYPE = {
        KEY:
        [NAME, ALGORITHM, BIT_LENGTH, MODE, PAYLOAD_CONTENT_TYPE, EXPIRATION],
        ASYMMETRIC: [
            NAME, ALGORITHM, BIT_LENGTH, MODE, PASS_PHRASE,
            PAYLOAD_CONTENT_TYPE, EXPIRATION
        ],
        CERTIFICATE: [
            NAME, REQUEST_TYPE, SUBJECT_DN, SOURCE_CONTAINER_REF, CA_ID,
            PROFILE, REQUEST_DATA
        ]
    }

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Human readable name for the secret.'),
        ),
        PAYLOAD_CONTENT_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('The type/format the secret data is provided in.'),
        ),
        EXPIRATION:
        properties.Schema(
            properties.Schema.STRING,
            _('The expiration date for the secret in ISO-8601 format.'),
            constraints=[
                constraints.CustomConstraint('expiration'),
            ],
        ),
        ALGORITHM:
        properties.Schema(
            properties.Schema.STRING,
            _('The algorithm type used to generate the secret. '
              'Required for key and asymmetric types of order.'),
        ),
        BIT_LENGTH:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The bit-length of the secret. Required for key and '
              'asymmetric types of order.'),
        ),
        MODE:
        properties.Schema(
            properties.Schema.STRING,
            _('The type/mode of the algorithm associated with the secret '
              'information.'),
        ),
        TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('The type of the order.'),
            constraints=[
                constraints.AllowedValues(ORDER_TYPES),
            ],
            required=True,
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        REQUEST_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('The type of the certificate request.'),
            support_status=support.SupportStatus(version='5.0.0'),
            constraints=[constraints.AllowedValues(REQUEST_TYPES)]),
        SUBJECT_DN:
        properties.Schema(
            properties.Schema.STRING,
            _('The subject of the certificate request.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        SOURCE_CONTAINER_REF:
        properties.Schema(
            properties.Schema.STRING,
            _('The source of certificate request.'),
            support_status=support.SupportStatus(version='5.0.0'),
            constraints=[constraints.CustomConstraint('barbican.container')],
        ),
        CA_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The identifier of the CA to use.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        PROFILE:
        properties.Schema(
            properties.Schema.STRING,
            _('The profile of certificate to use.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        REQUEST_DATA:
        properties.Schema(
            properties.Schema.STRING,
            _('The content of the CSR. Only for certificate orders.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        PASS_PHRASE:
        properties.Schema(
            properties.Schema.STRING,
            _('The passphrase the created key. Can be set only '
              'for asymmetric type of order.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
    }

    attributes_schema = {
        STATUS:
        attributes.Schema(_('The status of the order.'),
                          type=attributes.Schema.STRING),
        ORDER_REF:
        attributes.Schema(_('The URI to the order.'),
                          type=attributes.Schema.STRING),
        SECRET_REF:
        attributes.Schema(_('The URI to the created secret.'),
                          type=attributes.Schema.STRING),
        CONTAINER_REF:
        attributes.Schema(
            _('The URI to the created container.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
        PUBLIC_KEY:
        attributes.Schema(
            _('The payload of the created public key, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
        PRIVATE_KEY:
        attributes.Schema(
            _('The payload of the created private key, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
        CERTIFICATE:
        attributes.Schema(
            _('The payload of the created certificate, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
        INTERMEDIATES:
        attributes.Schema(
            _('The payload of the created intermediates, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING),
    }

    def handle_create(self):
        """Submit the order to Barbican and store its reference.

        Returns the order reference so check_create_complete() can poll
        it for completion.
        """
        info = dict(
            (k, v) for k, v in self.properties.items() if v is not None)
        order = self.client().orders.create(**info)
        order_ref = order.submit()
        self.resource_id_set(order_ref)
        # NOTE(pshchelo): order_ref is HATEOAS reference, i.e a string
        # need not to be fixed re LP bug #1393268
        return order_ref

    def validate(self):
        """Check cross-property constraints for the declared order type.

        :raises exception.StackValidationFailed: when required properties
            are missing or properties not allowed for the type are set.
        :raises exception.ResourcePropertyDependency: when PROFILE is set
            without CA_ID on a certificate order.
        """
        super(Order, self).validate()
        if self.properties[self.TYPE] != self.CERTIFICATE:
            if (self.properties[self.ALGORITHM] is None
                    or self.properties[self.BIT_LENGTH] is None):
                msg = _("Properties %(algorithm)s and %(bit_length)s are "
                        "required for %(type)s type of order.") % {
                            'algorithm': self.ALGORITHM,
                            'bit_length': self.BIT_LENGTH,
                            'type': self.properties[self.TYPE]
                        }
                raise exception.StackValidationFailed(message=msg)
        else:
            if (self.properties[self.PROFILE]
                    and not self.properties[self.CA_ID]):
                raise exception.ResourcePropertyDependency(prop1=self.PROFILE,
                                                           prop2=self.CA_ID)
        # Use .items() for consistency with handle_create() above
        # (was six.iteritems, a py2-compat leftover).
        declared_props = sorted([
            k for k, v in self.properties.items()
            if k != self.TYPE and v is not None
        ])
        allowed_props = sorted(
            self.ALLOWED_PROPERTIES_FOR_TYPE[self.properties[self.TYPE]])
        diff = sorted(set(declared_props) - set(allowed_props))
        if diff:
            msg = _("Unexpected properties: %(unexpected)s. Only these "
                    "properties are allowed for %(type)s type of order: "
                    "%(allowed)s.") % {
                        'unexpected': ', '.join(diff),
                        'type': self.properties[self.TYPE],
                        'allowed': ', '.join(allowed_props)
                    }
            raise exception.StackValidationFailed(message=msg)

    def check_create_complete(self, order_href):
        """Poll the order; True once ACTIVE, raise on ERROR status."""
        order = self.client().orders.get(order_href)

        if order.status == 'ERROR':
            reason = order.error_reason
            code = order.error_status_code
            msg = (_("Order '%(name)s' failed: %(code)s - %(reason)s") % {
                'name': self.name,
                'code': code,
                'reason': reason
            })
            raise exception.Error(msg)

        return order.status == 'ACTIVE'

    def _resolve_attribute(self, name):
        """Resolve an attribute from the order or its result container.

        Key/certificate payload attributes are fetched from the container
        the completed order points at; everything else comes straight off
        the order object.
        """
        if self.resource_id is None:
            return
        client = self.client()
        order = client.orders.get(self.resource_id)
        if name in (self.PUBLIC_KEY, self.PRIVATE_KEY, self.CERTIFICATE,
                    self.INTERMEDIATES):
            container = client.containers.get(order.container_ref)
            secret = getattr(container, name)
            return secret.payload

        return getattr(order, name)
Пример #17
0
class CloudWatchAlarm(resource.Resource):
    """CloudWatch-style alarm backed by Heat's watchrule engine.

    Hidden/deprecated resource (OS::Heat::CWLiteAlarm); users are
    directed to OS::Ceilometer::Alarm instead. Each lifecycle handler
    maps to a stored WatchRule keyed by the physical resource name.
    """

    PROPERTIES = (
        COMPARISON_OPERATOR,
        ALARM_DESCRIPTION,
        EVALUATION_PERIODS,
        METRIC_NAME,
        NAMESPACE,
        PERIOD,
        STATISTIC,
        ALARM_ACTIONS,
        OKACTIONS,
        DIMENSIONS,
        INSUFFICIENT_DATA_ACTIONS,
        THRESHOLD,
        UNITS,
    ) = (
        'ComparisonOperator',
        'AlarmDescription',
        'EvaluationPeriods',
        'MetricName',
        'Namespace',
        'Period',
        'Statistic',
        'AlarmActions',
        'OKActions',
        'Dimensions',
        'InsufficientDataActions',
        'Threshold',
        'Units',
    )

    properties_schema = {
        COMPARISON_OPERATOR:
        properties.Schema(
            properties.Schema.STRING,
            _('Operator used to compare the specified Statistic with '
              'Threshold.'),
            constraints=[
                constraints.AllowedValues([
                    'GreaterThanOrEqualToThreshold', 'GreaterThanThreshold',
                    'LessThanThreshold', 'LessThanOrEqualToThreshold'
                ]),
            ],
            required=True,
            update_allowed=True),
        ALARM_DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description for the alarm.'),
                          update_allowed=True),
        EVALUATION_PERIODS:
        properties.Schema(properties.Schema.STRING,
                          _('Number of periods to evaluate over.'),
                          required=True,
                          update_allowed=True),
        METRIC_NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Metric name watched by the alarm.'),
                          required=True),
        NAMESPACE:
        properties.Schema(properties.Schema.STRING,
                          _('Namespace for the metric.'),
                          required=True),
        PERIOD:
        properties.Schema(properties.Schema.STRING,
                          _('Period (seconds) to evaluate over.'),
                          required=True,
                          update_allowed=True),
        STATISTIC:
        properties.Schema(properties.Schema.STRING,
                          _('Metric statistic to evaluate.'),
                          constraints=[
                              constraints.AllowedValues([
                                  'SampleCount', 'Average', 'Sum', 'Minimum',
                                  'Maximum'
                              ]),
                          ],
                          required=True,
                          update_allowed=True),
        ALARM_ACTIONS:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of actions to execute when state transitions to alarm.'),
            update_allowed=True),
        OKACTIONS:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of actions to execute when state transitions to ok.'),
            update_allowed=True),
        DIMENSIONS:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of dimensions (arbitrary name/value pairs) associated '
              'with the metric.')),
        INSUFFICIENT_DATA_ACTIONS:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of actions to execute when state transitions to '
              'insufficient-data.'),
            update_allowed=True),
        THRESHOLD:
        properties.Schema(properties.Schema.STRING,
                          _('Threshold to evaluate against.'),
                          required=True,
                          update_allowed=True),
        UNITS:
        properties.Schema(
            properties.Schema.STRING,
            _('Unit for the metric.'),
            constraints=[
                constraints.AllowedValues([
                    'Seconds', 'Microseconds', 'Milliseconds', 'Bytes',
                    'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits',
                    'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent',
                    'Count', 'Bytes/Second', 'Kilobytes/Second',
                    'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second',
                    'Bits/Second', 'Kilobits/Second', 'Megabits/Second',
                    'Gigabits/Second', 'Terabits/Second', 'Count/Second', None
                ]),
            ],
            update_allowed=True),
    }

    strict_dependency = False

    support_status = support.SupportStatus(
        status=support.HIDDEN,
        message=_('OS::Heat::CWLiteAlarm is deprecated, '
                  'use OS::Ceilometer::Alarm instead.'),
        version='5.0.0',
        previous_status=support.SupportStatus(status=support.DEPRECATED,
                                              version='2014.2'))

    def handle_create(self):
        """Create and persist the backing WatchRule."""
        wr = watchrule.WatchRule(context=self.context,
                                 watch_name=self.physical_resource_name(),
                                 rule=dict(self.properties),
                                 stack_id=self.stack.id)
        wr.store()

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Store the updated properties on the existing WatchRule."""
        # If Properties has changed, update self.properties, so we
        # get the new values during any subsequent adjustment
        if prop_diff:
            self.properties = json_snippet.properties(self.properties_schema,
                                                      self.context)
            loader = watchrule.WatchRule.load
            wr = loader(self.context, watch_name=self.physical_resource_name())

            wr.rule = dict(self.properties)
            wr.store()

    def handle_delete(self):
        """Destroy the WatchRule; a missing rule is not an error."""
        try:
            wr = watchrule.WatchRule.load(
                self.context, watch_name=self.physical_resource_name())
            wr.destroy()
        except exception.EntityNotFound:
            pass

    def handle_suspend(self):
        """Put the WatchRule into the SUSPENDED state."""
        wr = watchrule.WatchRule.load(self.context,
                                      watch_name=self.physical_resource_name())
        wr.state_set(wr.SUSPENDED)

    def handle_resume(self):
        """Re-enable evaluation of the WatchRule."""
        wr = watchrule.WatchRule.load(self.context,
                                      watch_name=self.physical_resource_name())
        # Just set to NODATA, which will be re-evaluated next periodic task
        wr.state_set(wr.NODATA)

    def handle_check(self):
        """Verify the backing WatchRule still exists (raises if not)."""
        watch_name = self.physical_resource_name()
        watchrule.WatchRule.load(self.context, watch_name=watch_name)

    def get_reference_id(self):
        return self.physical_resource_name_or_FnGetRefId()

    def physical_resource_name(self):
        # Deterministic name (no random suffix) so the WatchRule can be
        # re-located from stack and resource names alone.
        return '%s-%s' % (self.stack.name, self.name)
Пример #18
0
class ScalingPolicy(resource.Resource):

    """Represents a Rackspace Auto Scale scaling policy."""

    PROPERTIES = (
        GROUP, NAME, CHANGE, CHANGE_PERCENT, DESIRED_CAPACITY,
        COOLDOWN, TYPE, ARGS,
    ) = (
        'group', 'name', 'change', 'changePercent', 'desiredCapacity',
        'cooldown', 'type', 'args',
    )

    properties_schema = {
        # group isn't in the post body, but it's in the URL to post to.
        GROUP: properties.Schema(
            properties.Schema.STRING,
            _('Scaling group ID that this policy belongs to.'),
            required=True
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of this scaling policy.'),
            required=True,
            update_allowed=True
        ),
        CHANGE: properties.Schema(
            properties.Schema.NUMBER,
            _('Amount to add to or remove from current number of instances. '
              'Incompatible with changePercent and desiredCapacity.'),
            update_allowed=True
        ),
        CHANGE_PERCENT: properties.Schema(
            properties.Schema.NUMBER,
            _('Percentage-based change to add or remove from current number '
              'of instances. Incompatible with change and desiredCapacity.'),
            update_allowed=True
        ),
        DESIRED_CAPACITY: properties.Schema(
            properties.Schema.NUMBER,
            _('Absolute number to set the number of instances to. '
              'Incompatible with change and changePercent.'),
            update_allowed=True
        ),
        COOLDOWN: properties.Schema(
            properties.Schema.NUMBER,
            _('Number of seconds after a policy execution during which '
              'further executions are disabled.'),
            update_allowed=True
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Type of this scaling policy. Specifies how the policy is '
              'executed.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['webhook', 'schedule',
                                           'cloud_monitoring']),
            ],
            update_allowed=True
        ),
        ARGS: properties.Schema(
            properties.Schema.MAP,
            _('Type-specific arguments for the policy.'),
            update_allowed=True
        ),
    }

    def _get_args(self, properties):
        """Get pyrax-style create arguments for scaling policies."""
        args = dict(
            scaling_group=properties[self.GROUP],
            name=properties[self.NAME],
            policy_type=properties[self.TYPE],
            cooldown=properties[self.COOLDOWN],
        )
        # change, changePercent and desiredCapacity are mutually
        # exclusive; a percentage change is expressed through the same
        # 'change' argument plus the is_percent flag.
        if properties.get(self.CHANGE) is not None:
            args['change'] = properties[self.CHANGE]
        elif properties.get(self.CHANGE_PERCENT) is not None:
            args['change'] = properties[self.CHANGE_PERCENT]
            args['is_percent'] = True
        elif properties.get(self.DESIRED_CAPACITY) is not None:
            args['desired_capacity'] = properties[self.DESIRED_CAPACITY]
        if properties.get(self.ARGS) is not None:
            args['args'] = properties[self.ARGS]
        return args

    def handle_create(self):
        """Create the scaling policy and initialize the resource ID.

        The resource ID is initialized to {group_id}:{policy_id}.
        """
        asclient = self.stack.clients.auto_scale()
        args = self._get_args(self.properties)
        policy = asclient.add_policy(**args)
        resource_id = '%s:%s' % (self.properties[self.GROUP], policy.id)
        self.resource_id_set(resource_id)

    def _get_policy_id(self):
        """Return the policy ID half of the {group_id}:{policy_id} pair."""
        return self.resource_id.split(':', 1)[1]

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Replace the remote policy with the updated template values."""
        asclient = self.stack.clients.auto_scale()
        args = self._get_args(tmpl_diff['Properties'])
        args['policy'] = self._get_policy_id()
        asclient.replace_policy(**args)

    def handle_delete(self):
        """Delete the policy if it exists."""
        # Bail out before building a client when nothing was created.
        if self.resource_id is None:
            return
        asclient = self.stack.clients.auto_scale()
        policy_id = self._get_policy_id()
        try:
            asclient.delete_policy(self.properties[self.GROUP], policy_id)
        except NotFound:
            pass
Пример #19
0
class CloudLoadBalancer(resource.Resource):
    """Represents a Rackspace Cloud Loadbalancer."""

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    # Template property names; the right-hand tuple holds the camelCase
    # keys the Cloud Load Balancer API uses for the same concepts.
    PROPERTIES = (
        NAME,
        NODES,
        PROTOCOL,
        ACCESS_LIST,
        HALF_CLOSED,
        ALGORITHM,
        CONNECTION_LOGGING,
        METADATA,
        PORT,
        TIMEOUT,
        CONNECTION_THROTTLE,
        SESSION_PERSISTENCE,
        VIRTUAL_IPS,
        CONTENT_CACHING,
        HEALTH_MONITOR,
        SSL_TERMINATION,
        ERROR_PAGE,
        HTTPS_REDIRECT,
    ) = (
        'name',
        'nodes',
        'protocol',
        'accessList',
        'halfClosed',
        'algorithm',
        'connectionLogging',
        'metadata',
        'port',
        'timeout',
        'connectionThrottle',
        'sessionPersistence',
        'virtualIps',
        'contentCaching',
        'healthMonitor',
        'sslTermination',
        'errorPage',
        'httpsRedirect',
    )

    # Properties that can be pushed with a single lb.update() call in
    # handle_update(); the rest need dedicated API operations.
    LB_UPDATE_PROPS = (NAME, ALGORITHM, PROTOCOL, HALF_CLOSED, PORT, TIMEOUT,
                       HTTPS_REDIRECT)

    # Keys of each entry in the 'nodes' property.
    _NODE_KEYS = (
        NODE_ADDRESSES,
        NODE_PORT,
        NODE_CONDITION,
        NODE_TYPE,
        NODE_WEIGHT,
    ) = (
        'addresses',
        'port',
        'condition',
        'type',
        'weight',
    )

    # Keys of each entry in the 'accessList' property.
    _ACCESS_LIST_KEYS = (
        ACCESS_LIST_ADDRESS,
        ACCESS_LIST_TYPE,
    ) = (
        'address',
        'type',
    )

    # Keys of the 'connectionThrottle' map property.
    _CONNECTION_THROTTLE_KEYS = (
        CONNECTION_THROTTLE_MAX_CONNECTION_RATE,
        CONNECTION_THROTTLE_MIN_CONNECTIONS,
        CONNECTION_THROTTLE_MAX_CONNECTIONS,
        CONNECTION_THROTTLE_RATE_INTERVAL,
    ) = (
        'maxConnectionRate',
        'minConnections',
        'maxConnections',
        'rateInterval',
    )

    # Keys of each entry in the 'virtualIps' property.
    _VIRTUAL_IP_KEYS = (VIRTUAL_IP_TYPE, VIRTUAL_IP_IP_VERSION,
                        VIRTUAL_IP_ID) = ('type', 'ipVersion', 'id')

    # Keys of the 'healthMonitor' map property (HTTP/HTTPS monitor form).
    _HEALTH_MONITOR_KEYS = (
        HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION,
        HEALTH_MONITOR_DELAY,
        HEALTH_MONITOR_TIMEOUT,
        HEALTH_MONITOR_TYPE,
        HEALTH_MONITOR_BODY_REGEX,
        HEALTH_MONITOR_HOST_HEADER,
        HEALTH_MONITOR_PATH,
        HEALTH_MONITOR_STATUS_REGEX,
    ) = (
        'attemptsBeforeDeactivation',
        'delay',
        'timeout',
        'type',
        'bodyRegex',
        'hostHeader',
        'path',
        'statusRegex',
    )
    # Subset of monitor keys valid for the CONNECT monitor type; used by
    # validate() to build the reduced schema.
    _HEALTH_MONITOR_CONNECT_KEYS = (
        HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION,
        HEALTH_MONITOR_DELAY,
        HEALTH_MONITOR_TIMEOUT,
        HEALTH_MONITOR_TYPE,
    )

    # Keys of the 'sslTermination' map property.
    _SSL_TERMINATION_KEYS = (
        SSL_TERMINATION_SECURE_PORT,
        SSL_TERMINATION_PRIVATEKEY,
        SSL_TERMINATION_CERTIFICATE,
        SSL_TERMINATION_INTERMEDIATE_CERTIFICATE,
        SSL_TERMINATION_SECURE_TRAFFIC_ONLY,
    ) = (
        'securePort',
        'privatekey',
        'certificate',
        'intermediateCertificate',
        'secureTrafficOnly',
    )

    # Attribute names resolvable via get_attr (see _resolve_attribute).
    ATTRIBUTES = (PUBLIC_IP, VIPS) = ('PublicIp', 'virtualIps')

    # Load-balancing algorithms accepted by the API.
    ALGORITHMS = [
        "LEAST_CONNECTIONS", "RANDOM", "ROUND_ROBIN",
        "WEIGHTED_LEAST_CONNECTIONS", "WEIGHTED_ROUND_ROBIN"
    ]

    # Full (HTTP/HTTPS-style) health monitor schema; validate() narrows
    # it down for CONNECT-type monitors.
    _health_monitor_schema = {
        HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION:
        properties.Schema(properties.Schema.NUMBER,
                          required=True,
                          constraints=[
                              constraints.Range(1, 10),
                          ]),
        HEALTH_MONITOR_DELAY:
        properties.Schema(properties.Schema.NUMBER,
                          required=True,
                          constraints=[
                              constraints.Range(1, 3600),
                          ]),
        HEALTH_MONITOR_TIMEOUT:
        properties.Schema(properties.Schema.NUMBER,
                          required=True,
                          constraints=[
                              constraints.Range(1, 300),
                          ]),
        HEALTH_MONITOR_TYPE:
        properties.Schema(properties.Schema.STRING,
                          required=True,
                          constraints=[
                              constraints.AllowedValues(
                                  ['CONNECT', 'HTTP', 'HTTPS']),
                          ]),
        HEALTH_MONITOR_BODY_REGEX:
        properties.Schema(properties.Schema.STRING),
        HEALTH_MONITOR_HOST_HEADER:
        properties.Schema(properties.Schema.STRING),
        HEALTH_MONITOR_PATH:
        properties.Schema(properties.Schema.STRING),
        HEALTH_MONITOR_STATUS_REGEX:
        properties.Schema(properties.Schema.STRING),
    }

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING, update_allowed=True),
        NODES:
        properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NODE_ADDRESSES:
                    properties.Schema(
                        properties.Schema.LIST,
                        required=True,
                        description=(_("IP addresses for the load balancer "
                                       "node. Must have at least one "
                                       "address.")),
                        schema=properties.Schema(properties.Schema.STRING)),
                    NODE_PORT:
                    properties.Schema(properties.Schema.INTEGER,
                                      required=True),
                    NODE_CONDITION:
                    properties.Schema(properties.Schema.STRING,
                                      default='ENABLED',
                                      constraints=[
                                          constraints.AllowedValues([
                                              'ENABLED', 'DISABLED', 'DRAINING'
                                          ]),
                                      ]),
                    NODE_TYPE:
                    properties.Schema(properties.Schema.STRING,
                                      constraints=[
                                          constraints.AllowedValues(
                                              ['PRIMARY', 'SECONDARY']),
                                      ]),
                    NODE_WEIGHT:
                    properties.Schema(properties.Schema.NUMBER,
                                      constraints=[
                                          constraints.Range(1, 100),
                                      ]),
                },
            ),
            required=True,
            update_allowed=True),
        PROTOCOL:
        properties.Schema(properties.Schema.STRING,
                          required=True,
                          constraints=[
                              constraints.AllowedValues([
                                  'DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS',
                                  'IMAPS', 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL',
                                  'POP3', 'POP3S', 'SMTP', 'TCP',
                                  'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM',
                                  'SFTP'
                              ]),
                          ],
                          update_allowed=True),
        ACCESS_LIST:
        properties.Schema(properties.Schema.LIST,
                          schema=properties.Schema(
                              properties.Schema.MAP,
                              schema={
                                  ACCESS_LIST_ADDRESS:
                                  properties.Schema(properties.Schema.STRING,
                                                    required=True),
                                  ACCESS_LIST_TYPE:
                                  properties.Schema(
                                      properties.Schema.STRING,
                                      required=True,
                                      constraints=[
                                          constraints.AllowedValues(
                                              ['ALLOW', 'DENY']),
                                      ]),
                              },
                          )),
        HALF_CLOSED:
        properties.Schema(properties.Schema.BOOLEAN, update_allowed=True),
        ALGORITHM:
        properties.Schema(properties.Schema.STRING,
                          constraints=[constraints.AllowedValues(ALGORITHMS)],
                          update_allowed=True),
        CONNECTION_LOGGING:
        properties.Schema(properties.Schema.BOOLEAN, update_allowed=True),
        METADATA:
        properties.Schema(properties.Schema.MAP, update_allowed=True),
        PORT:
        properties.Schema(properties.Schema.INTEGER,
                          required=True,
                          update_allowed=True),
        TIMEOUT:
        properties.Schema(properties.Schema.NUMBER,
                          constraints=[
                              constraints.Range(1, 120),
                          ],
                          update_allowed=True),
        CONNECTION_THROTTLE:
        properties.Schema(properties.Schema.MAP,
                          schema={
                              CONNECTION_THROTTLE_MAX_CONNECTION_RATE:
                              properties.Schema(properties.Schema.NUMBER,
                                                constraints=[
                                                    constraints.Range(
                                                        0, 100000),
                                                ]),
                              CONNECTION_THROTTLE_MIN_CONNECTIONS:
                              properties.Schema(properties.Schema.INTEGER,
                                                constraints=[
                                                    constraints.Range(1, 1000),
                                                ]),
                              CONNECTION_THROTTLE_MAX_CONNECTIONS:
                              properties.Schema(properties.Schema.INTEGER,
                                                constraints=[
                                                    constraints.Range(
                                                        1, 100000),
                                                ]),
                              CONNECTION_THROTTLE_RATE_INTERVAL:
                              properties.Schema(properties.Schema.NUMBER,
                                                constraints=[
                                                    constraints.Range(1, 3600),
                                                ]),
                          },
                          update_allowed=True),
        SESSION_PERSISTENCE:
        properties.Schema(properties.Schema.STRING,
                          constraints=[
                              constraints.AllowedValues(
                                  ['HTTP_COOKIE', 'SOURCE_IP']),
                          ],
                          update_allowed=True),
        VIRTUAL_IPS:
        properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    VIRTUAL_IP_TYPE:
                    properties.Schema(
                        properties.Schema.STRING,
                        "The type of VIP (public or internal). This property"
                        " cannot be specified if 'id' is specified. This "
                        "property must be specified if id is not specified.",
                        constraints=[
                            constraints.AllowedValues(['SERVICENET',
                                                       'PUBLIC']),
                        ]),
                    VIRTUAL_IP_IP_VERSION:
                    properties.Schema(
                        properties.Schema.STRING,
                        "IP version of the VIP. This property cannot be "
                        "specified if 'id' is specified. This property must "
                        "be specified if id is not specified.",
                        constraints=[
                            constraints.AllowedValues(['IPV6', 'IPV4']),
                        ]),
                    VIRTUAL_IP_ID:
                    properties.Schema(
                        properties.Schema.NUMBER,
                        "ID of a shared VIP to use instead of creating a "
                        "new one. This property cannot be specified if type"
                        " or version is specified.")
                },
            ),
            required=True,
            constraints=[constraints.Length(min=1)]),
        CONTENT_CACHING:
        properties.Schema(properties.Schema.STRING,
                          constraints=[
                              constraints.AllowedValues(
                                  ['ENABLED', 'DISABLED']),
                          ],
                          update_allowed=True),
        HEALTH_MONITOR:
        properties.Schema(properties.Schema.MAP,
                          schema=_health_monitor_schema,
                          update_allowed=True),
        SSL_TERMINATION:
        properties.Schema(
            properties.Schema.MAP,
            schema={
                SSL_TERMINATION_SECURE_PORT:
                properties.Schema(properties.Schema.INTEGER, default=443),
                SSL_TERMINATION_PRIVATEKEY:
                properties.Schema(properties.Schema.STRING, required=True),
                SSL_TERMINATION_CERTIFICATE:
                properties.Schema(properties.Schema.STRING, required=True),
                # only required if configuring intermediate ssl termination
                # add to custom validation
                SSL_TERMINATION_INTERMEDIATE_CERTIFICATE:
                properties.Schema(properties.Schema.STRING),
                # pyrax will default to false
                SSL_TERMINATION_SECURE_TRAFFIC_ONLY:
                properties.Schema(properties.Schema.BOOLEAN, default=False),
            },
            update_allowed=True),
        ERROR_PAGE:
        properties.Schema(properties.Schema.STRING, update_allowed=True),
        HTTPS_REDIRECT:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _("Enables or disables HTTP to HTTPS redirection for the load "
              "balancer. When enabled, any HTTP request returns status code "
              "301 (Moved Permanently), and the requester is redirected to "
              "the requested URL via the HTTPS protocol on port 443. Only "
              "available for HTTPS protocol (port=443), or HTTP protocol with "
              "a properly configured SSL termination (secureTrafficOnly=true, "
              "securePort=443)."),
            update_allowed=True,
            default=False,
            support_status=support.SupportStatus(version="2015.1"))
    }

    attributes_schema = {
        PUBLIC_IP:
        attributes.Schema(_('Public IP address of the specified instance.')),
        VIPS:
        attributes.Schema(_("A list of assigned virtual ip addresses"))
    }

    def __init__(self, name, json_snippet, stack):
        """Initialize the resource and cache the cloud LB client."""
        super(CloudLoadBalancer, self).__init__(name, json_snippet, stack)
        # Cached pyrax cloud-loadbalancer client, used by all handlers.
        self.clb = self.cloud_lb()

    def cloud_lb(self):
        """Return the cloud load balancer client plugin."""
        return self.client('cloud_lb')

    def _setup_properties(self, properties, function):
        """Instantiate pyrax objects from a list of property dicts.

        :param properties: list of dicts whose keys match the factory's
            keyword arguments, or a falsy value.
        :param function: factory callable (e.g. ``self.clb.VirtualIP``).
        :returns: one instance per dict (None values stripped first); a
            single default instance when no dicts were given; or None when
            no factory was given.
        """
        if not function:
            return None
        if not properties:
            return [function()]
        return [function(**self._remove_none(item)) for item in properties]

    def _alter_properties_for_api(self):
        """Wrap selected properties in the boilerplate the LB API expects.

        The API wants sessionPersistence, connectionLogging and metadata
        wrapped in extra key/value structure; build those wrappers here so
        templates can stay flat.
        """
        data = self.properties.data

        session_persistence = None
        if self.SESSION_PERSISTENCE in data:
            session_persistence = {
                'persistenceType': self.properties[self.SESSION_PERSISTENCE]}

        connection_logging = None
        if self.CONNECTION_LOGGING in data:
            connection_logging = {
                "enabled": self.properties[self.CONNECTION_LOGGING]}

        metadata = None
        if self.METADATA in data:
            metadata = [{'key': k, 'value': v}
                        for k, v
                        in six.iteritems(self.properties[self.METADATA])]

        return (session_persistence, connection_logging, metadata)

    def _check_status(self, loadbalancer, status_list):
        """Refresh the loadbalancer and report whether its status matches.

        :param loadbalancer: pyrax loadbalancer object; refreshed in place
            via its ``get()`` method.
        :param status_list: container of acceptable status strings.
        :returns: True if the refreshed status is in status_list.
        """
        loadbalancer.get()  # pull current state from the API
        # Return the membership test directly instead of the redundant
        # if/else returning True/False.
        return loadbalancer.status in status_list

    def _valid_HTTPS_redirect_with_HTTP_prot(self):
        """Determine if HTTPS redirect is valid when protocol is HTTP.

        Redirect over plain HTTP is only permitted when SSL termination
        accepts secure traffic only and listens on port 443.

        :returns: bool
        """
        termcfg = self.properties.get(self.SSL_TERMINATION) or {}
        seconly = termcfg.get(self.SSL_TERMINATION_SECURE_TRAFFIC_ONLY, False)
        secport = termcfg.get(self.SSL_TERMINATION_SECURE_PORT, 0)
        # Return the condition directly instead of the redundant
        # if-return-True-else-return-False form.
        return bool(self.properties[self.HTTPS_REDIRECT]
                    and self.properties[self.PROTOCOL] == "HTTP"
                    and seconly
                    and secport == 443)

    def _configure_post_creation(self, loadbalancer):
        """Configure all load balancer properties post creation.

        These properties can only be set after the load balancer is created.
        This is a cooperative task: each bare ``yield`` hands control back
        to the scheduler while waiting for the LB to return to ACTIVE,
        since the API rejects changes while a previous one is pending.
        """
        if self.properties[self.ACCESS_LIST]:
            while not self._check_status(loadbalancer, ['ACTIVE']):
                yield
            loadbalancer.add_access_list(self.properties[self.ACCESS_LIST])

        if self.properties[self.ERROR_PAGE]:
            while not self._check_status(loadbalancer, ['ACTIVE']):
                yield
            loadbalancer.set_error_page(self.properties[self.ERROR_PAGE])

        if self.properties[self.SSL_TERMINATION]:
            while not self._check_status(loadbalancer, ['ACTIVE']):
                yield
            ssl_term = self.properties[self.SSL_TERMINATION]
            loadbalancer.add_ssl_termination(
                ssl_term[self.SSL_TERMINATION_SECURE_PORT],
                ssl_term[self.SSL_TERMINATION_PRIVATEKEY],
                ssl_term[self.SSL_TERMINATION_CERTIFICATE],
                intermediateCertificate=ssl_term[
                    self.SSL_TERMINATION_INTERMEDIATE_CERTIFICATE],
                enabled=True,
                secureTrafficOnly=ssl_term[
                    self.SSL_TERMINATION_SECURE_TRAFFIC_ONLY])

        # httpsRedirect with plain HTTP is deferred to here because it is
        # only accepted once SSL termination (above) is in place.
        if self._valid_HTTPS_redirect_with_HTTP_prot():
            while not self._check_status(loadbalancer, ['ACTIVE']):
                yield
            loadbalancer.update(httpsRedirect=True)

        if self.CONTENT_CACHING in self.properties:
            enabled = self.properties[self.CONTENT_CACHING] == 'ENABLED'
            while not self._check_status(loadbalancer, ['ACTIVE']):
                yield
            loadbalancer.content_caching = enabled

    def _process_node(self, node):
        """Expand one node spec into per-address node dicts.

        A spec without addresses is yielded unchanged; otherwise one copy
        is yielded per address, with the 'addresses' list replaced by a
        single 'address' key as the API expects.
        """
        addresses = node.get(self.NODE_ADDRESSES)
        if not addresses:
            yield node
            return
        for addr in addresses:
            expanded = copy.deepcopy(node)
            del expanded[self.NODE_ADDRESSES]
            expanded['address'] = addr
            yield expanded

    def _process_nodes(self, node_list):
        """Flatten the expanded node specs into a single iterator."""
        return itertools.chain.from_iterable(
            self._process_node(node) for node in node_list)

    def _validate_https_redirect(self):
        """Reject httpsRedirect settings the API would refuse.

        :raises exception.StackValidationFailed: when redirect is enabled
            without HTTPS protocol or a qualifying SSL termination.
        """
        if not self.properties[self.HTTPS_REDIRECT]:
            return
        if self.properties[self.PROTOCOL] == "HTTPS":
            return
        if self._valid_HTTPS_redirect_with_HTTP_prot():
            return
        message = _("HTTPS redirect is only available for the HTTPS "
                    "protocol (port=443), or the HTTP protocol with "
                    "a properly configured SSL termination "
                    "(secureTrafficOnly=true, securePort=443).")
        raise exception.StackValidationFailed(message=message)

    def handle_create(self):
        """Create the loadbalancer and start post-creation configuration.

        Returns the pyrax loadbalancer object, which is passed to
        check_create_complete() to poll for ACTIVE.
        """
        node_list = self._process_nodes(self.properties.get(self.NODES))
        nodes = [self.clb.Node(**node) for node in node_list]
        vips = self.properties.get(self.VIRTUAL_IPS)

        virtual_ips = self._setup_properties(vips, self.clb.VirtualIP)

        (session_persistence, connection_logging,
         metadata) = self._alter_properties_for_api()

        lb_body = {
            'port': self.properties[self.PORT],
            'protocol': self.properties[self.PROTOCOL],
            'nodes': nodes,
            'virtual_ips': virtual_ips,
            'algorithm': self.properties.get(self.ALGORITHM),
            'halfClosed': self.properties.get(self.HALF_CLOSED),
            'connectionThrottle':
            self.properties.get(self.CONNECTION_THROTTLE),
            'metadata': metadata,
            'healthMonitor': self.properties.get(self.HEALTH_MONITOR),
            'sessionPersistence': session_persistence,
            'timeout': self.properties.get(self.TIMEOUT),
            'connectionLogging': connection_logging,
            self.HTTPS_REDIRECT: self.properties[self.HTTPS_REDIRECT]
        }
        # HTTP + SSL-termination redirect cannot be requested at create
        # time; it is applied later in _configure_post_creation().
        if self._valid_HTTPS_redirect_with_HTTP_prot():
            lb_body[self.HTTPS_REDIRECT] = False
        self._validate_https_redirect()

        lb_name = (self.properties.get(self.NAME)
                   or self.physical_resource_name())
        LOG.debug("Creating loadbalancer: %s" % {lb_name: lb_body})
        loadbalancer = self.clb.create(lb_name, **lb_body)
        self.resource_id_set(str(loadbalancer.id))

        # Apply properties that the API only accepts after creation
        # (access list, error page, SSL termination, redirect, caching).
        post_create = scheduler.TaskRunner(self._configure_post_creation,
                                           loadbalancer)
        post_create(timeout=600)
        return loadbalancer

    def check_create_complete(self, loadbalancer):
        """Return True once the loadbalancer has reached ACTIVE."""
        return self._check_status(loadbalancer, ['ACTIVE'])

    def handle_check(self):
        """Verify the loadbalancer is ACTIVE; raise otherwise."""
        loadbalancer = self.clb.get(self.resource_id)
        if not self._check_status(loadbalancer, ['ACTIVE']):
            raise exception.Error(
                _("Cloud LoadBalancer is not ACTIVE "
                  "(was: %s)") % loadbalancer.status)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Add and remove nodes specified in the prop_diff.

        Returns an ordered list of TaskRunner checkers that
        check_update_complete() drives to completion.
        """
        lb = self.clb.get(self.resource_id)
        checkers = []

        # Node membership changes first; they may yield several checkers.
        if self.NODES in prop_diff:
            checkers.extend(self._update_nodes(lb, prop_diff[self.NODES]))

        # Simple scalar properties are pushed in a single lb.update() call.
        updated_props = dict((prop, prop_diff[prop])
                             for prop in six.iterkeys(prop_diff)
                             if prop in self.LB_UPDATE_PROPS)
        if updated_props:
            checkers.append(self._update_lb_properties(lb, updated_props))

        # Each remaining property has a dedicated API operation; keep the
        # original dispatch order.
        handlers = (
            (self.HEALTH_MONITOR, self._update_health_monitor),
            (self.SESSION_PERSISTENCE, self._update_session_persistence),
            (self.SSL_TERMINATION, self._update_ssl_termination),
            (self.METADATA, self._update_metadata),
            (self.ERROR_PAGE, self._update_errorpage),
            (self.CONNECTION_LOGGING, self._update_connection_logging),
            (self.CONNECTION_THROTTLE, self._update_connection_throttle),
            (self.CONTENT_CACHING, self._update_content_caching),
        )
        for prop, handler in handlers:
            if prop in prop_diff:
                checkers.append(handler(lb, prop_diff[prop]))

        return checkers

    def _update_nodes(self, lb, updated_nodes):
        """Diff current LB nodes against updated_nodes and apply changes.

        Returns a list of TaskRunner checkers: one batched add for new
        nodes, one per deleted node, and one per changed node.
        """
        @retry_if_immutable
        def add_nodes(lb, new_nodes):
            lb.add_nodes(new_nodes)

        @retry_if_immutable
        def remove_node(known, node):
            known[node].delete()

        @retry_if_immutable
        def update_node(known, node):
            known[node].update()

        checkers = []
        current_nodes = lb.nodes
        diff_nodes = self._process_nodes(updated_nodes)
        # Loadbalancers can be uniquely identified by address and
        # port.  Old is a dict of all nodes the loadbalancer
        # currently knows about.
        old = dict(("{0.address}{0.port}".format(node), node)
                   for node in current_nodes)
        # New is a dict of the nodes the loadbalancer will know
        # about after this update.
        new = dict(("%s%s" % (node["address"], node[self.NODE_PORT]), node)
                   for node in diff_nodes)

        old_set = set(six.iterkeys(old))
        new_set = set(six.iterkeys(new))

        deleted = old_set.difference(new_set)
        added = new_set.difference(old_set)
        updated = new_set.intersection(old_set)

        # The API requires at least one node at all times.
        if len(current_nodes) + len(added) - len(deleted) < 1:
            raise ValueError(
                _("The loadbalancer:%s requires at least one "
                  "node.") % self.name)
        """
        Add loadbalancers in the new map that are not in the old map.
        Add before delete to avoid deleting the last node and getting in
        an invalid state.
        """
        new_nodes = [self.clb.Node(**new[lb_node]) for lb_node in added]
        if new_nodes:
            checkers.append(scheduler.TaskRunner(add_nodes, lb, new_nodes))

        # Delete loadbalancers in the old dict that are not in the
        # new dict.
        for node in deleted:
            checkers.append(scheduler.TaskRunner(remove_node, old, node))

        # Update nodes that have been changed: copy changed attribute
        # values onto the existing node object, then push it.
        for node in updated:
            node_changed = False
            for attribute in six.iterkeys(new[node]):
                new_value = new[node][attribute]
                if new_value and new_value != getattr(old[node], attribute):
                    node_changed = True
                    setattr(old[node], attribute, new_value)
            if node_changed:
                checkers.append(scheduler.TaskRunner(update_node, old, node))

        return checkers

    def _update_lb_properties(self, lb, updated_props):
        """Return a checker that pushes simple property updates to the LB."""
        @retry_if_immutable
        def apply_update():
            lb.update(**updated_props)

        return scheduler.TaskRunner(apply_update)

    def _update_health_monitor(self, lb, updated_hm):
        """Return a checker that sets or removes the health monitor."""
        @retry_if_immutable
        def set_health_monitor():
            if updated_hm is None:
                lb.delete_health_monitor()
            else:
                # add_health_monitor replaces any existing monitor, so no
                # separate delete is needed first.
                lb.add_health_monitor(**updated_hm)

        return scheduler.TaskRunner(set_health_monitor)

    def _update_session_persistence(self, lb, updated_sp):
        """Return a checker that sets or clears session persistence."""
        @retry_if_immutable
        def set_session_persistence():
            # pyrax clears persistence when assigned the empty string.
            lb.session_persistence = '' if updated_sp is None else updated_sp

        return scheduler.TaskRunner(set_session_persistence)

    def _update_ssl_termination(self, lb, updated_ssl_term):
        """Return a checker that sets or removes SSL termination."""
        @retry_if_immutable
        def set_ssl_termination():
            if updated_ssl_term is None:
                lb.delete_ssl_termination()
            else:
                # add_ssl_termination replaces the existing configuration.
                lb.add_ssl_termination(**updated_ssl_term)

        return scheduler.TaskRunner(set_ssl_termination)

    def _update_metadata(self, lb, updated_metadata):
        """Return a checker that replaces or removes the LB metadata."""
        @retry_if_immutable
        def set_lb_metadata():
            if updated_metadata is None:
                lb.delete_metadata()
            else:
                lb.set_metadata(updated_metadata)

        return scheduler.TaskRunner(set_lb_metadata)

    def _update_errorpage(self, lb, updated_errorpage):
        """Return a checker that sets or clears the custom error page."""
        @retry_if_immutable
        def set_errorpage():
            if updated_errorpage is None:
                lb.clear_error_page()
            else:
                lb.set_error_page(updated_errorpage)

        return scheduler.TaskRunner(set_errorpage)

    def _update_connection_logging(self, lb, updated_cl):
        """Return a checker that toggles connection logging."""
        @retry_if_immutable
        def set_connection_logging():
            lb.connection_logging = bool(updated_cl)

        return scheduler.TaskRunner(set_connection_logging)

    def _update_connection_throttle(self, lb, updated_ct):
        """Return a checker that sets or removes connection throttling."""
        @retry_if_immutable
        def set_connection_throttle():
            if updated_ct is None:
                lb.delete_connection_throttle()
            else:
                lb.add_connection_throttle(**updated_ct)

        return scheduler.TaskRunner(set_connection_throttle)

    def _update_content_caching(self, lb, updated_cc):
        """Return a checker that toggles content caching."""
        @retry_if_immutable
        def set_content_caching():
            lb.content_caching = (updated_cc == 'ENABLED')

        return scheduler.TaskRunner(set_content_caching)

    def check_update_complete(self, checkers):
        """Push all checkers to completion in list order."""
        def _advance(checker):
            if not checker.started():
                checker.start()
            return checker.step()

        # all() stops at the first checker that has not finished, exactly
        # like the original early-return loop.
        return all(_advance(checker) for checker in checkers)

    def handle_delete(self):
        """Start asynchronous deletion of the loadbalancer, if it exists.

        Returns the running delete task, or None when there is nothing to
        delete; check_delete_complete() drives the task.
        """
        @retry_if_immutable
        def delete_lb(lb):
            lb.delete()

        if self.resource_id is None:
            return None
        try:
            loadbalancer = self.clb.get(self.resource_id)
        except NotFound:
            # Already gone out-of-band; nothing to do.
            return None
        if loadbalancer.status == 'DELETED':
            return None
        task = scheduler.TaskRunner(delete_lb, loadbalancer)
        task.start()
        return task

    def check_delete_complete(self, task):
        """Return True when no delete task exists or it has finished."""
        return not (task and not task.step())

    def _remove_none(self, property_dict):
        """Return a copy of property_dict without None-valued entries.

        None values would otherwise trip schema validation and the API.
        """
        return {key: value
                for key, value in six.iteritems(property_dict)
                if value is not None}

    def validate(self):
        """Validate any of the provided params."""
        res = super(CloudLoadBalancer, self).validate()
        if res:
            return res

        # halfClosed is only meaningful for raw TCP protocols.
        if self.properties.get(self.HALF_CLOSED):
            if not (self.properties[self.PROTOCOL] == 'TCP'
                    or self.properties[self.PROTOCOL] == 'TCP_CLIENT_FIRST'):
                message = (_('The %s property is only available for the TCP '
                             'or TCP_CLIENT_FIRST protocols') %
                           self.HALF_CLOSED)
                raise exception.StackValidationFailed(message=message)

        # health_monitor connect and http types require completely different
        # schema
        if self.properties.get(self.HEALTH_MONITOR):
            prop_val = self.properties[self.HEALTH_MONITOR]
            health_monitor = self._remove_none(prop_val)

            # CONNECT monitors only accept the reduced key set; re-run
            # property validation against the narrowed schema.
            schema = self._health_monitor_schema
            if health_monitor[self.HEALTH_MONITOR_TYPE] == 'CONNECT':
                schema = dict((k, v) for k, v in schema.items()
                              if k in self._HEALTH_MONITOR_CONNECT_KEYS)
            properties.Properties(schema, health_monitor, function.resolve,
                                  self.name).validate()

        # validate if HTTPS_REDIRECT is true
        self._validate_https_redirect()
        # if a vip specifies and id, it can't specify version or type;
        # otherwise version and type are required
        for vip in self.properties.get(self.VIRTUAL_IPS, []):
            has_id = vip.get(self.VIRTUAL_IP_ID) is not None
            has_version = vip.get(self.VIRTUAL_IP_IP_VERSION) is not None
            has_type = vip.get(self.VIRTUAL_IP_TYPE) is not None
            if has_id:
                if (has_version or has_type):
                    message = _("Cannot specify type or version if VIP id is"
                                " specified.")
                    raise exception.StackValidationFailed(message=message)
            elif not (has_version and has_type):
                message = _("Must specify VIP type and version if no id "
                            "specified.")
                raise exception.StackValidationFailed(message=message)

    def _public_ip(self, lb):
        """Return the first PUBLIC virtual IP address, or None."""
        return next((six.text_type(vip.address)
                     for vip in lb.virtual_ips
                     if vip.type == 'PUBLIC'),
                    None)

    def _resolve_attribute(self, key):
        """Resolve the PublicIp / virtualIps attributes from the live LB.

        :param key: one of ATTRIBUTES.
        :returns: the attribute value, or None when the resource has no
            physical id yet.
        :raises exception.InvalidTemplateAttribute: for an unknown key.
        """
        if not self.resource_id:
            return None
        lb = self.clb.get(self.resource_id)
        # Resolvers are lambdas so only the requested attribute is
        # computed; the local is named attr_resolvers to avoid shadowing
        # the heat 'function' module used elsewhere in this file.
        attr_resolvers = {
            self.PUBLIC_IP: lambda: self._public_ip(lb),
            self.VIPS: lambda: [{"id": vip.id,
                                 "type": vip.type,
                                 "ip_version": vip.ip_version,
                                 "address": vip.address}
                                for vip in lb.virtual_ips],
        }
        if key not in attr_resolvers:
            raise exception.InvalidTemplateAttribute(resource=self.name,
                                                     key=key)
        attr_value = attr_resolvers[key]()
        LOG.info(_LI('%(name)s.GetAtt(%(key)s) == %(function)s'), {
            'name': self.name,
            'key': key,
            'function': attr_value
        })
        return attr_value
# Example #20
class Group(resource.Resource):

    """Represents a scaling group."""

    # pyrax differs drastically from the actual Auto Scale API. We'll prefer
    # the true API here, but since pyrax doesn't support the full flexibility
    # of the API, we'll have to restrict what users can provide.

    # properties are identical to the API POST /groups.
    PROPERTIES = (
        GROUP_CONFIGURATION, LAUNCH_CONFIGURATION,
    ) = (
        'groupConfiguration', 'launchConfiguration',
    )

    _GROUP_CONFIGURATION_KEYS = (
        GROUP_CONFIGURATION_MAX_ENTITIES, GROUP_CONFIGURATION_COOLDOWN,
        GROUP_CONFIGURATION_NAME, GROUP_CONFIGURATION_MIN_ENTITIES,
        GROUP_CONFIGURATION_METADATA,
    ) = (
        'maxEntities', 'cooldown',
        'name', 'minEntities',
        'metadata',
    )

    _LAUNCH_CONFIG_KEYS = (
        LAUNCH_CONFIG_ARGS, LAUNCH_CONFIG_TYPE,
    ) = (
        'args', 'type',
    )

    _LAUNCH_CONFIG_ARGS_KEYS = (
        LAUNCH_CONFIG_ARGS_LOAD_BALANCERS,
        LAUNCH_CONFIG_ARGS_SERVER,
    ) = (
        'loadBalancers',
        'server',
    )

    _LAUNCH_CONFIG_ARGS_LOAD_BALANCER_KEYS = (
        LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID,
        LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT,
    ) = (
        'loadBalancerId',
        'port',
    )

    _LAUNCH_CONFIG_ARGS_SERVER_KEYS = (
        LAUNCH_CONFIG_ARGS_SERVER_NAME, LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF,
        LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF,
        LAUNCH_CONFIG_ARGS_SERVER_METADATA,
        LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY,
        LAUNCH_CONFIG_ARGS_SERVER_NETWORKS,
        LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG,
        LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME,
    ) = (
        'name', 'flavorRef',
        'imageRef',
        'metadata',
        'personality',
        'networks',
        'diskConfig',  # technically maps to OS-DCF:diskConfig
        'key_name',
    )

    _LAUNCH_CONFIG_ARGS_SERVER_NETWORK_KEYS = (
        LAUNCH_CONFIG_ARGS_SERVER_NETWORK_UUID,
    ) = (
        'uuid',
    )

    _launch_configuration_args_schema = {
        LAUNCH_CONFIG_ARGS_LOAD_BALANCERS: properties.Schema(
            properties.Schema.LIST,
            _('List of load balancers to hook the '
              'server up to. If not specified, no '
              'load balancing will be configured.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID: properties.Schema(
                        properties.Schema.STRING,
                        _('ID of the load balancer.'),
                        required=True
                    ),
                    LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT: properties.Schema(
                        properties.Schema.NUMBER,
                        _('Server port to connect the load balancer to.'),
                        required=True
                    ),
                },
            )
        ),
        LAUNCH_CONFIG_ARGS_SERVER: properties.Schema(
            properties.Schema.MAP,
            _('Server creation arguments, as accepted by the Cloud Servers '
              'server creation API.'),
            schema={
                LAUNCH_CONFIG_ARGS_SERVER_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Server name.'),
                    required=True
                ),
                LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF: properties.Schema(
                    properties.Schema.STRING,
                    _('Flavor ID.'),
                    required=True
                ),
                LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF: properties.Schema(
                    properties.Schema.STRING,
                    _('Image ID.'),
                    required=True
                ),
                LAUNCH_CONFIG_ARGS_SERVER_METADATA: properties.Schema(
                    properties.Schema.MAP,
                    _('Metadata key and value pairs.')
                ),
                LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY: properties.Schema(
                    properties.Schema.MAP,
                    _('File path and contents.')
                ),
                LAUNCH_CONFIG_ARGS_SERVER_NETWORKS: properties.Schema(
                    properties.Schema.LIST,
                    _('Networks to attach to. If unspecified, the instance '
                      'will be attached to the public Internet and private '
                      'ServiceNet networks.'),
                    schema=properties.Schema(
                        properties.Schema.MAP,
                        schema={
                            LAUNCH_CONFIG_ARGS_SERVER_NETWORK_UUID:
                            properties.Schema(
                                properties.Schema.STRING,
                                _('UUID of network to attach to.'),
                                required=True)
                        }
                    )
                ),
                LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG: properties.Schema(
                    properties.Schema.STRING,
                    _('Configuration specifying the partition layout. AUTO to '
                      'create a partition utilizing the entire disk, and '
                      'MANUAL to create a partition matching the source '
                      'image.'),
                    constraints=[
                        constraints.AllowedValues(['AUTO', 'MANUAL']),
                    ]
                ),
                LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of a previously created SSH keypair to allow '
                      'key-based authentication to the server.')
                ),
            },
            required=True
        ),
    }

    properties_schema = {
        GROUP_CONFIGURATION: properties.Schema(
            properties.Schema.MAP,
            _('Group configuration.'),
            schema={
                GROUP_CONFIGURATION_MAX_ENTITIES: properties.Schema(
                    properties.Schema.NUMBER,
                    _('Maximum number of entities in this scaling group.'),
                    required=True
                ),
                GROUP_CONFIGURATION_COOLDOWN: properties.Schema(
                    properties.Schema.NUMBER,
                    _('Number of seconds after capacity changes during '
                      'which further capacity changes are disabled.'),
                    required=True
                ),
                GROUP_CONFIGURATION_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of the scaling group.'),
                    required=True
                ),
                GROUP_CONFIGURATION_MIN_ENTITIES: properties.Schema(
                    properties.Schema.NUMBER,
                    _('Minimum number of entities in this scaling group.'),
                    required=True
                ),
                GROUP_CONFIGURATION_METADATA: properties.Schema(
                    properties.Schema.MAP,
                    _('Arbitrary key/value metadata to associate with '
                      'this group.')
                ),
            },
            required=True,
            update_allowed=True
        ),
        LAUNCH_CONFIGURATION: properties.Schema(
            properties.Schema.MAP,
            _('Launch configuration.'),
            schema={
                LAUNCH_CONFIG_ARGS: properties.Schema(
                    properties.Schema.MAP,
                    _('Type-specific server launching arguments.'),
                    schema=_launch_configuration_args_schema,
                    required=True
                ),
                LAUNCH_CONFIG_TYPE: properties.Schema(
                    properties.Schema.STRING,
                    _('Launch configuration method. Only launch_server '
                      'is currently supported.'),
                    required=True,
                    constraints=[
                        constraints.AllowedValues(['launch_server']),
                    ]
                ),
            },
            required=True,
            update_allowed=True
        ),
        # We don't allow scaling policies to be specified here, despite the
        # fact that the API supports it. Users should use the ScalingPolicy
        # resource.
    }

    def _get_group_config_args(self, groupconf):
        """Get the groupConfiguration-related pyrax arguments."""
        return dict(
            name=groupconf[self.GROUP_CONFIGURATION_NAME],
            cooldown=groupconf[self.GROUP_CONFIGURATION_COOLDOWN],
            min_entities=groupconf[self.GROUP_CONFIGURATION_MIN_ENTITIES],
            max_entities=groupconf[self.GROUP_CONFIGURATION_MAX_ENTITIES],
            metadata=groupconf.get(self.GROUP_CONFIGURATION_METADATA, None))

    def _get_launch_config_args(self, launchconf):
        """Get the launchConfiguration-related pyrax arguments."""
        lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
        server_args = lcargs[self.LAUNCH_CONFIG_ARGS_SERVER]
        lb_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCERS)
        # Work on a copy so coercing the load balancer IDs to int below
        # does not mutate the stored resource properties.
        lbs = copy.deepcopy(lb_args)
        if lbs:
            for lb in lbs:
                lbid = int(lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID])
                lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID] = lbid
        # personality is optional in the schema; guard against None so we
        # don't crash on .items() when it is omitted.
        personality = server_args.get(
            self.LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY) or {}
        return dict(
            launch_config_type=launchconf[self.LAUNCH_CONFIG_TYPE],
            server_name=server_args[self.LAUNCH_CONFIG_ARGS_SERVER_NAME],
            image=server_args[self.LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF],
            flavor=server_args[self.LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF],
            disk_config=server_args.get(
                self.LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG),
            metadata=server_args.get(
                self.LAUNCH_CONFIG_ARGS_SERVER_METADATA),
            personality=[
                {'path': path, 'contents': contents}
                for path, contents in personality.items()],
            networks=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_NETWORKS),
            load_balancers=lbs,
            key_name=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME),
        )

    def _get_create_args(self):
        """Get pyrax-style arguments for creating a scaling group."""
        args = self._get_group_config_args(
            self.properties[self.GROUP_CONFIGURATION])
        # pyrax's create() takes the group metadata under a distinct
        # keyword to avoid clashing with the server metadata.
        args['group_metadata'] = args.pop('metadata')
        args.update(self._get_launch_config_args(
            self.properties[self.LAUNCH_CONFIGURATION]))
        return args

    def handle_create(self):
        """Create the autoscaling group and set resource_id.

        The resource_id is set to the resulting group's ID.
        """
        asclient = self.stack.clients.auto_scale()
        group = asclient.create(**self._get_create_args())
        self.resource_id_set(str(group.id))

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update the group configuration and the launch configuration."""
        asclient = self.stack.clients.auto_scale()
        if self.GROUP_CONFIGURATION in prop_diff:
            args = self._get_group_config_args(
                prop_diff[self.GROUP_CONFIGURATION])
            asclient.replace(self.resource_id, **args)
        if self.LAUNCH_CONFIGURATION in prop_diff:
            args = self._get_launch_config_args(
                prop_diff[self.LAUNCH_CONFIGURATION])
            asclient.replace_launch_config(self.resource_id, **args)

    def handle_delete(self):
        """Delete the scaling group.

        Since Auto Scale doesn't allow deleting a group until all its servers
        are gone, we must set the minEntities and maxEntities of the group to 0
        and then keep trying the delete until Auto Scale has deleted all the
        servers and the delete will succeed.
        """
        if self.resource_id is None:
            return
        asclient = self.stack.clients.auto_scale()
        args = self._get_group_config_args(
            self.properties[self.GROUP_CONFIGURATION])
        args['min_entities'] = 0
        args['max_entities'] = 0
        try:
            asclient.replace(self.resource_id, **args)
        except NotFound:
            # Group is already gone; nothing to shrink.
            pass

    def check_delete_complete(self, result):
        """Try the delete operation until it succeeds."""
        if self.resource_id is None:
            return True
        try:
            self.stack.clients.auto_scale().delete(self.resource_id)
        except Forbidden:
            # Auto Scale refuses to delete a group that still has servers;
            # report not-done so the engine retries later.
            return False
        except NotFound:
            # Already deleted: treat as success.
            return True
        return True
# Example #21
class SoftwareConfig(resource.Resource):
    """A resource for describing and storing software configuration.

    The software_configs API which backs this resource creates immutable
    configs, so any change to the template resource definition will result
    in a new config being created, and the old one being deleted.

    Configs can be defined in the same template which uses them, or they can
    be created in one stack, and passed to another stack via a parameter.

    A config resource can be referenced in other resource properties which
    are config-aware. This includes the properties OS::Nova::Server user_data,
    OS::Heat::SoftwareDeployment config and OS::Heat::MultipartMime parts
    config.

    Along with the config script itself, this resource can define schemas for
    inputs and outputs which the config script is expected to consume and
    produce. Inputs and outputs are optional and will map to concepts which
    are specific to the configuration tool being used.
    """

    PROPERTIES = (GROUP, CONFIG, OPTIONS, INPUTS,
                  OUTPUTS) = ('group', 'config', 'options', 'inputs',
                              'outputs')

    # Keys shared by the input and output schema definitions below.
    IO_PROPERTIES = (NAME, DESCRIPTION, TYPE, DEFAULT,
                     ERROR_OUTPUT) = ('name', 'description', 'type', 'default',
                                      'error_output')

    ATTRIBUTES = (CONFIG_ATTR, ) = ('config', )

    input_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the input.'),
                          required=True),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description of the input.')),
        TYPE:
        properties.Schema(properties.Schema.STRING,
                          _('Type of the value of the input.'),
                          default='String',
                          constraints=[
                              constraints.AllowedValues(
                                  ('String', 'Number', 'CommaDelimitedList',
                                   'Json'))
                          ]),
        DEFAULT:
        properties.Schema(
            properties.Schema.STRING,
            _('Default value for the input if none is specified.'),
        ),
    }

    output_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the output.'),
                          required=True),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description of the output.')),
        TYPE:
        properties.Schema(properties.Schema.STRING,
                          _('Type of the value of the output.'),
                          default='String',
                          constraints=[
                              constraints.AllowedValues(
                                  ('String', 'Number', 'CommaDelimitedList',
                                   'Json'))
                          ]),
        ERROR_OUTPUT:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Denotes that the deployment is in an error state if this '
              'output has a value.'),
            default=False)
    }

    properties_schema = {
        GROUP:
        properties.Schema(
            properties.Schema.STRING,
            _('Namespace to group this software config by when delivered to '
              'a server. This may imply what configuration tool is going to '
              'perform the configuration.'),
            default='Heat::Ungrouped'),
        CONFIG:
        properties.Schema(
            properties.Schema.STRING,
            _('Configuration script or manifest which specifies what actual '
              'configuration is performed.'),
        ),
        OPTIONS:
        properties.Schema(
            properties.Schema.MAP,
            _('Map containing options specific to the configuration '
              'management tool used by this resource.'),
        ),
        INPUTS:
        properties.Schema(
            properties.Schema.LIST,
            _('Schema representing the inputs that this software config is '
              'expecting.'),
            schema=properties.Schema(properties.Schema.MAP,
                                     schema=input_schema)),
        OUTPUTS:
        properties.Schema(
            properties.Schema.LIST,
            _('Schema representing the outputs that this software config '
              'will produce.'),
            schema=properties.Schema(properties.Schema.MAP,
                                     schema=output_schema)),
    }

    attributes_schema = {
        CONFIG_ATTR:
        attributes.Schema(_("The config value of the software config.")),
    }

    default_client_name = 'heat'

    def handle_create(self):
        """Store the config via the software_configs API.

        The resource's physical name is used as the config name.
        """
        props = dict(self.properties)
        props[self.NAME] = self.physical_resource_name()

        sc = self.heat().software_configs.create(**props)
        self.resource_id_set(sc.id)

    def handle_delete(self):
        """Delete the stored config, ignoring it if already gone."""

        if self.resource_id is None:
            return

        try:
            self.heat().software_configs.delete(self.resource_id)
        except Exception as ex:
            # Only a not-found error is tolerated; anything else re-raises.
            self.client_plugin().ignore_not_found(ex)

    def _resolve_attribute(self, name):
        """Resolve the "config" attribute.

        Returns the config value of the software config, or None if the
        software config no longer exists.
        """
        if name == self.CONFIG_ATTR and self.resource_id:
            try:
                return self.heat().software_configs.get(
                    self.resource_id).config
            except Exception as ex:
                # Treat a missing config as "no value"; re-raise any other
                # error instead of silently swallowing it.
                self.client_plugin().ignore_not_found(ex)
                return None
# Example #22
class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
    """An AWS-compatible auto scaling group resource."""

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        AVAILABILITY_ZONES, LAUNCH_CONFIGURATION_NAME, MAX_SIZE, MIN_SIZE,
        COOLDOWN, DESIRED_CAPACITY, HEALTH_CHECK_GRACE_PERIOD,
        HEALTH_CHECK_TYPE, LOAD_BALANCER_NAMES, VPCZONE_IDENTIFIER, TAGS,
        INSTANCE_ID,
    ) = (
        'AvailabilityZones', 'LaunchConfigurationName', 'MaxSize', 'MinSize',
        'Cooldown', 'DesiredCapacity', 'HealthCheckGracePeriod',
        'HealthCheckType', 'LoadBalancerNames', 'VPCZoneIdentifier', 'Tags',
        'InstanceId',
    )

    _TAG_KEYS = (
        TAG_KEY, TAG_VALUE,
    ) = (
        'Key', 'Value',
    )

    # Trailing commas make these genuine 1-tuples; without them both names
    # were bound to the bare string, unlike every other *_KEYS constant.
    _UPDATE_POLICY_SCHEMA_KEYS = (
        ROLLING_UPDATE,
    ) = (
        'AutoScalingRollingUpdate',
    )

    _ROLLING_UPDATE_SCHEMA_KEYS = (
        MIN_INSTANCES_IN_SERVICE, MAX_BATCH_SIZE, PAUSE_TIME
    ) = (
        'MinInstancesInService', 'MaxBatchSize', 'PauseTime'
    )

    ATTRIBUTES = (
        INSTANCE_LIST,
    ) = (
        'InstanceList',
    )

    properties_schema = {
        AVAILABILITY_ZONES: properties.Schema(
            properties.Schema.LIST,
            _('Not Implemented.'),
            required=True
        ),
        LAUNCH_CONFIGURATION_NAME: properties.Schema(
            properties.Schema.STRING,
            _('The reference to a LaunchConfiguration resource.'),
            update_allowed=True
        ),
        INSTANCE_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of an existing instance to use to '
              'create the Auto Scaling group. If specify this property, '
              'will create the group use an existing instance instead of '
              'a launch configuration.'),
            constraints=[
                constraints.CustomConstraint("nova.server")
            ]
        ),
        MAX_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of instances in the group.'),
            required=True,
            update_allowed=True
        ),
        MIN_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Minimum number of instances in the group.'),
            required=True,
            update_allowed=True
        ),
        COOLDOWN: properties.Schema(
            properties.Schema.INTEGER,
            _('Cooldown period, in seconds.'),
            update_allowed=True
        ),
        DESIRED_CAPACITY: properties.Schema(
            properties.Schema.INTEGER,
            _('Desired initial number of instances.'),
            update_allowed=True
        ),
        HEALTH_CHECK_GRACE_PERIOD: properties.Schema(
            properties.Schema.INTEGER,
            _('Not Implemented.'),
            implemented=False
        ),
        HEALTH_CHECK_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Not Implemented.'),
            constraints=[
                constraints.AllowedValues(['EC2', 'ELB']),
            ],
            implemented=False
        ),
        LOAD_BALANCER_NAMES: properties.Schema(
            properties.Schema.LIST,
            _('List of LoadBalancer resources.')
        ),
        VPCZONE_IDENTIFIER: properties.Schema(
            properties.Schema.LIST,
            _('Use only with Neutron, to list the internal subnet to '
              'which the instance will be attached; '
              'needed only if multiple exist; '
              'list length must be exactly 1.'),
            schema=properties.Schema(
                properties.Schema.STRING,
                _('UUID of the internal subnet to which the instance '
                  'will be attached.')
            )
        ),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            _('Tags to attach to this group.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    TAG_KEY: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                    TAG_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        required=True
                    ),
                },
            )
        ),
    }

    attributes_schema = {
        INSTANCE_LIST: attributes.Schema(
            _("A comma-delimited list of server ip addresses. "
              "(Heat extension)."),
            type=attributes.Schema.STRING
        ),
    }

    rolling_update_schema = {
        MIN_INSTANCES_IN_SERVICE: properties.Schema(properties.Schema.INTEGER,
                                                    default=0),
        MAX_BATCH_SIZE: properties.Schema(properties.Schema.INTEGER,
                                          default=1),
        PAUSE_TIME: properties.Schema(properties.Schema.STRING,
                                      default='PT0S')
    }

    update_policy_schema = {
        ROLLING_UPDATE: properties.Schema(properties.Schema.MAP,
                                          schema=rolling_update_schema)
    }

    def handle_create(self):
        """Validate the launch configuration and create the nested stack."""
        self.validate_launchconfig()
        return self.create_with_template(self.child_template())

    def _make_launch_config_resource(self, name, props):
        """Build an in-memory LaunchConfiguration resource from *props*."""
        lc_res_type = 'AWS::AutoScaling::LaunchConfiguration'
        lc_res_def = rsrc_defn.ResourceDefinition(name,
                                                  lc_res_type,
                                                  props)
        lc_res = resource.Resource(name, lc_res_def, self.stack)
        return lc_res

    def _get_conf_properties(self):
        """Return the (conf resource, properties) pair for group members.

        If InstanceId is set, the launch configuration is derived from the
        referenced server; otherwise the parent class resolves it from
        LaunchConfigurationName.
        """
        instance_id = self.properties.get(self.INSTANCE_ID)
        if instance_id:
            server = self.client_plugin('nova').get_server(instance_id)
            instance_props = {
                'ImageId': server.image['id'],
                'InstanceType': server.flavor['id'],
                'KeyName': server.key_name,
                'SecurityGroups': [sg['name']
                                   for sg in server.security_groups]
            }
            conf = self._make_launch_config_resource(self.name,
                                                     instance_props)
            props = function.resolve(conf.properties.data)
        else:
            conf, props = super(AutoScalingGroup, self)._get_conf_properties()

        vpc_zone_ids = self.properties.get(self.VPCZONE_IDENTIFIER)
        if vpc_zone_ids:
            # validate() guarantees exactly one subnet is given.
            props['SubnetId'] = vpc_zone_ids[0]

        return conf, props

    def check_create_complete(self, task):
        """Invoke the cooldown after creation succeeds."""
        done = super(AutoScalingGroup, self).check_create_complete(task)
        if done:
            self._cooldown_timestamp(
                "%s : %s" % (EXACT_CAPACITY, grouputils.get_size(self)))
        return done

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply template and property updates.

        If Properties has changed, update self.properties, so we get the new
        values during any subsequent adjustment.
        """
        if tmpl_diff:
            # parse update policy
            if 'UpdatePolicy' in tmpl_diff:
                up = json_snippet.update_policy(self.update_policy_schema,
                                                self.context)
                self.update_policy = up

        self.properties = json_snippet.properties(self.properties_schema,
                                                  self.context)
        if prop_diff:
            # Replace instances first if launch configuration has changed
            self._try_rolling_update(prop_diff)

        if self.properties[self.DESIRED_CAPACITY] is not None:
            self.adjust(self.properties[self.DESIRED_CAPACITY],
                        adjustment_type=EXACT_CAPACITY)
        else:
            current_capacity = grouputils.get_size(self)
            self.adjust(current_capacity, adjustment_type=EXACT_CAPACITY)

    def adjust(self, adjustment, adjustment_type=CHANGE_IN_CAPACITY,
               min_adjustment_step=None):
        """Adjust the size of the scaling group if the cooldown permits."""
        if self._cooldown_inprogress():
            LOG.info(_LI("%(name)s NOT performing scaling adjustment, "
                         "cooldown %(cooldown)s"),
                     {'name': self.name,
                      'cooldown': self.properties[self.COOLDOWN]})
            return

        capacity = grouputils.get_size(self)
        lower = self.properties[self.MIN_SIZE]
        upper = self.properties[self.MAX_SIZE]

        new_capacity = _calculate_new_capacity(capacity, adjustment,
                                               adjustment_type,
                                               min_adjustment_step,
                                               lower, upper)

        # send a notification before, on-error and on-success.
        notif = {
            'stack': self.stack,
            'adjustment': adjustment,
            'adjustment_type': adjustment_type,
            'capacity': capacity,
            'groupname': self.FnGetRefId(),
            'message': _("Start resizing the group %(group)s") % {
                'group': self.FnGetRefId()},
            'suffix': 'start',
        }
        notification.send(**notif)
        try:
            self.resize(new_capacity)
        except Exception as resize_ex:
            with excutils.save_and_reraise_exception():
                try:
                    notif.update({'suffix': 'error',
                                  'message': six.text_type(resize_ex),
                                  'capacity': grouputils.get_size(self),
                                  })
                    notification.send(**notif)
                except Exception:
                    LOG.exception(_LE('Failed sending error notification'))
        else:
            notif.update({
                'suffix': 'end',
                'capacity': new_capacity,
                'message': _("End resizing the group %(group)s") % {
                    'group': notif['groupname']},
            })
            notification.send(**notif)
        finally:
            # Record the cooldown stamp whether the resize succeeded or not.
            self._cooldown_timestamp("%s : %s" % (adjustment_type,
                                                  adjustment))

    def _tags(self):
        """Add Identifing Tags to all servers in the group.

        This is so the Dimensions received from cfn-push-stats all include
        the groupname and stack id.
        Note: the group name must match what is returned from FnGetRefId
        """
        autoscaling_tag = [{self.TAG_KEY: 'metering.AutoScalingGroupName',
                            self.TAG_VALUE: self.FnGetRefId()}]
        return super(AutoScalingGroup, self)._tags() + autoscaling_tag

    def validate(self):
        """Validate group sizing, VPC zone, and instance/launch-config use."""
        # check validity of group size
        min_size = self.properties[self.MIN_SIZE]
        max_size = self.properties[self.MAX_SIZE]

        if max_size < min_size:
            msg = _("MinSize can not be greater than MaxSize")
            raise exception.StackValidationFailed(message=msg)

        if min_size < 0:
            msg = _("The size of AutoScalingGroup can not be less than zero")
            raise exception.StackValidationFailed(message=msg)

        if self.properties[self.DESIRED_CAPACITY] is not None:
            desired_capacity = self.properties[self.DESIRED_CAPACITY]
            if desired_capacity < min_size or desired_capacity > max_size:
                msg = _("DesiredCapacity must be between MinSize and MaxSize")
                raise exception.StackValidationFailed(message=msg)

        # TODO(pasquier-s): once Neutron is able to assign subnets to
        # availability zones, it will be possible to specify multiple subnets.
        # For now, only one subnet can be specified. The bug #1096017 tracks
        # this issue.
        if (self.properties.get(self.VPCZONE_IDENTIFIER) and
                len(self.properties[self.VPCZONE_IDENTIFIER]) != 1):
            raise exception.NotSupported(feature=_("Anything other than one "
                                         "VPCZoneIdentifier"))
        # validate properties InstanceId and LaunchConfigurationName
        # for aws auto scaling group.
        # should provide just only one of
        if self.type() == 'AWS::AutoScaling::AutoScalingGroup':
            instanceId = self.properties.get(self.INSTANCE_ID)
            launch_config = self.properties.get(
                self.LAUNCH_CONFIGURATION_NAME)
            if bool(instanceId) == bool(launch_config):
                msg = _("Either 'InstanceId' or 'LaunchConfigurationName' "
                        "must be provided.")
                raise exception.StackValidationFailed(message=msg)

        super(AutoScalingGroup, self).validate()

    def _resolve_attribute(self, name):
        """Resolve the "InstanceList" attribute.

        heat extension: "InstanceList" returns comma delimited list of server
        ip addresses.
        """
        if name == self.INSTANCE_LIST:
            return u','.join(inst.FnGetAtt('PublicIp')
                             for inst in grouputils.get_members(self)) or None

    def child_template(self):
        """Return the nested stack template sized for the initial capacity."""
        if self.properties[self.DESIRED_CAPACITY]:
            num_instances = self.properties[self.DESIRED_CAPACITY]
        else:
            num_instances = self.properties[self.MIN_SIZE]
        return self._create_template(num_instances)
# Example #23
class SecurityService(resource.Resource):
    """A resource that implements security service of Manila.

    A security_service is a set of options that defines a security domain
    for a particular shared filesystem protocol, such as an
    Active Directory domain or a Kerberos domain.
    """

    support_status = support.SupportStatus(version='5.0.0')

    PROPERTIES = (
        NAME, TYPE, DNS_IP, SERVER, DOMAIN, USER,
        PASSWORD, DESCRIPTION
    ) = (
        'name', 'type', 'dns_ip', 'server', 'domain', 'user',
        'password', 'description'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Security service name.'),
            update_allowed=True
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Security service type.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ldap', 'kerberos',
                                           'active_directory'])
            ]
        ),
        DNS_IP: properties.Schema(
            properties.Schema.STRING,
            _('DNS IP address used inside tenant\'s network.'),
            update_allowed=True
        ),
        SERVER: properties.Schema(
            properties.Schema.STRING,
            _('Security service IP address or hostname.'),
            update_allowed=True
        ),
        DOMAIN: properties.Schema(
            properties.Schema.STRING,
            _('Security service domain.'),
            update_allowed=True
        ),
        USER: properties.Schema(
            properties.Schema.STRING,
            _('Security service user or group used by tenant.'),
            update_allowed=True
        ),
        PASSWORD: properties.Schema(
            properties.Schema.STRING,
            _('Password used by user.'),
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Security service description.'),
            update_allowed=True
        )
    }

    default_client_name = 'manila'

    entity = 'security_services'

    def handle_create(self):
        """Create the security service from all properties that are set.

        Stores the new service's id as the resource id.
        """
        # Manila's create API should not receive explicit None values, so
        # only pass through properties the template actually provided.
        args = {k: v for k, v in self.properties.items() if v is not None}
        security_service = self.client().security_services.create(**args)
        self.resource_id_set(security_service.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed properties to Manila (no-op if none)."""
        if prop_diff:
            self.client().security_services.update(self.resource_id,
                                                   **prop_diff)
# ---- Example #24 ----
class Pool(neutron.NeutronResource):
    """A resource for managing load balancer pools in Neutron.

    A load balancing pool is a logical set of devices, such as web servers,
    that you group together to receive and process traffic. The loadbalancing
    function chooses a member of the pool according to the configured load
    balancing method to handle the new requests or connections received on the
    VIP address. There is only one pool for a VIP.
    """

    # Neutron must advertise the LBaaS (v1) extension for this resource.
    required_service_extension = 'lbaas'

    PROPERTIES = (
        PROTOCOL,
        SUBNET_ID,
        SUBNET,
        LB_METHOD,
        NAME,
        DESCRIPTION,
        ADMIN_STATE_UP,
        VIP,
        MONITORS,
        PROVIDER,
    ) = (
        'protocol',
        'subnet_id',
        'subnet',
        'lb_method',
        'name',
        'description',
        'admin_state_up',
        'vip',
        'monitors',
        'provider',
    )

    # Keys of the nested VIP map property.
    _VIP_KEYS = (
        VIP_NAME,
        VIP_DESCRIPTION,
        VIP_SUBNET,
        VIP_ADDRESS,
        VIP_CONNECTION_LIMIT,
        VIP_PROTOCOL_PORT,
        VIP_SESSION_PERSISTENCE,
        VIP_ADMIN_STATE_UP,
    ) = (
        'name',
        'description',
        'subnet',
        'address',
        'connection_limit',
        'protocol_port',
        'session_persistence',
        'admin_state_up',
    )

    # Keys of the VIP's nested session_persistence map.
    _VIP_SESSION_PERSISTENCE_KEYS = (
        VIP_SESSION_PERSISTENCE_TYPE,
        VIP_SESSION_PERSISTENCE_COOKIE_NAME,
    ) = (
        'type',
        'cookie_name',
    )

    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR,
        NAME_ATTR,
        PROTOCOL_ATTR,
        SUBNET_ID_ATTR,
        LB_METHOD_ATTR,
        DESCRIPTION_ATTR,
        TENANT_ID,
        VIP_ATTR,
        PROVIDER_ATTR,
    ) = (
        'admin_state_up',
        'name',
        'protocol',
        'subnet_id',
        'lb_method',
        'description',
        'tenant_id',
        'vip',
        'provider',
    )

    properties_schema = {
        PROTOCOL:
        properties.Schema(properties.Schema.STRING,
                          _('Protocol for balancing.'),
                          required=True,
                          constraints=[
                              constraints.AllowedValues(
                                  ['TCP', 'HTTP', 'HTTPS']),
                          ]),
        # SUBNET_ID is hidden in favour of SUBNET; translation_rules below
        # copies any supplied value into SUBNET.
        SUBNET_ID:
        properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                message=_('Use property %s.') % SUBNET,
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED, version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.subnet')]),
        SUBNET:
        properties.Schema(
            properties.Schema.STRING,
            _('The subnet for the port on which the members '
              'of the pool will be connected.'),
            support_status=support.SupportStatus(version='2014.2'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.subnet')]),
        LB_METHOD:
        properties.Schema(
            properties.Schema.STRING,
            _('The algorithm used to distribute load between the members of '
              'the pool.'),
            required=True,
            constraints=[
                constraints.AllowedValues(
                    ['ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP']),
            ],
            update_allowed=True),
        NAME:
        properties.Schema(properties.Schema.STRING, _('Name of the pool.')),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description of the pool.'),
                          update_allowed=True),
        ADMIN_STATE_UP:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('The administrative state of this pool.'),
                          default=True,
                          update_allowed=True),
        PROVIDER:
        properties.Schema(
            properties.Schema.STRING,
            _('LBaaS provider to implement this load balancer instance.'),
            support_status=support.SupportStatus(version='5.0.0'),
            constraints=[constraints.CustomConstraint('neutron.lb.provider')],
        ),
        VIP:
        properties.Schema(
            properties.Schema.MAP,
            _('IP address and port of the pool.'),
            schema={
                VIP_NAME:
                properties.Schema(properties.Schema.STRING,
                                  _('Name of the vip.')),
                VIP_DESCRIPTION:
                properties.Schema(properties.Schema.STRING,
                                  _('Description of the vip.')),
                VIP_SUBNET:
                properties.Schema(
                    properties.Schema.STRING,
                    _('Subnet of the vip.'),
                    constraints=[
                        constraints.CustomConstraint('neutron.subnet')
                    ]),
                VIP_ADDRESS:
                properties.Schema(
                    properties.Schema.STRING,
                    _('IP address of the vip.'),
                    constraints=[constraints.CustomConstraint('ip_addr')]),
                VIP_CONNECTION_LIMIT:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('The maximum number of connections per second '
                      'allowed for the vip.')),
                VIP_PROTOCOL_PORT:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('TCP port on which to listen for client traffic '
                      'that is associated with the vip address.'),
                    required=True),
                VIP_SESSION_PERSISTENCE:
                properties.Schema(
                    properties.Schema.MAP,
                    _('Configuration of session persistence.'),
                    schema={
                        VIP_SESSION_PERSISTENCE_TYPE:
                        properties.Schema(
                            properties.Schema.STRING,
                            _('Method of implementation of session '
                              'persistence feature.'),
                            required=True,
                            constraints=[
                                constraints.AllowedValues(
                                    ['SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE'])
                            ]),
                        VIP_SESSION_PERSISTENCE_COOKIE_NAME:
                        properties.Schema(
                            properties.Schema.STRING,
                            _('Name of the cookie, '
                              'required if type is APP_COOKIE.'))
                    }),
                VIP_ADMIN_STATE_UP:
                properties.Schema(properties.Schema.BOOLEAN,
                                  _('The administrative state of this vip.'),
                                  default=True),
            },
            required=True),
        MONITORS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of health monitors associated with the pool.'),
            default=[],
            update_allowed=True),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR:
        attributes.Schema(_('The administrative state of this pool.'),
                          type=attributes.Schema.STRING),
        NAME_ATTR:
        attributes.Schema(_('Name of the pool.'),
                          type=attributes.Schema.STRING),
        PROTOCOL_ATTR:
        attributes.Schema(_('Protocol to balance.'),
                          type=attributes.Schema.STRING),
        SUBNET_ID_ATTR:
        attributes.Schema(_(
            'The subnet for the port on which the members of the pool '
            'will be connected.'),
                          type=attributes.Schema.STRING),
        LB_METHOD_ATTR:
        attributes.Schema(_(
            'The algorithm used to distribute load between the members '
            'of the pool.'),
                          type=attributes.Schema.STRING),
        DESCRIPTION_ATTR:
        attributes.Schema(_('Description of the pool.'),
                          type=attributes.Schema.STRING),
        TENANT_ID:
        attributes.Schema(_('Tenant owning the pool.'),
                          type=attributes.Schema.STRING),
        VIP_ATTR:
        attributes.Schema(_('Vip associated with the pool.'),
                          type=attributes.Schema.MAP),
        PROVIDER_ATTR:
        attributes.Schema(
            _('Provider implementing this load balancer instance.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING,
        ),
    }

    def translation_rules(self, props):
        """Copy the hidden/deprecated subnet_id property into subnet.

        Templates that still use SUBNET_ID keep working; the rest of the
        resource only ever reads SUBNET.
        """
        return [
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.SUBNET],
                                        value_path=[self.SUBNET_ID])
        ]

    def validate(self):
        """Validate the vip's session persistence configuration.

        APP_COOKIE persistence additionally requires cookie_name; the other
        persistence types need no extra properties.
        """
        res = super(Pool, self).validate()
        if res:
            return res
        session_p = self.properties[self.VIP].get(self.VIP_SESSION_PERSISTENCE)
        if session_p is None:
            # session persistence is not configured, skip validation
            return

        persistence_type = session_p[self.VIP_SESSION_PERSISTENCE_TYPE]
        if persistence_type == 'APP_COOKIE':
            if session_p.get(self.VIP_SESSION_PERSISTENCE_COOKIE_NAME):
                return

            msg = _('Property cookie_name is required, when '
                    'session_persistence type is set to APP_COOKIE.')
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the pool, associate health monitors, then create its vip.

        The vip id is kept in resource metadata (the pool id is the resource
        id) so later operations can address both neutron objects.
        """
        properties = self.prepare_properties(self.properties,
                                             self.physical_resource_name())
        # Resolve the subnet name/ID into the 'subnet_id' key neutron expects.
        self.client_plugin().resolve_subnet(properties, self.SUBNET,
                                            'subnet_id')
        # VIP and MONITORS are handled with separate API calls below, so they
        # must not be part of the create_pool request body.
        vip_properties = properties.pop(self.VIP)
        monitors = properties.pop(self.MONITORS)

        pool = self.client().create_pool({'pool': properties})['pool']
        self.resource_id_set(pool['id'])

        for monitor in monitors:
            self.client().associate_health_monitor(
                pool['id'], {'health_monitor': {
                    'id': monitor
                }})

        vip_arguments = self.prepare_properties(vip_properties,
                                                '%s.vip' % (self.name, ))

        session_p = vip_arguments.get(self.VIP_SESSION_PERSISTENCE)
        if session_p is not None:
            prepared_props = self.prepare_properties(session_p, None)
            vip_arguments['session_persistence'] = prepared_props

        # The vip always uses the pool's balancing protocol.
        vip_arguments['protocol'] = self.properties[self.PROTOCOL]

        # Default the vip onto the pool's subnet unless one was given.
        if vip_arguments.get(self.VIP_SUBNET) is None:
            vip_arguments['subnet_id'] = properties[self.SUBNET_ID]
        else:
            vip_arguments['subnet_id'] = self.client_plugin().resolve_subnet(
                vip_arguments, self.VIP_SUBNET, 'subnet_id')

        vip_arguments['pool_id'] = pool['id']
        vip = self.client().create_vip({'vip': vip_arguments})['vip']

        # The vip has no resource id of its own; remember it in metadata.
        self.metadata_set({'vip': vip['id']})

    def _show_resource(self):
        """Return the pool's current representation from neutron."""
        return self.client().show_pool(self.resource_id)['pool']

    def check_create_complete(self, data):
        """Poll pool and vip status until both are ACTIVE.

        Returns False while either object is still PENDING_CREATE and raises
        if either ends up in ERROR or an unknown state.
        """
        attributes = self._show_resource()
        status = attributes['status']
        if status == 'PENDING_CREATE':
            return False
        elif status == 'ACTIVE':
            # Pool is up; now check the vip created alongside it.
            vip_attributes = self.client().show_vip(
                self.metadata_get()['vip'])['vip']
            vip_status = vip_attributes['status']
            if vip_status == 'PENDING_CREATE':
                return False
            if vip_status == 'ACTIVE':
                return True
            if vip_status == 'ERROR':
                raise exception.ResourceInError(
                    resource_status=vip_status,
                    status_reason=_('error in vip'))
            raise exception.ResourceUnknownStatus(
                resource_status=vip_status,
                result=_('Pool creation failed due to vip'))
        elif status == 'ERROR':
            raise exception.ResourceInError(resource_status=status,
                                            status_reason=_('error in pool'))
        else:
            raise exception.ResourceUnknownStatus(
                resource_status=status, result=_('Pool creation failed'))

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply monitor (dis)associations, then update remaining properties.

        MONITORS is popped from prop_diff and reconciled via set difference;
        anything left over is sent in a single update_pool call.
        """
        if prop_diff:
            if self.MONITORS in prop_diff:
                monitors = set(prop_diff.pop(self.MONITORS))
                old_monitors = set(self.properties[self.MONITORS])
                for monitor in old_monitors - monitors:
                    self.client().disassociate_health_monitor(
                        self.resource_id, monitor)
                for monitor in monitors - old_monitors:
                    self.client().associate_health_monitor(
                        self.resource_id, {'health_monitor': {
                            'id': monitor
                        }})

            if prop_diff:
                self.client().update_pool(self.resource_id,
                                          {'pool': prop_diff})

    def _resolve_attribute(self, name):
        """Resolve the vip attribute from metadata; defer the rest to base."""
        if name == self.VIP_ATTR:
            return self.client().show_vip(self.metadata_get()['vip'])['vip']
        return super(Pool, self)._resolve_attribute(name)

    def handle_delete(self):
        """Start deletion; returns a progress object for check_delete_complete.

        If there is no resource id the whole deletion is already done; if
        there is no metadata the vip was never created, so only the pool
        needs deleting.
        """
        if not self.resource_id:
            prg = progress.PoolDeleteProgress(True)
            return prg

        prg = progress.PoolDeleteProgress()
        if not self.metadata_get():
            prg.vip['delete_called'] = True
            prg.vip['deleted'] = True
        return prg

    def _delete_vip(self):
        """Delete the vip; True if it was already gone."""
        return self._not_found_in_call(self.client().delete_vip,
                                       self.metadata_get()['vip'])

    def _check_vip_deleted(self):
        """Return True once neutron no longer knows the vip."""
        return self._not_found_in_call(self.client().show_vip,
                                       self.metadata_get()['vip'])

    def _delete_pool(self):
        """Delete the pool; True if it was already gone."""
        return self._not_found_in_call(self.client().delete_pool,
                                       self.resource_id)

    def check_delete_complete(self, prg):
        """Advance the staged delete: vip first, then the pool.

        Each call performs at most one step and records it on the progress
        object, so deletion proceeds across successive polls.
        """
        if not prg.vip['delete_called']:
            prg.vip['deleted'] = self._delete_vip()
            prg.vip['delete_called'] = True
            return False
        if not prg.vip['deleted']:
            prg.vip['deleted'] = self._check_vip_deleted()
            return False
        if not prg.pool['delete_called']:
            prg.pool['deleted'] = self._delete_pool()
            prg.pool['delete_called'] = True
            return prg.pool['deleted']
        if not prg.pool['deleted']:
            prg.pool['deleted'] = super(Pool, self).check_delete_complete(True)
            return prg.pool['deleted']
        return True
# ---- Example #25 ----
class Server(stack_user.StackUser):

    PROPERTIES = (NAME, IMAGE, BLOCK_DEVICE_MAPPING, FLAVOR,
                  FLAVOR_UPDATE_POLICY, IMAGE_UPDATE_POLICY, KEY_NAME,
                  ADMIN_USER, AVAILABILITY_ZONE, SECURITY_GROUPS, NETWORKS,
                  SCHEDULER_HINTS, METADATA, USER_DATA_FORMAT, USER_DATA,
                  RESERVATION_ID, CONFIG_DRIVE, DISK_CONFIG, PERSONALITY,
                  ADMIN_PASS, SOFTWARE_CONFIG_TRANSPORT) = (
                      'name', 'image', 'block_device_mapping', 'flavor',
                      'flavor_update_policy', 'image_update_policy',
                      'key_name', 'admin_user', 'availability_zone',
                      'security_groups', 'networks', 'scheduler_hints',
                      'metadata', 'user_data_format', 'user_data',
                      'reservation_id', 'config_drive', 'diskConfig',
                      'personality', 'admin_pass', 'software_config_transport')

    _BLOCK_DEVICE_MAPPING_KEYS = (
        BLOCK_DEVICE_MAPPING_DEVICE_NAME,
        BLOCK_DEVICE_MAPPING_VOLUME_ID,
        BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
        BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
        BLOCK_DEVICE_MAPPING_DELETE_ON_TERM,
    ) = (
        'device_name',
        'volume_id',
        'snapshot_id',
        'volume_size',
        'delete_on_termination',
    )

    _NETWORK_KEYS = (
        NETWORK_UUID,
        NETWORK_ID,
        NETWORK_FIXED_IP,
        NETWORK_PORT,
    ) = (
        'uuid',
        'network',
        'fixed_ip',
        'port',
    )

    _SOFTWARE_CONFIG_FORMATS = (HEAT_CFNTOOLS, RAW,
                                SOFTWARE_CONFIG) = ('HEAT_CFNTOOLS', 'RAW',
                                                    'SOFTWARE_CONFIG')

    _SOFTWARE_CONFIG_TRANSPORTS = (POLL_SERVER_CFN, POLL_SERVER_HEAT,
                                   POLL_TEMP_URL) = ('POLL_SERVER_CFN',
                                                     'POLL_SERVER_HEAT',
                                                     'POLL_TEMP_URL')

    ATTRIBUTES = (
        NAME_ATTR,
        SHOW,
        ADDRESSES,
        NETWORKS_ATTR,
        FIRST_ADDRESS,
        INSTANCE_NAME,
        ACCESSIPV4,
        ACCESSIPV6,
        CONSOLE_URLS,
    ) = (
        'name',
        'show',
        'addresses',
        'networks',
        'first_address',
        'instance_name',
        'accessIPv4',
        'accessIPv6',
        'console_urls',
    )
    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Server name.'),
                          update_allowed=True),
        IMAGE:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID or name of the image to boot with.'),
            constraints=[constraints.CustomConstraint('glance.image')],
            update_allowed=True),
        BLOCK_DEVICE_MAPPING:
        properties.Schema(
            properties.Schema.LIST,
            _('Block device mappings for this server.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    BLOCK_DEVICE_MAPPING_DEVICE_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('A device name where the volume will be '
                          'attached in the system at /dev/device_name. '
                          'This value is typically vda.'),
                        required=True),
                    BLOCK_DEVICE_MAPPING_VOLUME_ID:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The ID of the volume to boot from. Only one '
                          'of volume_id or snapshot_id should be '
                          'provided.'),
                        constraints=[
                            constraints.CustomConstraint('cinder.volume')
                        ]),
                    BLOCK_DEVICE_MAPPING_SNAPSHOT_ID:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The ID of the snapshot to create a volume '
                          'from.'),
                        constraints=[
                            constraints.CustomConstraint('cinder.snapshot')
                        ]),
                    BLOCK_DEVICE_MAPPING_VOLUME_SIZE:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('The size of the volume, in GB. It is safe to '
                          'leave this blank and have the Compute service '
                          'infer the size.')),
                    BLOCK_DEVICE_MAPPING_DELETE_ON_TERM:
                    properties.Schema(
                        properties.Schema.BOOLEAN,
                        _('Indicate whether the volume should be deleted '
                          'when the server is terminated.')),
                },
            )),
        FLAVOR:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID or name of the flavor to boot onto.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.CustomConstraint('nova.flavor')]),
        FLAVOR_UPDATE_POLICY:
        properties.Schema(
            properties.Schema.STRING,
            _('Policy on how to apply a flavor update; either by requesting '
              'a server resize or by replacing the entire server.'),
            default='RESIZE',
            constraints=[
                constraints.AllowedValues(['RESIZE', 'REPLACE']),
            ],
            update_allowed=True),
        IMAGE_UPDATE_POLICY:
        properties.Schema(
            properties.Schema.STRING,
            _('Policy on how to apply an image-id update; either by '
              'requesting a server rebuild or by replacing the entire server'),
            default='REPLACE',
            constraints=[
                constraints.AllowedValues(
                    ['REBUILD', 'REPLACE', 'REBUILD_PRESERVE_EPHEMERAL']),
            ],
            update_allowed=True),
        KEY_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Name of keypair to inject into the server.'),
            constraints=[constraints.CustomConstraint('nova.keypair')]),
        ADMIN_USER:
        properties.Schema(
            properties.Schema.STRING,
            _('Name of the administrative user to use on the server. '
              'This property will be removed from Juno in favor of the '
              'default cloud-init user set up for each image (e.g. "ubuntu" '
              'for Ubuntu 12.04+, "fedora" for Fedora 19+ and "cloud-user" '
              'for CentOS/RHEL 6.5).'),
            support_status=support.SupportStatus(status=support.DEPRECATED)),
        AVAILABILITY_ZONE:
        properties.Schema(
            properties.Schema.STRING,
            _('Name of the availability zone for server placement.')),
        SECURITY_GROUPS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of security group names or IDs. Cannot be used if '
              'neutron ports are associated with this server; assign '
              'security groups to the ports instead.'),
            default=[]),
        NETWORKS:
        properties.Schema(
            properties.Schema.LIST,
            _('An ordered list of nics to be added to this server, with '
              'information about connected networks, fixed ips, port etc.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NETWORK_UUID:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('ID of network to create a port on.'),
                        support_status=support.SupportStatus(
                            support.DEPRECATED,
                            _('Use property %s.') % NETWORK_ID),
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ]),
                    NETWORK_ID:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name or ID of network to create a port on.'),
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ]),
                    NETWORK_FIXED_IP:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Fixed IP address to specify for the port '
                          'created on the requested network.')),
                    NETWORK_PORT:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('ID of an existing port to associate with this '
                          'server.')),
                },
            ),
            update_allowed=True),
        SCHEDULER_HINTS:
        properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary key-value pairs specified by the client to help '
              'boot a server.')),
        METADATA:
        properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary key/value metadata to store for this server. Both '
              'keys and values must be 255 characters or less.  Non-string '
              'values will be serialized to JSON (and the serialized '
              'string must be 255 characters or less).'),
            update_allowed=True),
        USER_DATA_FORMAT:
        properties.Schema(
            properties.Schema.STRING,
            _('How the user_data should be formatted for the server. For '
              'HEAT_CFNTOOLS, the user_data is bundled as part of the '
              'heat-cfntools cloud-init boot configuration data. For RAW '
              'the user_data is passed to Nova unmodified. '
              'For SOFTWARE_CONFIG user_data is bundled as part of the '
              'software config data, and metadata is derived from any '
              'associated SoftwareDeployment resources.'),
            default=HEAT_CFNTOOLS,
            constraints=[
                constraints.AllowedValues(_SOFTWARE_CONFIG_FORMATS),
            ]),
        SOFTWARE_CONFIG_TRANSPORT:
        properties.Schema(
            properties.Schema.STRING,
            _('How the server should receive the metadata required for '
              'software configuration. POLL_SERVER_CFN will allow calls to '
              'the cfn API action DescribeStackResource authenticated with '
              'the provided keypair. POLL_SERVER_HEAT will allow calls to '
              'the Heat API resource-show using the provided keystone '
              'credentials. POLL_TEMP_URL will create and populate a '
              'Swift TempURL with metadata for polling.'),
            default=POLL_SERVER_CFN,
            constraints=[
                constraints.AllowedValues(_SOFTWARE_CONFIG_TRANSPORTS),
            ]),
        USER_DATA:
        properties.Schema(properties.Schema.STRING,
                          _('User data script to be executed by cloud-init.'),
                          default=''),
        RESERVATION_ID:
        properties.Schema(properties.Schema.STRING,
                          _('A UUID for the set of servers being requested.')),
        CONFIG_DRIVE:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('If True, enable config drive on the server.')),
        DISK_CONFIG:
        properties.Schema(
            properties.Schema.STRING,
            _('Control how the disk is partitioned when the server is '
              'created.'),
            constraints=[
                constraints.AllowedValues(['AUTO', 'MANUAL']),
            ]),
        PERSONALITY:
        properties.Schema(
            properties.Schema.MAP,
            _('A map of files to create/overwrite on the server upon boot. '
              'Keys are file names and values are the file contents.'),
            default={}),
        ADMIN_PASS:
        properties.Schema(properties.Schema.STRING,
                          _('The administrator password for the server.'),
                          required=False,
                          update_allowed=True),
    }

    # Attribute definitions exposed to templates via get_attr; the
    # descriptions appear in generated resource reference documentation.
    attributes_schema = {
        NAME_ATTR:
        attributes.Schema(_('Name of the server.')),
        SHOW:
        attributes.Schema(
            _('A dict of all server details as returned by the API.')),
        ADDRESSES:
        attributes.Schema(
            _('A dict of all network addresses with corresponding port_id. '
              'The port ID may be obtained through the following expression: '
              '"{get_attr: [<server>, addresses, <network name>, 0, port]}".')
        ),
        NETWORKS_ATTR:
        attributes.Schema(
            _('A dict of assigned network addresses of the form: '
              '{"public": [ip1, ip2...], "private": [ip3, ip4]}.')),
        FIRST_ADDRESS:
        attributes.Schema(
            _('Convenience attribute to fetch the first assigned network '
              'address, or an empty string if nothing has been assigned at '
              'this time. Result may not be predictable if the server has '
              'addresses from more than one network.'),
            support_status=support.SupportStatus(
                status=support.DEPRECATED,
                message=_('Use the networks attribute instead of '
                          'first_address. For example: "{get_attr: '
                          '[<server name>, networks, <network name>, 0]}"'))),
        INSTANCE_NAME:
        attributes.Schema(_('AWS compatible instance name.')),
        ACCESSIPV4:
        attributes.Schema(
            _('The manually assigned alternative public IPv4 address '
              'of the server.')),
        ACCESSIPV6:
        attributes.Schema(
            _('The manually assigned alternative public IPv6 address '
              'of the server.')),
        CONSOLE_URLS:
        attributes.Schema(
            _("URLs of server's consoles. "
              "To get a specific console type, the requested type "
              "can be specified as parameter to the get_attr function, "
              "e.g. get_attr: [ <server>, console_urls, novnc ]. "
              "Currently supported types are "
              "novnc, xvpvnc, spice-html5, rdp-html5, serial."),
            support_status=support.SupportStatus(version='2015.1')),
    }

    # Server host names are limited to 53 characters due to the typical
    # default Linux HOST_NAME_MAX of 64, minus the ".novalocal" suffix
    # nova appends to the name.
    physical_resource_name_limit = 53

    # Default client plugin used by self.client() / self.client_plugin().
    default_client_name = 'nova'

    def __init__(self, name, json_snippet, stack):
        """Initialize the server resource.

        When user_data_format is SOFTWARE_CONFIG, register the
        signal-access handler so software deployments can signal back
        through this resource.
        """
        super(Server, self).__init__(name, json_snippet, stack)
        if self.user_data_software_config():
            self._register_access_key()

    def _server_name(self):
        """Return the explicit name property if set, otherwise the
        generated physical resource name."""
        explicit_name = self.properties.get(self.NAME)
        return explicit_name or self.physical_resource_name()

    def _config_drive(self):
        """Return the config_drive property value.

        This method is overridden by the derived CloudServer resource.
        """
        return self.properties.get(self.CONFIG_DRIVE)

    def _populate_deployments_metadata(self, meta):
        """Add deployments and os-collect-config sections to metadata.

        Ensures a 'deployments' key exists, then, depending on the
        configured software config transport, adds the os-collect-config
        section that tells the agent on the server how to poll for
        metadata. Finally stores the merged metadata on the resource.
        """
        meta['deployments'] = meta.get('deployments', [])
        if self.transport_poll_server_heat():
            # Agent polls the Heat API directly with stack-user creds.
            meta['os-collect-config'] = {
                'heat': {
                    'user_id': self._get_user_id(),
                    'password': self.password,
                    'auth_url': self.context.auth_url,
                    'project_id': self.stack.stack_user_project_id,
                    'stack_id': self.stack.identifier().stack_path(),
                    'resource_name': self.name
                }
            }
        elif self.transport_poll_server_cfn():
            # Agent polls the CFN-compatible metadata server with an
            # ec2 keypair scoped to this resource.
            meta['os-collect-config'] = {
                'cfn': {
                    'metadata_url':
                    '%s/v1/' % cfg.CONF.heat_metadata_server_url,
                    'access_key_id': self.access_key,
                    'secret_access_key': self.secret_key,
                    'stack_name': self.stack.name,
                    'path': '%s.Metadata' % self.name
                }
            }
        elif self.transport_poll_temp_url():
            # Metadata is published as a Swift object which the agent
            # polls via a TempURL; the PUT URL and object name are kept
            # in resource data so later updates can overwrite the object
            # and deletion can clean it up.
            container = self.physical_resource_name()
            object_name = str(uuid.uuid4())

            self.client('swift').put_container(container)

            url = self.client_plugin('swift').get_temp_url(container,
                                                           object_name,
                                                           method='GET')
            put_url = self.client_plugin('swift').get_temp_url(
                container, object_name)
            self.data_set('metadata_put_url', put_url)
            self.data_set('metadata_object_name', object_name)

            meta['os-collect-config'] = {'request': {'metadata_url': url}}
            self.client('swift').put_object(container, object_name,
                                            jsonutils.dumps(meta))
        self.metadata_set(meta)

    def _register_access_key(self):
        """Register a signal-access handler scoped to this resource.

        Only the resource which created the credential is allowed to
        signal with it.
        """
        def access_allowed(resource_name):
            return resource_name == self.name

        if self.transport_poll_server_cfn():
            credential_id = self.access_key
        elif self.transport_poll_server_heat():
            credential_id = self._get_user_id()
        else:
            return
        self.stack.register_access_allowed_handler(credential_id,
                                                   access_allowed)

    def _create_transport_credentials(self):
        """Create the credentials matching the configured transport.

        CFN polling uses a stack user with an ec2 keypair; Heat polling
        uses a stack user with a generated password.
        """
        if self.transport_poll_server_cfn():
            self._create_user()
            self._create_keypair()

        elif self.transport_poll_server_heat():
            # NOTE(review): the password is stored before the user is
            # created — presumably _create_user reads self.password;
            # confirm in the base stack-user implementation.
            self.password = uuid.uuid4().hex
            self._create_user()

        self._register_access_key()

    @property
    def access_key(self):
        """The ec2 access key id held in resource data, if any."""
        stored = self.data()
        return stored.get('access_key')

    @property
    def secret_key(self):
        """The ec2 secret access key held in resource data, if any."""
        stored = self.data()
        return stored.get('secret_key')

    @property
    def password(self):
        """The transport password held in resource data, if any."""
        return self.data().get('password')

    @password.setter
    def password(self, password):
        """Store the password (redacted) or clear it when None."""
        if password is None:
            self.data_delete('password')
            return
        self.data_set('password', password, True)

    def user_data_raw(self):
        """Return True when the user_data_format property is RAW."""
        fmt = self.properties.get(self.USER_DATA_FORMAT)
        return fmt == self.RAW

    def user_data_software_config(self):
        """Return True when user_data_format is SOFTWARE_CONFIG."""
        fmt = self.properties.get(self.USER_DATA_FORMAT)
        return fmt == self.SOFTWARE_CONFIG

    def transport_poll_server_cfn(self):
        """Return True when the CFN metadata polling transport is used."""
        transport = self.properties.get(self.SOFTWARE_CONFIG_TRANSPORT)
        return transport == self.POLL_SERVER_CFN

    def transport_poll_server_heat(self):
        """Return True when the Heat API polling transport is used."""
        transport = self.properties.get(self.SOFTWARE_CONFIG_TRANSPORT)
        return transport == self.POLL_SERVER_HEAT

    def transport_poll_temp_url(self):
        """Return True when the Swift TempURL polling transport is used."""
        transport = self.properties.get(self.SOFTWARE_CONFIG_TRANSPORT)
        return transport == self.POLL_TEMP_URL

    def get_software_config(self, ud_content):
        """Resolve a software config ID to its stored config body.

        When no stored software config matches, the value is returned
        unchanged (NotFound is swallowed by the rpc client helper;
        other errors propagate).
        """
        try:
            config = self.rpc_client().show_software_config(
                self.context, ud_content)
            return config[rpc_api.SOFTWARE_CONFIG_CONFIG]
        except Exception as exc:
            self.rpc_client().ignore_error_named(exc, 'NotFound')
            return ud_content

    def handle_create(self):
        """Create the nova server.

        Builds userdata (resolving a software config ID when given),
        creates transport credentials and deployment metadata when
        user_data_format is SOFTWARE_CONFIG, resolves property values to
        API IDs, then boots the server. Returns the server object, which
        check_create_complete polls.
        """
        security_groups = self.properties.get(self.SECURITY_GROUPS)

        user_data_format = self.properties.get(self.USER_DATA_FORMAT)
        ud_content = self.properties.get(self.USER_DATA)
        if self.user_data_software_config() or self.user_data_raw():
            if uuidutils.is_uuid_like(ud_content):
                # attempt to load the userdata from software config
                ud_content = self.get_software_config(ud_content)

        metadata = self.metadata_get(True) or {}

        if self.user_data_software_config():
            self._create_transport_credentials()
            self._populate_deployments_metadata(metadata)

        # The explicit admin_user property wins over the deployment-wide
        # instance_user config option.
        if self.properties[self.ADMIN_USER]:
            instance_user = self.properties[self.ADMIN_USER]
        elif cfg.CONF.instance_user:
            instance_user = cfg.CONF.instance_user
        else:
            instance_user = None

        userdata = self.client_plugin().build_userdata(
            metadata,
            ud_content,
            instance_user=instance_user,
            user_data_format=user_data_format)

        flavor = self.properties[self.FLAVOR]
        availability_zone = self.properties[self.AVAILABILITY_ZONE]

        # Resolve image and flavor names to their API identifiers.
        image = self.properties.get(self.IMAGE)
        if image:
            image = self.client_plugin('glance').get_image_id(image)

        flavor_id = self.client_plugin().get_flavor_id(flavor)

        instance_meta = self.properties.get(self.METADATA)
        if instance_meta is not None:
            instance_meta = self.client_plugin().meta_serialize(instance_meta)

        scheduler_hints = self.properties.get(self.SCHEDULER_HINTS)
        nics = self._build_nics(self.properties.get(self.NETWORKS))
        block_device_mapping = self._build_block_device_mapping(
            self.properties.get(self.BLOCK_DEVICE_MAPPING))
        reservation_id = self.properties.get(self.RESERVATION_ID)
        disk_config = self.properties.get(self.DISK_CONFIG)
        admin_pass = self.properties.get(self.ADMIN_PASS) or None
        personality_files = self.properties.get(self.PERSONALITY)
        key_name = self.properties.get(self.KEY_NAME)

        server = None
        try:
            server = self.nova().servers.create(
                name=self._server_name(),
                image=image,
                flavor=flavor_id,
                key_name=key_name,
                security_groups=security_groups,
                userdata=userdata,
                meta=instance_meta,
                scheduler_hints=scheduler_hints,
                nics=nics,
                availability_zone=availability_zone,
                block_device_mapping=block_device_mapping,
                reservation_id=reservation_id,
                config_drive=self._config_drive(),
                disk_config=disk_config,
                files=personality_files,
                admin_pass=admin_pass)
        finally:
            # Avoid a race condition where the thread could be canceled
            # before the ID is stored
            if server is not None:
                self.resource_id_set(server.id)

        return server

    def check_create_complete(self, server):
        """Return True once the created server reaches ACTIVE."""
        return self._check_active(server)

    def _check_active(self, server):
        """Check whether the server has become ACTIVE.

        Returns False while the server is in a transitional (deferred)
        state, True once ACTIVE. ERROR raises ResourceInError with the
        fault details; any other state raises ResourceUnknownStatus.
        """
        cp = self.client_plugin()
        status = cp.get_status(server)
        if status != 'ACTIVE':
            # Refresh once in case the cached status is stale.
            cp.refresh_server(server)
            status = cp.get_status(server)

        if status in cp.deferred_server_statuses:
            return False
        if status == 'ACTIVE':
            return True
        if status == 'ERROR':
            fault = getattr(server, 'fault', {})
            raise resource.ResourceInError(
                resource_status=status,
                status_reason=_("Message: %(message)s, Code: %(code)s") % {
                    'message': fault.get('message', _('Unknown')),
                    'code': fault.get('code', _('Unknown'))
                })
        raise resource.ResourceUnknownStatus(
            resource_status=server.status,
            result=_('Server is not active'))

    def _check_server_status(self):
        """Fetch the server, verify it is ACTIVE, and return it."""
        server = self.nova().servers.get(self.resource_id)
        status = self.client_plugin().get_status(server)
        self._verify_check_conditions([
            {'attr': 'status', 'expected': 'ACTIVE', 'current': status},
        ])
        return server

    def handle_check(self):
        """Verify the server is ACTIVE for the stack check action."""
        self._check_server_status()

    @classmethod
    def _build_block_device_mapping(cls, bdm):
        """Convert BDM property entries into nova's string map.

        Each entry becomes "<id>:<source type>:<size>:<delete flag>"
        keyed by device name; returns None when no mappings are given.
        """
        if not bdm:
            return None
        bdm_dict = {}
        for mapping in bdm:
            snapshot_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
            if snapshot_id:
                parts = [snapshot_id, 'snap']
            else:
                parts = [mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID), '']

            volume_size = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE)
            parts.append(str(volume_size) if volume_size else '')

            delete = mapping.get(cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM)
            if delete:
                parts.append(str(delete))

            device_name = mapping.get(cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME)
            bdm_dict[device_name] = ':'.join(parts)

        return bdm_dict

    def _build_nics(self, networks):
        """Translate the networks property into nova NIC specs.

        Returns None when no networks are given, otherwise a list of
        dicts with optional 'net-id', 'v4-fixed-ip' and 'port-id' keys.
        """
        if not networks:
            return None

        nics = []
        for net_data in networks:
            nic = {}
            net_identifier = (net_data.get(self.NETWORK_UUID) or
                              net_data.get(self.NETWORK_ID))
            if net_identifier:
                if self.is_using_neutron():
                    nic['net-id'] = self.client_plugin(
                        'neutron').resolve_network(
                        net_data, self.NETWORK_ID, self.NETWORK_UUID)
                else:
                    nic['net-id'] = self.client_plugin(
                        'nova').get_nova_network_id(net_identifier)
            fixed_ip = net_data.get(self.NETWORK_FIXED_IP)
            if fixed_ip:
                nic['v4-fixed-ip'] = fixed_ip
            port = net_data.get(self.NETWORK_PORT)
            if port:
                nic['port-id'] = port
            nics.append(nic)
        return nics

    def _add_port_for_address(self, server):
        """Return server.addresses with a 'port' key on each address.

        The port is resolved through the server's interface list, keyed
        by the (ip, mac) pair of each interface's first fixed IP.
        """
        nets = copy.deepcopy(server.addresses)
        port_by_ip_mac = {}
        for iface in server.interface_list():
            key = (iface.fixed_ips[0]['ip_address'], iface.mac_addr)
            port_by_ip_mac[key] = iface.port_id
        for addr_list in nets.values():
            for addr in addr_list:
                addr['port'] = port_by_ip_mac.get(
                    (addr['addr'], addr['OS-EXT-IPS-MAC:mac_addr']))
        return nets

    def _resolve_attribute(self, name):
        """Resolve an attribute value for get_attr.

        FIRST_ADDRESS and NAME_ATTR are answered without fetching the
        server; every other attribute needs the live server object, and
        an empty string is returned when the server no longer exists.
        """
        if name == self.FIRST_ADDRESS:
            return self.client_plugin().server_to_ipaddress(
                self.resource_id) or ''
        if name == self.NAME_ATTR:
            return self._server_name()
        try:
            server = self.nova().servers.get(self.resource_id)
        except Exception as e:
            self.client_plugin().ignore_not_found(e)
            return ''
        if name == self.ADDRESSES:
            return self._add_port_for_address(server)
        if name == self.NETWORKS_ATTR:
            return server.networks
        if name == self.INSTANCE_NAME:
            return server._info.get('OS-EXT-SRV-ATTR:instance_name')
        if name == self.ACCESSIPV4:
            return server.accessIPv4
        if name == self.ACCESSIPV6:
            return server.accessIPv6
        if name == self.SHOW:
            return server._info
        if name == self.CONSOLE_URLS:
            return self.client_plugin('nova').get_console_urls(server)

    def add_dependencies(self, deps):
        """Add implicit dependencies on Subnets of attached networks."""
        super(Server, self).add_dependencies(deps)
        # Depend on any Subnet in this template with the same
        # network_id as the networks attached to this server.
        # It is not known which subnet a server might be assigned
        # to so all subnets in a network should be created before
        # the servers in that network.
        nets = self.properties.get(self.NETWORKS)
        if not nets:
            return
        for res in self.stack.itervalues():
            if res.has_interface('OS::Neutron::Subnet'):
                subnet_net = (res.properties.get(subnet.Subnet.NETWORK_ID)
                              or res.properties.get(subnet.Subnet.NETWORK))
                for net in nets:
                    # worry about network_id because that could be the match
                    # assigned to the subnet as well and could have been
                    # created by this stack. Regardless, the server should
                    # still wait on the subnet.
                    net_id = (net.get(self.NETWORK_ID)
                              or net.get(self.NETWORK_UUID))
                    if net_id and net_id == subnet_net:
                        deps += (self, res)
                        break

    def _get_network_matches(self, old_networks, new_networks):
        """Remove entries common to both lists and return them.

        Each new network dict is first normalized to carry the full key
        set so dict equality against old entries is meaningful. Both
        input lists are modified in place; the returned list holds the
        networks that need no update.
        """
        for net in new_networks:
            for key in ('port', 'network', 'fixed_ip', 'uuid'):
                net.setdefault(key)

        unchanged = []
        for net in old_networks:
            if net in new_networks:
                new_networks.remove(net)
                unchanged.append(net)
        for net in unchanged:
            old_networks.remove(net)
        return unchanged

    def update_networks_matching_iface_port(self, nets, interfaces):
        """Fill in the 'port' key of network defs from live interfaces.

        A network def is matched to an interface either by explicit
        port, by (network, fixed_ip) pair, or — failing both — by a bare
        def that names only the network.
        """
        def match_interface(port, net_id, ip):
            for net in nets:
                if (net.get('port') == port
                        or (net.get('fixed_ip') == ip
                            and net.get('network') == net_id)):
                    return net

        def match_bare_network(net_id):
            bare = {'port': None, 'network': net_id, 'fixed_ip': None}
            for net in nets:
                if net == bare:
                    return net

        for iface in interfaces:
            ip_addr = iface.fixed_ips[0]['ip_address']
            matched = (match_interface(iface.port_id, iface.net_id, ip_addr)
                       or match_bare_network(iface.net_id))
            if matched is not None:
                matched['port'] = iface.port_id

    def _update_flavor(self, server, prop_diff):
        """Resize the server to the flavor in prop_diff.

        Raises UpdateReplace when the flavor update policy demands
        replacement instead. Returns a TaskRunner driving the resize.
        """
        update_policy = (prop_diff.get(self.FLAVOR_UPDATE_POLICY) or
                         self.properties.get(self.FLAVOR_UPDATE_POLICY))
        new_flavor = prop_diff[self.FLAVOR]

        if update_policy == 'REPLACE':
            raise resource.UpdateReplace(self.name)

        flavor_id = self.client_plugin().get_flavor_id(new_flavor)
        if not server:
            server = self.nova().servers.get(self.resource_id)
        return scheduler.TaskRunner(self.client_plugin().resize, server,
                                    new_flavor, flavor_id)

    def _update_image(self, server, prop_diff):
        """Rebuild the server with the new image, honoring the policy.

        Raises UpdateReplace when the image update policy is REPLACE.
        Returns a TaskRunner driving the rebuild.
        """
        image_update_policy = (prop_diff.get(self.IMAGE_UPDATE_POLICY) or
                               self.properties.get(self.IMAGE_UPDATE_POLICY))
        if image_update_policy == 'REPLACE':
            raise resource.UpdateReplace(self.name)
        image = prop_diff[self.IMAGE]
        image_id = self.client_plugin('glance').get_image_id(image)
        if not server:
            server = self.nova().servers.get(self.resource_id)
        preserve_ephemeral = (
            image_update_policy == 'REBUILD_PRESERVE_EPHEMERAL')
        # New admin password from the diff wins over the existing one.
        password = (prop_diff.get(self.ADMIN_PASS)
                    or self.properties.get(self.ADMIN_PASS))
        return scheduler.TaskRunner(self.client_plugin().rebuild,
                                    server,
                                    image_id,
                                    password=password,
                                    preserve_ephemeral=preserve_ephemeral)

    def _update_networks(self, server, prop_diff):
        """Rework the server's interfaces to match the new networks.

        Returns a list of TaskRunners performing the detach/attach
        operations; the caller drives them to completion. With no
        networks specified at all the server falls back to a single
        automatically-chosen port, mirroring creation behavior.
        """
        checkers = []
        new_networks = prop_diff.get(self.NETWORKS)
        attach_first_free_port = False
        if not new_networks:
            new_networks = []
            attach_first_free_port = True
        old_networks = self.properties.get(self.NETWORKS)

        if not server:
            server = self.nova().servers.get(self.resource_id)
        interfaces = server.interface_list()

        # if old networks is None, it means that the server got first
        # free port. so we should detach this interface.
        if old_networks is None:
            for iface in interfaces:
                checker = scheduler.TaskRunner(server.interface_detach,
                                               iface.port_id)
                checkers.append(checker)

        # if we have any information in networks field, we should:
        # 1. find similar networks, if they exist
        # 2. remove these networks from new_networks and old_networks
        #    lists
        # 3. detach unmatched networks, which were present in old_networks
        # 4. attach unmatched networks, which were present in new_networks
        else:
            # remove not updated networks from old and new networks lists,
            # also get list these networks
            not_updated_networks = self._get_network_matches(
                old_networks, new_networks)

            self.update_networks_matching_iface_port(
                old_networks + not_updated_networks, interfaces)

            # according to nova interface-detach command detached port
            # will be deleted
            for net in old_networks:
                checker = scheduler.TaskRunner(server.interface_detach,
                                               net.get('port'))
                checkers.append(checker)

        # attach section similar for both variants that
        # were mentioned above

        for net in new_networks:
            if net.get('port'):
                checker = scheduler.TaskRunner(server.interface_attach,
                                               net['port'], None, None)
                checkers.append(checker)
            elif net.get('network'):
                checker = scheduler.TaskRunner(server.interface_attach, None,
                                               net['network'],
                                               net.get('fixed_ip'))
                checkers.append(checker)

        # if new_networks is None, we should attach first free port,
        # according to similar behavior during instance creation
        if attach_first_free_port:
            checker = scheduler.TaskRunner(server.interface_attach, None, None,
                                           None)
            checkers.append(checker)

        return checkers

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply property and metadata changes to the existing server.

        Metadata, admin password and name changes are applied inline;
        flavor, image and network changes produce TaskRunners which are
        returned for check_update_complete to drive.
        """
        if 'Metadata' in tmpl_diff:
            self.metadata_set(tmpl_diff['Metadata'])

        checkers = []
        # The server object is fetched lazily and shared between the
        # individual update handlers below.
        server = None

        if self.METADATA in prop_diff:
            server = self.nova().servers.get(self.resource_id)
            self.client_plugin().meta_update(server, prop_diff[self.METADATA])

        if self.FLAVOR in prop_diff:
            checkers.append(self._update_flavor(server, prop_diff))

        if self.IMAGE in prop_diff:
            checkers.append(self._update_image(server, prop_diff))
        elif self.ADMIN_PASS in prop_diff:
            # A rebuild already applies the new password, so only change
            # it directly when no image update happens.
            if not server:
                server = self.nova().servers.get(self.resource_id)
            server.change_password(prop_diff[self.ADMIN_PASS])

        if self.NAME in prop_diff:
            if not server:
                server = self.nova().servers.get(self.resource_id)
            self.client_plugin().rename(server, prop_diff[self.NAME])

        if self.NETWORKS in prop_diff:
            checkers.extend(self._update_networks(server, prop_diff))

        # Optimization: make sure the first task is started before
        # check_update_complete.
        if checkers:
            checkers[0].start()

        return checkers

    def check_update_complete(self, checkers):
        """Push all checkers to completion in list order.

        Each task is started on first sight; returns True only when
        every task reports done.
        """
        for task in checkers:
            if not task.started():
                task.start()
            if not task.step():
                return False
        return True

    def metadata_update(self, new_metadata=None):
        """Refresh the metadata when new_metadata is None."""
        if new_metadata is not None:
            return
        # Re-resolve the template metadata and merge it with the
        # current resource metadata.  This is necessary because the
        # attributes referenced in the template metadata may change
        # and the resource itself adds keys to the metadata which
        # are not specified in the template (e.g the deployments data)
        meta = self.metadata_get(refresh=True) or {}
        meta.update(self.t.metadata())
        self.metadata_set(meta)

    @staticmethod
    def _check_maximum(count, maximum, msg):
        """Raise StackValidationFailed if count exceeds maximum.

        A maximum of -1 indicates that there is no limit.
        """
        if maximum == -1:
            return
        if count > maximum:
            raise exception.StackValidationFailed(message=msg)

    def validate(self):
        """Validate the provided server properties.

        Checks block device mappings (volume_id xor snapshot_id),
        image/bootable-volume presence, deprecated network 'uuid' vs
        'network' usage, security_groups vs explicit port conflicts,
        and metadata/personality sizes against the provider's absolute
        limits.

        Raises StackValidationFailed or ResourcePropertyConflict on any
        violation.
        """
        super(Server, self).validate()

        # either volume_id or snapshot_id needs to be specified, but not both
        # for block device mapping.
        bdm = self.properties.get(self.BLOCK_DEVICE_MAPPING) or []
        bootable_vol = False
        for mapping in bdm:
            device_name = mapping[self.BLOCK_DEVICE_MAPPING_DEVICE_NAME]
            if device_name == 'vda':
                bootable_vol = True

            volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
            snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
            if volume_id and snapshot_id:
                raise exception.ResourcePropertyConflict(
                    self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
                    self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
            if not volume_id and not snapshot_id:
                msg = _('Either volume_id or snapshot_id must be specified for'
                        ' device mapping %s') % device_name
                raise exception.StackValidationFailed(message=msg)

        # make sure the image exists if specified.
        image = self.properties.get(self.IMAGE)
        if not image and not bootable_vol:
            msg = _('Neither image nor bootable volume is specified for'
                    ' instance %s') % self.name
            raise exception.StackValidationFailed(message=msg)

        # network properties 'uuid' and 'network' shouldn't be used
        # both at once for all networks
        networks = self.properties.get(self.NETWORKS) or []
        # record if any networks include explicit ports
        networks_with_port = False
        for network in networks:
            networks_with_port = (networks_with_port
                                  or network.get(self.NETWORK_PORT))
            if network.get(self.NETWORK_UUID) and network.get(self.NETWORK_ID):
                msg = _('Properties "%(uuid)s" and "%(id)s" are both set '
                        'to the network "%(network)s" for the server '
                        '"%(server)s". The "%(uuid)s" property is deprecated. '
                        'Use only "%(id)s" property.'
                        '') % dict(uuid=self.NETWORK_UUID,
                                   id=self.NETWORK_ID,
                                   network=network[self.NETWORK_ID],
                                   server=self.name)
                raise exception.StackValidationFailed(message=msg)
            elif network.get(self.NETWORK_UUID):
                LOG.info(
                    _LI('For the server "%(server)s" the "%(uuid)s" '
                        'property is set to network "%(network)s". '
                        '"%(uuid)s" property is deprecated. Use '
                        '"%(id)s"  property instead.'),
                    dict(uuid=self.NETWORK_UUID,
                         id=self.NETWORK_ID,
                         # In this branch only the deprecated 'uuid' key
                         # is set, so report its value; the 'network'
                         # ('id') key is unset here and would log None.
                         network=network[self.NETWORK_UUID],
                         server=self.name))

        # retrieve provider's absolute limits if it will be needed;
        # both size checks below are guarded by the same conditions,
        # so 'limits' is always bound when read.
        metadata = self.properties.get(self.METADATA)
        personality = self.properties.get(self.PERSONALITY)
        if metadata is not None or personality:
            limits = self.client_plugin().absolute_limits()

        # if 'security_groups' present for the server and explict 'port'
        # in one or more entries in 'networks', raise validation error
        if networks_with_port and self.properties.get(self.SECURITY_GROUPS):
            raise exception.ResourcePropertyConflict(
                self.SECURITY_GROUPS,
                "/".join([self.NETWORKS, self.NETWORK_PORT]))

        # verify that the number of metadata entries is not greater
        # than the maximum number allowed in the provider's absolute
        # limits
        if metadata is not None:
            msg = _('Instance metadata must not contain greater than %s '
                    'entries.  This is the maximum number allowed by your '
                    'service provider') % limits['maxServerMeta']
            self._check_maximum(len(metadata), limits['maxServerMeta'], msg)

        # verify the number of personality files and the size of each
        # personality file against the provider's absolute limits
        if personality:
            msg = _("The personality property may not contain "
                    "greater than %s entries.") % limits['maxPersonality']
            self._check_maximum(len(personality), limits['maxPersonality'],
                                msg)

            for path, contents in personality.items():
                msg = (_("The contents of personality file \"%(path)s\" "
                         "is larger than the maximum allowed personality "
                         "file size (%(max_size)s bytes).") % {
                             'path': path,
                             'max_size': limits['maxPersonalitySize']
                         })
                # NOTE(review): bytes(contents) relies on Python 2 str
                # semantics — confirm before porting to Python 3.
                self._check_maximum(len(bytes(contents)),
                                    limits['maxPersonalitySize'], msg)

    def _delete_temp_url(self):
        """Remove the Swift metadata object created for the TempURL
        transport, dropping the container too once it is empty."""
        object_name = self.data().get('metadata_object_name')
        if not object_name:
            return
        try:
            container = self.physical_resource_name()
            swift = self.client('swift')
            swift.delete_object(container, object_name)
            headers = swift.head_container(container)
            if int(headers['x-container-object-count']) == 0:
                swift.delete_container(container)
        except Exception as exc:
            self.client_plugin('swift').ignore_not_found(exc)

    def handle_delete(self):
        """Delete the server and its software-config transport artifacts.

        Returns a TaskRunner driving the nova delete, or None when
        there is nothing left to delete.
        """

        if self.resource_id is None:
            return

        if self.user_data_software_config():
            # Clean up the stack user and any Swift metadata object
            # created for the software config transport.
            self._delete_user()
            self._delete_temp_url()

        try:
            server = self.nova().servers.get(self.resource_id)
        except Exception as e:
            # Server already gone: nothing more to do.
            self.client_plugin().ignore_not_found(e)
        else:
            deleter = scheduler.TaskRunner(self.client_plugin().delete_server,
                                           server)
            deleter.start()
            return deleter

    def check_delete_complete(self, deleter):
        """Return True when the delete task has finished (or when there
        was nothing to delete)."""
        return deleter is None or bool(deleter.step())

    def handle_suspend(self):
        """Suspend the server.

        Note we do not wait for the SUSPENDED state here; that is polled
        for by check_suspend_complete in a similar way to the create
        logic so we can take advantage of coroutines.
        """
        if self.resource_id is None:
            raise exception.Error(
                _('Cannot suspend %s, resource_id not set') % self.name)

        try:
            server = self.nova().servers.get(self.resource_id)
        except Exception as e:
            if self.client_plugin().is_not_found(e):
                raise exception.NotFound(
                    _('Failed to find server %s') % self.resource_id)
            else:
                raise
        else:
            LOG.debug('suspending server %s' % self.resource_id)
            # We want the server.suspend to happen after the volume
            # detachment has finished, so pass both tasks and the server
            suspend_runner = scheduler.TaskRunner(server.suspend)
            return server, suspend_runner

    def check_suspend_complete(self, cookie):
        """Poll until the suspend task and server state have settled.

        The cookie is the (server, suspend TaskRunner) pair returned by
        handle_suspend. Returns True once the server reports SUSPENDED;
        a falsy (implicit None) return means "keep polling". Raises
        Error when the server lands in an unexpected state.
        """
        server, suspend_runner = cookie

        if not suspend_runner.started():
            suspend_runner.start()

        if suspend_runner.done():
            if server.status == 'SUSPENDED':
                return True

            cp = self.client_plugin()
            cp.refresh_server(server)
            LOG.debug('%(name)s check_suspend_complete status = %(status)s' % {
                'name': self.name,
                'status': server.status
            })
            if server.status in list(cp.deferred_server_statuses + ['ACTIVE']):
                # Still transitioning (or back to ACTIVE): only report
                # completion once SUSPENDED is actually reached.
                return server.status == 'SUSPENDED'
            else:
                exc = exception.Error(
                    _('Suspend of server %(server)s failed '
                      'with unknown status: %(status)s') %
                    dict(server=server.name, status=server.status))
                raise exc

    def handle_resume(self):
        """Resume the server.

        Note we do not wait for the ACTIVE state here; that is polled
        for by check_resume_complete in a similar way to the create
        logic so we can take advantage of coroutines.
        """
        if self.resource_id is None:
            raise exception.Error(
                _('Cannot resume %s, resource_id not set') % self.name)

        try:
            server = self.nova().servers.get(self.resource_id)
        except Exception as e:
            if self.client_plugin().is_not_found(e):
                raise exception.NotFound(
                    _('Failed to find server %s') % self.resource_id)
            else:
                raise
        else:
            LOG.debug('resuming server %s' % self.resource_id)
            server.resume()
            return server

    def check_resume_complete(self, server):
        """Return True once the resumed server is ACTIVE again."""
        return self._check_active(server)

    def handle_snapshot(self):
        """Start creating an image of the server; return the image ID."""
        return self.nova().servers.create_image(
            self.resource_id, self.physical_resource_name())

    def check_snapshot_complete(self, image_id):
        """True once the snapshot image is ACTIVE; raise on ERROR."""
        image = self.nova().images.get(image_id)
        status = image.status
        if status == 'ACTIVE':
            # Remember the image so the snapshot can be restored/deleted.
            self.data_set('snapshot_image_id', image.id)
            return True
        if status == 'ERROR':
            raise exception.Error(status)
        return False

    def handle_delete_snapshot(self, snapshot):
        """Delete the image created for this snapshot; ignore 404s."""
        image_id = snapshot['resource_data']['snapshot_image_id']
        try:
            self.nova().images.delete(image_id)
        except Exception as exc:
            self.client_plugin().ignore_not_found(exc)

    def handle_restore(self, defn, restore_data):
        """Return a resource definition recreating the server from the
        snapshot image recorded in restore_data."""
        image_id = restore_data['resource_data']['snapshot_image_id']
        props = {}
        for key, value in six.iteritems(defn.properties(
                self.properties_schema)):
            if value is not None:
                props[key] = value
        props[self.IMAGE] = image_id
        return defn.freeze(properties=props)
Пример #26
0
class HealthMonitor(neutron.NeutronResource):
    """A resource for managing health monitors for loadbalancers in Neutron.

    A health monitor is used to determine whether or not back-end members of
    the VIP's pool are usable for processing a request. A pool can have several
    health monitors associated with it. There are different types of health
    monitors supported by the OpenStack LBaaS service:

      - PING: used to ping the members using ICMP.
      - TCP: used to connect to the members using TCP.
      - HTTP: used to send an HTTP request to the member.
      - HTTPS: used to send a secure HTTP request to the member.
    """

    required_service_extension = 'lbaas'

    # Template property names.
    PROPERTIES = (
        DELAY, TYPE, MAX_RETRIES, TIMEOUT,
        ADMIN_STATE_UP, HTTP_METHOD, EXPECTED_CODES, URL_PATH,
    ) = (
        'delay', 'type', 'max_retries', 'timeout',
        'admin_state_up', 'http_method', 'expected_codes', 'url_path',
    )

    # Attribute names exposed to templates via get_attr.
    ATTRIBUTES = (
        ADMIN_STATE_UP_ATTR, DELAY_ATTR, EXPECTED_CODES_ATTR,
        HTTP_METHOD_ATTR, MAX_RETRIES_ATTR, TIMEOUT_ATTR,
        TYPE_ATTR, URL_PATH_ATTR, TENANT_ID,
    ) = (
        'admin_state_up', 'delay', 'expected_codes',
        'http_method', 'max_retries', 'timeout',
        'type', 'url_path', 'tenant_id',
    )

    properties_schema = {
        DELAY: properties.Schema(
            properties.Schema.INTEGER,
            _('The minimum time in seconds between regular connections of '
              'the member.'),
            required=True,
            update_allowed=True
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('One of predefined health monitor types.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['PING', 'TCP', 'HTTP', 'HTTPS']),
            ]
        ),
        MAX_RETRIES: properties.Schema(
            properties.Schema.INTEGER,
            _('Number of permissible connection failures before changing the '
              'member status to INACTIVE.'),
            required=True,
            update_allowed=True
        ),
        TIMEOUT: properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum number of seconds for a monitor to wait for a '
              'connection to be established before it times out.'),
            required=True,
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the health monitor.'),
            default=True,
            update_allowed=True
        ),
        HTTP_METHOD: properties.Schema(
            properties.Schema.STRING,
            _('The HTTP method used for requests by the monitor of type '
              'HTTP.'),
            update_allowed=True
        ),
        EXPECTED_CODES: properties.Schema(
            properties.Schema.STRING,
            _('The list of HTTP status codes expected in response from the '
              'member to declare it healthy.'),
            update_allowed=True
        ),
        URL_PATH: properties.Schema(
            properties.Schema.STRING,
            _('The HTTP path used in the HTTP request used by the monitor to '
              'test a member health.'),
            update_allowed=True
        ),
    }

    attributes_schema = {
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _('The administrative state of this health monitor.'),
            type=attributes.Schema.STRING
        ),
        DELAY_ATTR: attributes.Schema(
            _('The minimum time in seconds between regular connections '
              'of the member.'),
            type=attributes.Schema.STRING
        ),
        EXPECTED_CODES_ATTR: attributes.Schema(
            _('The list of HTTP status codes expected in response '
              'from the member to declare it healthy.'),
            type=attributes.Schema.LIST
        ),
        HTTP_METHOD_ATTR: attributes.Schema(
            _('The HTTP method used for requests by the monitor of '
              'type HTTP.'),
            type=attributes.Schema.STRING
        ),
        MAX_RETRIES_ATTR: attributes.Schema(
            _('Number of permissible connection failures before changing '
              'the member status to INACTIVE.'),
            type=attributes.Schema.STRING
        ),
        TIMEOUT_ATTR: attributes.Schema(
            _('Maximum number of seconds for a monitor to wait for a '
              'connection to be established before it times out.'),
            type=attributes.Schema.STRING
        ),
        TYPE_ATTR: attributes.Schema(
            _('One of predefined health monitor types.'),
            type=attributes.Schema.STRING
        ),
        URL_PATH_ATTR: attributes.Schema(
            _('The HTTP path used in the HTTP request used by the monitor '
              'to test a member health.'),
            type=attributes.Schema.STRING
        ),
        TENANT_ID: attributes.Schema(
            _('Tenant owning the health monitor.'),
            type=attributes.Schema.STRING
        ),
    }

    def handle_create(self):
        """Create the health monitor in Neutron and record its id."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        monitor = self.client().create_health_monitor(
            {'health_monitor': props})['health_monitor']
        self.resource_id_set(monitor['id'])

    def _show_resource(self):
        """Fetch the current health monitor representation from Neutron."""
        return self.client().show_health_monitor(
            self.resource_id)['health_monitor']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed properties to Neutron."""
        if not prop_diff:
            return
        self.client().update_health_monitor(
            self.resource_id, {'health_monitor': prop_diff})

    def handle_delete(self):
        """Delete the monitor; an already-deleted monitor is treated as done."""
        try:
            self.client().delete_health_monitor(self.resource_id)
        except Exception as exc:
            self.client_plugin().ignore_not_found(exc)
        else:
            return True
# Example #27
class Pool(neutron.NeutronResource):
    """
    A resource for managing load balancer pools in Neutron.

    Creating a pool also creates the associated VIP (from the required
    ``vip`` property) and associates any listed health monitors.
    """

    # Template property names.
    PROPERTIES = (
        PROTOCOL, SUBNET_ID, LB_METHOD, NAME, DESCRIPTION,
        ADMIN_STATE_UP, VIP, MONITORS,
    ) = (
        'protocol', 'subnet_id', 'lb_method', 'name', 'description',
        'admin_state_up', 'vip', 'monitors',
    )

    # Keys of the nested ``vip`` map property.
    _VIP_KEYS = (
        VIP_NAME, VIP_DESCRIPTION, VIP_ADDRESS, VIP_CONNECTION_LIMIT,
        VIP_PROTOCOL_PORT, VIP_SESSION_PERSISTENCE, VIP_ADMIN_STATE_UP,
    ) = (
        'name', 'description', 'address', 'connection_limit',
        'protocol_port', 'session_persistence', 'admin_state_up',
    )

    # Keys of the nested ``session_persistence`` map inside ``vip``.
    _VIP_SESSION_PERSISTENCE_KEYS = (
        VIP_SESSION_PERSISTENCE_TYPE, VIP_SESSION_PERSISTENCE_COOKIE_NAME,
    ) = (
        'type', 'cookie_name',
    )

    properties_schema = {
        PROTOCOL: properties.Schema(
            properties.Schema.STRING,
            _('Protocol for balancing.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['TCP', 'HTTP', 'HTTPS']),
            ]
        ),
        SUBNET_ID: properties.Schema(
            properties.Schema.STRING,
            _('The subnet on which the members of the pool will be located.'),
            required=True
        ),
        LB_METHOD: properties.Schema(
            properties.Schema.STRING,
            _('The algorithm used to distribute load between the members of '
              'the pool.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ROUND_ROBIN',
                                           'LEAST_CONNECTIONS', 'SOURCE_IP']),
            ],
            update_allowed=True
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the pool.')
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the pool.'),
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of this pool.'),
            default=True,
            update_allowed=True
        ),
        VIP: properties.Schema(
            properties.Schema.MAP,
            _('IP address and port of the pool.'),
            schema={
                VIP_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of the vip.')
                ),
                VIP_DESCRIPTION: properties.Schema(
                    properties.Schema.STRING,
                    _('Description of the vip.')
                ),
                VIP_ADDRESS: properties.Schema(
                    properties.Schema.STRING,
                    _('IP address of the vip.')
                ),
                VIP_CONNECTION_LIMIT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('The maximum number of connections per second '
                      'allowed for the vip.')
                ),
                VIP_PROTOCOL_PORT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('TCP port on which to listen for client traffic '
                      'that is associated with the vip address.'),
                    required=True
                ),
                VIP_SESSION_PERSISTENCE: properties.Schema(
                    properties.Schema.MAP,
                    _('Configuration of session persistence.'),
                    schema={
                        VIP_SESSION_PERSISTENCE_TYPE: properties.Schema(
                            properties.Schema.STRING,
                            _('Method of implementation of session '
                              'persistence feature.'),
                            required=True,
                            constraints=[constraints.AllowedValues(
                                ['SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE']
                            )]
                        ),
                        VIP_SESSION_PERSISTENCE_COOKIE_NAME: properties.Schema(
                            properties.Schema.STRING,
                            _('Name of the cookie, '
                              'required if type is APP_COOKIE.')
                        )
                    }
                ),
                VIP_ADMIN_STATE_UP: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('The administrative state of this vip.'),
                    default=True
                ),
            },
            required=True
        ),
        MONITORS: properties.Schema(
            properties.Schema.LIST,
            _('List of health monitors associated with the pool.'),
            update_allowed=True
        ),
    }

    update_allowed_keys = ('Properties',)

    attributes_schema = {
        'admin_state_up': _('The administrative state of this pool.'),
        'name': _('Name of the pool.'),
        'protocol': _('Protocol to balance.'),
        'subnet_id': _('The subnet on which the members of the pool '
                       'will be located.'),
        'lb_method': _('The algorithm used to distribute load between the '
                       'members of the pool.'),
        'description': _('Description of the pool.'),
        'tenant_id': _('Tenant owning the pool.'),
        'vip': _('Vip associated with the pool.'),
    }

    def validate(self):
        """Check cross-property constraints on session persistence.

        APP_COOKIE persistence requires cookie_name; the other
        persistence types do not.
        """
        res = super(Pool, self).validate()
        if res:
            return res

        session_p = self.properties[self.VIP].get(self.VIP_SESSION_PERSISTENCE)
        if session_p is None:
            # session persistence is not configured, skip validation
            return

        persistence_type = session_p[self.VIP_SESSION_PERSISTENCE_TYPE]
        if persistence_type == 'APP_COOKIE':
            if session_p.get(self.VIP_SESSION_PERSISTENCE_COOKIE_NAME):
                return

            msg = _('Property cookie_name is required, when '
                    'session_persistence type is set to APP_COOKIE.')
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the pool, associate monitors, then create its VIP.

        Order matters: the pool id is needed both for monitor
        association and as the VIP's pool_id. The VIP id is stashed in
        resource metadata so later operations (attribute resolution,
        delete) can find it.
        """
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        # vip/monitors are not pool attributes; pull them out before the call.
        vip_properties = properties.pop(self.VIP)
        monitors = properties.pop(self.MONITORS, [])
        client = self.neutron()
        pool = client.create_pool({'pool': properties})['pool']
        self.resource_id_set(pool['id'])

        for monitor in monitors:
            client.associate_health_monitor(
                pool['id'], {'health_monitor': {'id': monitor}})

        vip_arguments = self.prepare_properties(
            vip_properties,
            '%s.vip' % (self.name,))

        session_p = vip_arguments.get(self.VIP_SESSION_PERSISTENCE)
        if session_p is not None:
            prepared_props = self.prepare_properties(session_p, None)
            vip_arguments['session_persistence'] = prepared_props

        # The VIP shares the pool's protocol and subnet.
        vip_arguments['protocol'] = self.properties[self.PROTOCOL]
        vip_arguments['subnet_id'] = self.properties[self.SUBNET_ID]
        vip_arguments['pool_id'] = pool['id']
        vip = client.create_vip({'vip': vip_arguments})['vip']

        self.metadata = {'vip': vip['id']}

    def _show_resource(self):
        """Fetch the current pool representation from Neutron."""
        return self.neutron().show_pool(self.resource_id)['pool']

    def check_create_complete(self, data):
        """Poll pool then VIP status; done when both are ACTIVE.

        Any status other than PENDING_CREATE/ACTIVE is treated as a
        failure and raised.
        """
        attributes = self._show_resource()
        if attributes['status'] == 'PENDING_CREATE':
            return False
        elif attributes['status'] == 'ACTIVE':
            vip_attributes = self.neutron().show_vip(
                self.metadata['vip'])['vip']
            if vip_attributes['status'] == 'PENDING_CREATE':
                return False
            elif vip_attributes['status'] == 'ACTIVE':
                return True
            raise exception.Error(
                'neutron reported unexpected vip resource[%s] status[%s]' %
                (vip_attributes['name'], vip_attributes['status']))
        raise exception.Error(
            'neutron report unexpected pool resource[%s] status[%s]' %
            (attributes['name'], attributes['status']))

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply property changes: re-sync monitors, then update the pool."""
        if prop_diff:
            client = self.neutron()
            monitors = set(prop_diff.pop(self.MONITORS, []))
            if monitors:
                # NOTE(review): self.t appears to still hold the pre-update
                # properties here, giving us the old monitor list to diff
                # against -- confirm against the update machinery.
                old_monitors = set(self.t['Properties'][self.MONITORS])
                for monitor in old_monitors - monitors:
                    client.disassociate_health_monitor(
                        self.resource_id, {'health_monitor': {'id': monitor}})
                for monitor in monitors - old_monitors:
                    client.associate_health_monitor(
                        self.resource_id, {'health_monitor': {'id': monitor}})

            if prop_diff:
                client.update_pool(self.resource_id, {'pool': prop_diff})

    def _resolve_attribute(self, name):
        """Resolve 'vip' from the stored VIP id; defer the rest to the base."""
        if name == 'vip':
            return self.neutron().show_vip(self.metadata['vip'])['vip']
        return super(Pool, self)._resolve_attribute(name)

    def _confirm_vip_delete(self):
        """Task that completes once the VIP is gone.

        Yields before every poll so the TaskRunner can interleave it;
        exits when show_vip raises a not-found error.
        """
        client = self.neutron()
        while True:
            try:
                yield
                client.show_vip(self.metadata['vip'])
            except NeutronClientException as ex:
                self._handle_not_found_exception(ex)
                break

    def handle_delete(self):
        """Delete the VIP, then the pool; return completion checkers.

        Already-deleted resources are ignored; a checker is only queued
        for deletions we actually issued.
        """
        checkers = []
        if self.metadata:
            try:
                self.neutron().delete_vip(self.metadata['vip'])
            except NeutronClientException as ex:
                self._handle_not_found_exception(ex)
            else:
                checkers.append(scheduler.TaskRunner(self._confirm_vip_delete))
        try:
            self.neutron().delete_pool(self.resource_id)
        except NeutronClientException as ex:
            self._handle_not_found_exception(ex)
        else:
            checkers.append(scheduler.TaskRunner(self._confirm_delete))
        return checkers

    def check_delete_complete(self, checkers):
        '''Push all checkers to completion in list order.'''
        for checker in checkers:
            if not checker.started():
                checker.start()
            if not checker.step():
                return False
        return True
# Example #28
class Instance(resource.Resource):

    PROPERTIES = (IMAGE_ID, INSTANCE_TYPE, KEY_NAME, AVAILABILITY_ZONE,
                  DISABLE_API_TERMINATION, KERNEL_ID, MONITORING,
                  PLACEMENT_GROUP_NAME, PRIVATE_IP_ADDRESS, RAM_DISK_ID,
                  SECURITY_GROUPS, SECURITY_GROUP_IDS, NETWORK_INTERFACES,
                  SOURCE_DEST_CHECK, SUBNET_ID, TAGS, NOVA_SCHEDULER_HINTS,
                  TENANCY, USER_DATA, VOLUMES, BLOCK_DEVICE_MAPPINGS) = (
                      'ImageId', 'InstanceType', 'KeyName', 'AvailabilityZone',
                      'DisableApiTermination', 'KernelId', 'Monitoring',
                      'PlacementGroupName', 'PrivateIpAddress', 'RamDiskId',
                      'SecurityGroups', 'SecurityGroupIds',
                      'NetworkInterfaces', 'SourceDestCheck', 'SubnetId',
                      'Tags', 'NovaSchedulerHints', 'Tenancy', 'UserData',
                      'Volumes', 'BlockDeviceMappings')

    _TAG_KEYS = (
        TAG_KEY,
        TAG_VALUE,
    ) = (
        'Key',
        'Value',
    )

    _NOVA_SCHEDULER_HINT_KEYS = (
        NOVA_SCHEDULER_HINT_KEY,
        NOVA_SCHEDULER_HINT_VALUE,
    ) = (
        'Key',
        'Value',
    )

    _VOLUME_KEYS = (
        VOLUME_DEVICE,
        VOLUME_ID,
    ) = (
        'Device',
        'VolumeId',
    )

    _BLOCK_DEVICE_MAPPINGS_KEYS = (
        DEVICE_NAME,
        EBS,
        NO_DEVICE,
        VIRTUAL_NAME,
    ) = (
        'DeviceName',
        'Ebs',
        'NoDevice',
        'VirtualName',
    )

    _EBS_KEYS = (
        DELETE_ON_TERMINATION,
        IOPS,
        SNAPSHOT_ID,
        VOLUME_SIZE,
        VOLUME_TYPE,
    ) = ('DeleteOnTermination', 'Iops', 'SnapshotId', 'VolumeSize',
         'VolumeType')

    ATTRIBUTES = (
        AVAILABILITY_ZONE_ATTR,
        PRIVATE_DNS_NAME,
        PUBLIC_DNS_NAME,
        PRIVATE_IP,
        PUBLIC_IP,
    ) = (
        'AvailabilityZone',
        'PrivateDnsName',
        'PublicDnsName',
        'PrivateIp',
        'PublicIp',
    )

    properties_schema = {
        IMAGE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('Glance image ID or name.'),
            constraints=[constraints.CustomConstraint('glance.image')],
            required=True),
        # AWS does not require InstanceType but Heat does because the nova
        # create api call requires a flavor
        INSTANCE_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('Nova instance type (flavor).'),
            required=True,
            update_allowed=True,
            constraints=[constraints.CustomConstraint('nova.flavor')]),
        KEY_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Optional Nova keypair name.'),
            constraints=[constraints.CustomConstraint("nova.keypair")]),
        AVAILABILITY_ZONE:
        properties.Schema(properties.Schema.STRING,
                          _('Availability zone to launch the instance in.')),
        DISABLE_API_TERMINATION:
        properties.Schema(properties.Schema.STRING,
                          _('Not Implemented.'),
                          implemented=False),
        KERNEL_ID:
        properties.Schema(properties.Schema.STRING,
                          _('Not Implemented.'),
                          implemented=False),
        MONITORING:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('Not Implemented.'),
                          implemented=False),
        PLACEMENT_GROUP_NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Not Implemented.'),
                          implemented=False),
        PRIVATE_IP_ADDRESS:
        properties.Schema(properties.Schema.STRING,
                          _('Not Implemented.'),
                          implemented=False),
        RAM_DISK_ID:
        properties.Schema(properties.Schema.STRING,
                          _('Not Implemented.'),
                          implemented=False),
        SECURITY_GROUPS:
        properties.Schema(properties.Schema.LIST,
                          _('Security group names to assign.')),
        SECURITY_GROUP_IDS:
        properties.Schema(properties.Schema.LIST,
                          _('Security group IDs to assign.')),
        NETWORK_INTERFACES:
        properties.Schema(properties.Schema.LIST,
                          _('Network interfaces to associate with instance.'),
                          update_allowed=True),
        SOURCE_DEST_CHECK:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('Not Implemented.'),
                          implemented=False),
        SUBNET_ID:
        properties.Schema(properties.Schema.STRING,
                          _('Subnet ID to launch instance in.'),
                          update_allowed=True),
        TAGS:
        properties.Schema(properties.Schema.LIST,
                          _('Tags to attach to instance.'),
                          schema=properties.Schema(
                              properties.Schema.MAP,
                              schema={
                                  TAG_KEY:
                                  properties.Schema(properties.Schema.STRING,
                                                    required=True),
                                  TAG_VALUE:
                                  properties.Schema(properties.Schema.STRING,
                                                    required=True),
                              },
                          ),
                          update_allowed=True),
        NOVA_SCHEDULER_HINTS:
        properties.Schema(
            properties.Schema.LIST,
            _('Scheduler hints to pass to Nova (Heat extension).'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NOVA_SCHEDULER_HINT_KEY:
                    properties.Schema(properties.Schema.STRING, required=True),
                    NOVA_SCHEDULER_HINT_VALUE:
                    properties.Schema(properties.Schema.STRING, required=True),
                },
            )),
        TENANCY:
        properties.Schema(properties.Schema.STRING,
                          _('Not Implemented.'),
                          constraints=[
                              constraints.AllowedValues(
                                  ['dedicated', 'default']),
                          ],
                          implemented=False),
        USER_DATA:
        properties.Schema(properties.Schema.STRING,
                          _('User data to pass to instance.')),
        VOLUMES:
        properties.Schema(
            properties.Schema.LIST,
            _('Volumes to attach to instance.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    VOLUME_DEVICE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The device where the volume is exposed on the '
                          'instance. This assignment may not be honored and '
                          'it is advised that the path '
                          '/dev/disk/by-id/virtio-<VolumeId> be used '
                          'instead.'),
                        required=True),
                    VOLUME_ID:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The ID of the volume to be attached.'),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('cinder.volume')
                        ]),
                })),
        BLOCK_DEVICE_MAPPINGS:
        properties.Schema(
            properties.Schema.LIST,
            _('Block device mappings to attach to instance.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    DEVICE_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('A device name where the volume will be '
                          'attached in the system at /dev/device_name.'
                          'e.g. vdb'),
                        required=True,
                    ),
                    EBS:
                    properties.Schema(
                        properties.Schema.MAP,
                        _('The ebs volume to attach to the instance.'),
                        schema={
                            DELETE_ON_TERMINATION:
                            properties.Schema(
                                properties.Schema.BOOLEAN,
                                _('Indicate whether the volume should be '
                                  'deleted when the instance is terminated.'),
                                default=True),
                            IOPS:
                            properties.Schema(
                                properties.Schema.NUMBER,
                                _('The number of I/O operations per second '
                                  'that the volume supports.'),
                                implemented=False),
                            SNAPSHOT_ID:
                            properties.Schema(
                                properties.Schema.STRING,
                                _('The ID of the snapshot to create '
                                  'a volume from.'),
                                constraints=[
                                    constraints.CustomConstraint(
                                        'cinder.snapshot')
                                ]),
                            VOLUME_SIZE:
                            properties.Schema(
                                properties.Schema.STRING,
                                _('The size of the volume, in GB. Must be '
                                  'equal or greater than the size of the '
                                  'snapshot. It is safe to leave this blank '
                                  'and have the Compute service infer '
                                  'the size.'),
                            ),
                            VOLUME_TYPE:
                            properties.Schema(properties.Schema.STRING,
                                              _('The volume type.'),
                                              implemented=False),
                        },
                    ),
                    NO_DEVICE:
                    properties.Schema(
                        properties.Schema.MAP,
                        _('The can be used to unmap a defined device.'),
                        implemented=False),
                    VIRTUAL_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The name of the virtual device. The name must be '
                          'in the form ephemeralX where X is a number '
                          'starting from zero (0); for example, ephemeral0.'),
                        implemented=False),
                },
            ),
        ),
    }

    attributes_schema = {
        AVAILABILITY_ZONE_ATTR:
        attributes.Schema(
            _('The Availability Zone where the specified instance is '
              'launched.')),
        PRIVATE_DNS_NAME:
        attributes.Schema(_('Private DNS name of the specified instance.')),
        PUBLIC_DNS_NAME:
        attributes.Schema(_('Public DNS name of the specified instance.')),
        PRIVATE_IP:
        attributes.Schema(_('Private IP address of the specified instance.')),
        PUBLIC_IP:
        attributes.Schema(_('Public IP address of the specified instance.')),
    }

    # Server host name limit to 53 characters by due to typical default
    # linux HOST_NAME_MAX of 64, minus the .novalocal appended to the name
    physical_resource_name_limit = 53

    default_client_name = 'nova'

    def __init__(self, name, json_snippet, stack):
        super(Instance, self).__init__(name, json_snippet, stack)
        # First known IP address; filled lazily by _set_ipaddress() or
        # fetched from Nova in _ipaddress().
        self.ipaddress = None

    def _set_ipaddress(self, networks):
        """Cache the server's first IP address.

        *networks* maps network name to a list of addresses (as provided
        by Nova); the first address of the first non-empty network wins.
        """
        for addresses in networks.values():
            if addresses:
                self.ipaddress = addresses[0]
                break

    def _ipaddress(self):
        """Return the server's IP address, asking Nova on first use.

        Falls back to '0.0.0.0' when no address can be determined.
        """
        if self.ipaddress is None:
            resolved = self.client_plugin().server_to_ipaddress(
                self.resource_id)
            self.ipaddress = resolved
        return self.ipaddress or '0.0.0.0'

    def _availability_zone(self):
        """Return the server's availability zone.

        The template property wins when set; otherwise the zone is read
        from Nova. A vanished server yields None rather than an error.
        """
        zone = self.properties[self.AVAILABILITY_ZONE]
        if zone is not None:
            return zone
        try:
            server = self.nova().servers.get(self.resource_id)
            return getattr(server, 'OS-EXT-AZ:availability_zone')
        except Exception as exc:
            self.client_plugin().ignore_not_found(exc)
            return None

    def _resolve_attribute(self, name):
        """Resolve one of this resource's attributes by name.

        AvailabilityZone is looked up specially; every other declared
        attribute resolves to the instance's (first) IP address. The
        result is logged and returned as text, or None when falsy.
        """
        if name == self.AVAILABILITY_ZONE_ATTR:
            res = self._availability_zone()
        elif name in self.ATTRIBUTES[1:]:
            res = self._ipaddress()
        else:
            res = None

        LOG.info(_LI('%(name)s._resolve_attribute(%(attname)s) == %(res)s'),
                 {'name': self.name, 'attname': name, 'res': res})
        return six.text_type(res) if res else None

    def _port_data_delete(self):
        """Remove the implicitly-created port recorded in resource data."""
        stored_port = self.data().get('port_id')
        if not stored_port:
            return
        try:
            self.neutron().delete_port(stored_port)
        except Exception as exc:
            self.client_plugin('neutron').ignore_not_found(exc)
        self.data_delete('port_id')

    def _build_nics(self,
                    network_interfaces,
                    security_groups=None,
                    subnet_id=None):
        """Translate AWS-style NIC properties into Nova nics arguments.

        Explicit NetworkInterfaces entries take precedence and are
        ordered by their DeviceIndex. Failing that, a SubnetId causes a
        port to be created on that subnet so Nova does not simply pick
        the network's first subnet. Returns a list of {'port-id': ...}
        dicts, or None when neither source yields a NIC.
        """
        if network_interfaces:
            raw_nics = []
            for entry in network_interfaces:
                if isinstance(entry, six.string_types):
                    # Bare id string: wrap it, using list position as index.
                    raw_nics.append({'NetworkInterfaceId': entry,
                                     'DeviceIndex': len(raw_nics)})
                else:
                    raw_nics.append(entry)
            ordered = sorted(raw_nics,
                             key=lambda item: int(item['DeviceIndex']))
            return [{'port-id': item['NetworkInterfaceId']}
                    for item in ordered]

        if not subnet_id:
            return None

        # if SubnetId property in Instance, ensure subnet exists
        neutronclient = self.neutron()
        network_id = self.client_plugin(
            'neutron').network_id_from_subnet_id(subnet_id)
        if not network_id:
            return None

        # if subnet verified, create a port to use this subnet
        # if port is not created explicitly, nova will choose
        # the first subnet in the given network.
        port_props = {
            'admin_state_up': True,
            'network_id': network_id,
            'fixed_ips': [{'subnet_id': subnet_id}],
        }
        if security_groups:
            port_props['security_groups'] = self.client_plugin(
                'neutron').get_secgroup_uuids(security_groups)

        port = neutronclient.create_port({'port': port_props})['port']

        # after create the port, set the port-id to resource data,
        # so that the port can be deleted on instance delete.
        self.data_set('port_id', port['id'])
        return [{'port-id': port['id']}]

    def _get_security_groups(self):
        """Collect security groups from both group properties.

        Returns the combined list, or None when neither property is set.
        """
        groups = []
        for key in (self.SECURITY_GROUPS, self.SECURITY_GROUP_IDS):
            value = self.properties.get(key)
            if value is not None:
                groups.extend(value)
        return groups or None

    def _build_block_device_mapping(self, bdm):
        """Convert BlockDeviceMappings entries into nova's mapping dict.

        Each device name maps to a colon-joined string of the form
        '<snapshot_id>:snap:<volume_size>[:<delete_on_termination>]'.
        Entries without an Ebs section are skipped; returns None when no
        mappings were supplied.
        """
        if not bdm:
            return None
        result = {}
        for mapping in bdm:
            ebs = mapping.get(self.EBS)
            if not ebs:
                continue
            parts = []
            snapshot_id = ebs.get(self.SNAPSHOT_ID)
            if snapshot_id:
                parts.extend([snapshot_id, 'snap'])
            volume_size = ebs.get(self.VOLUME_SIZE)
            # An omitted size leaves an empty field in the joined string.
            parts.append(str(volume_size) if volume_size else '')
            delete = ebs.get(self.DELETE_ON_TERMINATION)
            if delete is not None:
                parts.append(str(delete))
            result[mapping.get(self.DEVICE_NAME)] = ':'.join(parts)
        return result

    def _get_nova_metadata(self, properties):
        """Build the nova server metadata dict from the Tags property."""
        if properties is None or properties.get(self.TAGS) is None:
            return None

        return {tag[self.TAG_KEY]: tag[self.TAG_VALUE]
                for tag in properties[self.TAGS]}

    def handle_create(self):
        """Create the nova server, plus any implicit port, for this instance.

        :returns: a (server, volume-attach TaskRunner) cookie consumed by
            check_create_complete.
        """
        security_groups = self._get_security_groups()

        userdata = self.properties[self.USER_DATA] or ''
        flavor = self.properties[self.INSTANCE_TYPE]
        availability_zone = self.properties[self.AVAILABILITY_ZONE]

        image_name = self.properties[self.IMAGE_ID]

        image_id = self.client_plugin('glance').get_image_id(image_name)

        flavor_id = self.client_plugin().get_flavor_id(flavor)

        scheduler_hints = {}
        if self.properties[self.NOVA_SCHEDULER_HINTS]:
            for tm in self.properties[self.NOVA_SCHEDULER_HINTS]:
                # adopted from novaclient shell
                hint = tm[self.NOVA_SCHEDULER_HINT_KEY]
                hint_value = tm[self.NOVA_SCHEDULER_HINT_VALUE]
                # Repeated hint keys accumulate their values into a list.
                if hint in scheduler_hints:
                    if isinstance(scheduler_hints[hint], six.string_types):
                        scheduler_hints[hint] = [scheduler_hints[hint]]
                    scheduler_hints[hint].append(hint_value)
                else:
                    scheduler_hints[hint] = hint_value
        else:
            scheduler_hints = None
        # Optionally add stack-identity hints for schedulers that use them.
        if cfg.CONF.stack_scheduler_hints:
            if scheduler_hints is None:
                scheduler_hints = {}
            scheduler_hints['heat_root_stack_id'] = self.stack.root_stack.id
            scheduler_hints['heat_stack_id'] = self.stack.id
            scheduler_hints['heat_stack_name'] = self.stack.name
            scheduler_hints['heat_path_in_stack'] = self.stack.path_in_stack()
            scheduler_hints['heat_resource_name'] = self.name

        nics = self._build_nics(self.properties[self.NETWORK_INTERFACES],
                                security_groups=security_groups,
                                subnet_id=self.properties[self.SUBNET_ID])

        block_device_mapping = self._build_block_device_mapping(
            self.properties.get(self.BLOCK_DEVICE_MAPPINGS))

        server = None

        # FIXME(shadower): the instance_user config option is deprecated. Once
        # it's gone, we should always use ec2-user for compatibility with
        # CloudFormation.
        if cfg.CONF.instance_user:
            instance_user = cfg.CONF.instance_user
        else:
            instance_user = '******'

        try:
            server = self.nova().servers.create(
                name=self.physical_resource_name(),
                image=image_id,
                flavor=flavor_id,
                key_name=self.properties[self.KEY_NAME],
                security_groups=security_groups,
                userdata=self.client_plugin().build_userdata(
                    self.metadata_get(), userdata, instance_user),
                meta=self._get_nova_metadata(self.properties),
                scheduler_hints=scheduler_hints,
                nics=nics,
                availability_zone=availability_zone,
                block_device_mapping=block_device_mapping)
        finally:
            # Avoid a race condition where the thread could be cancelled
            # before the ID is stored
            if server is not None:
                self.resource_id_set(server.id)

        return server, scheduler.TaskRunner(self._attach_volumes_task())

    def _attach_volumes_task(self):
        """Build a task group that attaches all configured volumes."""
        tasks = [vol_task.VolumeAttachTask(self.stack, self.resource_id,
                                           volume_id, device)
                 for volume_id, device in self.volumes()]
        return scheduler.PollingTaskGroup(tasks)

    def check_create_complete(self, cookie):
        """Create is done once the server is ACTIVE and volumes attached."""
        server, volume_attach_task = cookie
        if not self._check_active(server):
            return False
        return self._check_volume_attached(server, volume_attach_task)

    def _check_volume_attached(self, server, volume_attach_task):
        """Start the attach task on first call, then step it to completion."""
        if volume_attach_task.started():
            return volume_attach_task.step()
        # First call: record the server's addresses, then kick off the task.
        self._set_ipaddress(server.networks)
        volume_attach_task.start()
        return volume_attach_task.done()

    def _check_active(self, server):
        """Check whether the given server has reached ACTIVE state.

        :returns: True when ACTIVE, False while still in a transitional
            (deferred) state.
        :raises resource.ResourceInError: when nova reports ERROR.
        :raises resource.ResourceUnknownStatus: for any other status.
        """
        cp = self.client_plugin()
        status = cp.get_status(server)
        # Only refresh from nova when the cached status is not already
        # ACTIVE, avoiding an unnecessary API round trip.
        if status != 'ACTIVE':
            cp.refresh_server(server)
            status = cp.get_status(server)

        if status == 'ACTIVE':
            return True

        if status in cp.deferred_server_statuses:
            return False

        if status == 'ERROR':
            # Surface nova's fault details (if any) in the error reason.
            fault = getattr(server, 'fault', {})
            raise resource.ResourceInError(
                resource_status=status,
                status_reason=_("Message: %(message)s, Code: %(code)s") % {
                    'message': fault.get('message', _('Unknown')),
                    'code': fault.get('code', _('Unknown'))
                })

        raise resource.ResourceUnknownStatus(
            resource_status=server.status, result=_('Instance is not active'))

    def volumes(self):
        """Iterate over (volume_id, device) pairs for all volumes that
        should be attached to this instance.
        """
        return ((vol[self.VOLUME_ID], vol[self.VOLUME_DEVICE])
                for vol in self.properties[self.VOLUMES])

    def _remove_matched_ifaces(self, old_network_ifaces, new_network_ifaces):
        # find matches and remove them from old and new ifaces
        old_network_ifaces_copy = copy.deepcopy(old_network_ifaces)
        for iface in old_network_ifaces_copy:
            if iface in new_network_ifaces:
                new_network_ifaces.remove(iface)
                old_network_ifaces.remove(iface)

    def handle_check(self):
        """Verify the instance is ACTIVE, raising an Error otherwise."""
        server = self.nova().servers.get(self.resource_id)
        if self._check_active(server):
            return
        raise exception.Error(
            _("Instance is not ACTIVE (was: %s)") % server.status.strip())

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply in-place updates for metadata, tags, flavor and interfaces.

        :returns: a list of TaskRunner checkers that check_update_complete
            steps to completion in order.
        """
        if 'Metadata' in tmpl_diff:
            self.metadata_set(tmpl_diff['Metadata'])
        checkers = []
        server = None
        if self.TAGS in prop_diff:
            server = self.nova().servers.get(self.resource_id)
            self.client_plugin().meta_update(
                server, self._get_nova_metadata(prop_diff))

        if self.INSTANCE_TYPE in prop_diff:
            # Resize to the new flavor via a background task.
            flavor = prop_diff[self.INSTANCE_TYPE]
            flavor_id = self.client_plugin().get_flavor_id(flavor)
            if not server:
                server = self.nova().servers.get(self.resource_id)
            checker = scheduler.TaskRunner(self.client_plugin().resize, server,
                                           flavor, flavor_id)
            checkers.append(checker)
        if self.NETWORK_INTERFACES in prop_diff:
            new_network_ifaces = prop_diff.get(self.NETWORK_INTERFACES)
            old_network_ifaces = self.properties.get(self.NETWORK_INTERFACES)
            subnet_id = (prop_diff.get(self.SUBNET_ID)
                         or self.properties.get(self.SUBNET_ID))
            security_groups = self._get_security_groups()
            if not server:
                server = self.nova().servers.get(self.resource_id)
            # if there are entries in old_network_ifaces and
            # new_network_ifaces, remove the common entries from both lists
            if old_network_ifaces and new_network_ifaces:
                # there are four situations:
                # 1.old includes new, such as: old = 2,3, new = 2
                # 2.new includes old, such as: old = 2,3, new = 1,2,3
                # 3.has overlaps, such as: old = 2,3, new = 1,2
                # 4.different, such as: old = 2,3, new = 1,4
                # detach unmatched ones in old, attach unmatched ones in new
                self._remove_matched_ifaces(old_network_ifaces,
                                            new_network_ifaces)
                if old_network_ifaces:
                    old_nics = self._build_nics(old_network_ifaces)
                    for nic in old_nics:
                        checker = scheduler.TaskRunner(server.interface_detach,
                                                       nic['port-id'])
                        checkers.append(checker)
                if new_network_ifaces:
                    new_nics = self._build_nics(new_network_ifaces)
                    for nic in new_nics:
                        checker = scheduler.TaskRunner(server.interface_attach,
                                                       nic['port-id'], None,
                                                       None)
                        checkers.append(checker)
            # if the interfaces did not come from the 'NetworkInterfaces'
            # property, the situation is somewhat complex, so detach the old
            # interfaces and then attach the new ones.
            else:
                interfaces = server.interface_list()
                for iface in interfaces:
                    checker = scheduler.TaskRunner(server.interface_detach,
                                                   iface.port_id)
                    checkers.append(checker)
                # first delete the port which was implicitly created by heat
                self._port_data_delete()
                nics = self._build_nics(new_network_ifaces,
                                        security_groups=security_groups,
                                        subnet_id=subnet_id)
                # 'SubnetId' property is empty(or None) and
                # 'NetworkInterfaces' property is empty(or None),
                # _build_nics() will return nics = None,we should attach
                # first free port, according to similar behavior during
                # instance creation
                if not nics:
                    checker = scheduler.TaskRunner(server.interface_attach,
                                                   None, None, None)
                    checkers.append(checker)
                else:
                    for nic in nics:
                        checker = scheduler.TaskRunner(server.interface_attach,
                                                       nic['port-id'], None,
                                                       None)
                        checkers.append(checker)

        # Start the first task; the rest are started by
        # check_update_complete as their predecessors finish.
        if checkers:
            checkers[0].start()
        return checkers

    def check_update_complete(self, checkers):
        """Advance each pending checker in order; True when all are done."""
        for task in checkers:
            if not task.started():
                task.start()
            done = task.step()
            if not done:
                return False
        return True

    def metadata_update(self, new_metadata=None):
        """Refresh stored metadata from the template when none is supplied."""
        if new_metadata is None:
            # Re-read the metadata from the template definition.
            self.metadata_set(self.t.metadata())

    def validate(self):
        """Validate the provided template parameters.

        Checks that security groups are not combined with explicit network
        interfaces, and that every BlockDeviceMappings entry carries an Ebs
        section with a SnapshotId.
        """
        res = super(Instance, self).validate()
        if res:
            return res

        # check validity of security groups vs. network interfaces
        security_groups = self._get_security_groups()
        if security_groups and self.properties.get(self.NETWORK_INTERFACES):
            raise exception.ResourcePropertyConflict(
                '/'.join([self.SECURITY_GROUPS, self.SECURITY_GROUP_IDS]),
                self.NETWORK_INTERFACES)

        # check bdm property
        # now we don't support without snapshot_id in bdm
        bdm = self.properties.get(self.BLOCK_DEVICE_MAPPINGS)
        if bdm:
            for mapping in bdm:
                ebs = mapping.get(self.EBS)
                if ebs:
                    snapshot_id = ebs.get(self.SNAPSHOT_ID)
                    if not snapshot_id:
                        msg = _("SnapshotId is missing, this is required "
                                "when specifying BlockDeviceMappings.")
                        raise exception.StackValidationFailed(message=msg)
                else:
                    msg = _("Ebs is missing, this is required "
                            "when specifying BlockDeviceMappings.")
                    raise exception.StackValidationFailed(message=msg)

    def handle_delete(self):
        """Delete the server and any port heat created implicitly."""
        # make sure to delete the port which heat implicitly created
        self._port_data_delete()

        if self.resource_id is None:
            return
        try:
            server = self.nova().servers.get(self.resource_id)
        except Exception as e:
            # Already gone: nothing left to do.
            self.client_plugin().ignore_not_found(e)
            return
        task = scheduler.TaskRunner(self.client_plugin().delete_server,
                                    server)
        task.start()
        return task

    def check_delete_complete(self, deleter):
        """Return True once the server deletion task has finished."""
        # deleter is None when the resource was already gone in handle_delete.
        if deleter is None:
            return True
        return bool(deleter.step())

    def handle_suspend(self):
        """Suspend an instance.

        Note we do not wait for the SUSPENDED state, this is polled for by
        check_suspend_complete in a similar way to the create logic so we
        can take advantage of coroutines.

        :raises exception.Error: when resource_id is not set.
        :raises exception.NotFound: when the instance no longer exists.
        """
        if self.resource_id is None:
            raise exception.Error(
                _('Cannot suspend %s, resource_id not set') % self.name)

        try:
            server = self.nova().servers.get(self.resource_id)
        except Exception as e:
            if self.client_plugin().is_not_found(e):
                raise exception.NotFound(
                    _('Failed to find instance %s') % self.resource_id)
            # Previously any other error was silently swallowed and None
            # returned, making check_suspend_complete fail on None.status;
            # propagate unexpected failures instead.
            raise
        else:
            LOG.debug("suspending instance %s", self.resource_id)
            server.suspend()
            return server

    def check_suspend_complete(self, server):
        """Poll until the server reaches SUSPENDED state.

        :returns: True once nova reports SUSPENDED, False while the server
            is still in a transitional (or ACTIVE) state.
        :raises exception.Error: for any unexpected server status.
        """

        if server.status == 'SUSPENDED':
            return True

        cp = self.client_plugin()
        cp.refresh_server(server)
        LOG.debug("%(name)s check_suspend_complete "
                  "status = %(status)s", {
                      'name': self.name,
                      'status': server.status
                  })
        # NOTE(review): SUSPENDED was already handled above, so this
        # comparison always yields False here; the branch effectively means
        # "still waiting" for deferred/ACTIVE states.
        if server.status in list(cp.deferred_server_statuses + ['ACTIVE']):
            return server.status == 'SUSPENDED'
        else:
            raise exception.Error(
                _(' nova reported unexpected '
                  'instance[%(instance)s] '
                  'status[%(status)s]') % {
                      'instance': self.name,
                      'status': server.status
                  })

    def handle_resume(self):
        """Resume an instance.

        Note we do not wait for the ACTIVE state, this is polled for by
        check_resume_complete in a similar way to the create logic so we
        can take advantage of coroutines.

        :raises exception.Error: when resource_id is not set.
        :raises exception.NotFound: when the instance no longer exists.
        """
        if self.resource_id is None:
            raise exception.Error(
                _('Cannot resume %s, resource_id not set') % self.name)

        try:
            server = self.nova().servers.get(self.resource_id)
        except Exception as e:
            if self.client_plugin().is_not_found(e):
                raise exception.NotFound(
                    _('Failed to find instance %s') % self.resource_id)
            # Previously any other error was silently swallowed and None
            # returned, making check_resume_complete fail on a None server;
            # propagate unexpected failures instead.
            raise
        else:
            LOG.debug("resuming instance %s", self.resource_id)
            server.resume()
            return server

    def check_resume_complete(self, server):
        """Resume is complete once the server is ACTIVE again."""
        return self._check_active(server)
Пример #29
0
class Subnet(neutron.NeutronResource):
    """A resource for managing Neutron subnets.

    A subnet represents an IP address block that can be used for assigning IP
    addresses to virtual instances. Each subnet must have a CIDR and must be
    associated with a network. IPs can be either selected from the whole subnet
    CIDR, or from "allocation pools" that can be specified by the user.
    """

    entity = 'subnet'

    PROPERTIES = (
        NETWORK_ID,
        NETWORK,
        SUBNETPOOL,
        PREFIXLEN,
        CIDR,
        VALUE_SPECS,
        NAME,
        IP_VERSION,
        DNS_NAMESERVERS,
        GATEWAY_IP,
        ENABLE_DHCP,
        ALLOCATION_POOLS,
        TENANT_ID,
        HOST_ROUTES,
        IPV6_RA_MODE,
        IPV6_ADDRESS_MODE,
        SEGMENT,
        TAGS,
    ) = (
        'network_id',
        'network',
        'subnetpool',
        'prefixlen',
        'cidr',
        'value_specs',
        'name',
        'ip_version',
        'dns_nameservers',
        'gateway_ip',
        'enable_dhcp',
        'allocation_pools',
        'tenant_id',
        'host_routes',
        'ipv6_ra_mode',
        'ipv6_address_mode',
        'segment',
        'tags',
    )

    _ALLOCATION_POOL_KEYS = (
        ALLOCATION_POOL_START,
        ALLOCATION_POOL_END,
    ) = (
        'start',
        'end',
    )

    _HOST_ROUTES_KEYS = (
        ROUTE_DESTINATION,
        ROUTE_NEXTHOP,
    ) = (
        'destination',
        'nexthop',
    )

    _IPV6_DHCP_MODES = (
        DHCPV6_STATEFUL,
        DHCPV6_STATELESS,
        SLAAC,
    ) = (
        'dhcpv6-stateful',
        'dhcpv6-stateless',
        'slaac',
    )

    ATTRIBUTES = (
        NAME_ATTR,
        NETWORK_ID_ATTR,
        TENANT_ID_ATTR,
        ALLOCATION_POOLS_ATTR,
        GATEWAY_IP_ATTR,
        HOST_ROUTES_ATTR,
        IP_VERSION_ATTR,
        CIDR_ATTR,
        DNS_NAMESERVERS_ATTR,
        ENABLE_DHCP_ATTR,
    ) = (
        'name',
        'network_id',
        'tenant_id',
        'allocation_pools',
        'gateway_ip',
        'host_routes',
        'ip_version',
        'cidr',
        'dns_nameservers',
        'enable_dhcp',
    )

    properties_schema = {
        NETWORK_ID:
        properties.Schema(
            properties.Schema.STRING,
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                message=_('Use property %s.') % NETWORK,
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED, version='2014.2')),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        NETWORK:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID of the attached network.'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.network')],
            support_status=support.SupportStatus(version='2014.2')),
        SUBNETPOOL:
        properties.Schema(
            properties.Schema.STRING,
            _('The name or ID of the subnet pool.'),
            constraints=[constraints.CustomConstraint('neutron.subnetpool')],
            support_status=support.SupportStatus(version='6.0.0'),
        ),
        PREFIXLEN:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Prefix length for subnet allocation from subnet pool.'),
            constraints=[constraints.Range(min=0)],
            support_status=support.SupportStatus(version='6.0.0'),
        ),
        CIDR:
        properties.Schema(
            properties.Schema.STRING,
            _('The CIDR.'),
            constraints=[constraints.CustomConstraint('net_cidr')]),
        VALUE_SPECS:
        properties.Schema(properties.Schema.MAP,
                          _('Extra parameters to include in the request.'),
                          default={},
                          update_allowed=True),
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('The name of the subnet.'),
                          update_allowed=True),
        IP_VERSION:
        properties.Schema(properties.Schema.INTEGER,
                          _('The IP version, which is 4 or 6.'),
                          default=4,
                          constraints=[
                              constraints.AllowedValues([4, 6]),
                          ]),
        DNS_NAMESERVERS:
        properties.Schema(properties.Schema.LIST,
                          _('A specified set of DNS name servers to be used.'),
                          default=[],
                          update_allowed=True),
        GATEWAY_IP:
        properties.Schema(
            properties.Schema.STRING,
            _('The gateway IP address. Set to any of [ null | ~ | "" ] '
              'to create/update a subnet without a gateway. '
              'If omitted when creation, neutron will assign the first '
              'free IP address within the subnet to the gateway '
              'automatically. If remove this from template when update, '
              'the old gateway IP address will be detached.'),
            update_allowed=True),
        ENABLE_DHCP:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Set to true if DHCP is enabled and false if DHCP is disabled.'),
            default=True,
            update_allowed=True),
        ALLOCATION_POOLS:
        properties.Schema(
            properties.Schema.LIST,
            _('The start and end addresses for the allocation pools.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ALLOCATION_POOL_START:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Start address for the allocation pool.'),
                        required=True,
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                    ALLOCATION_POOL_END:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('End address for the allocation pool.'),
                        required=True,
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                },
            ),
            update_allowed=True),
        TENANT_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID of the tenant who owns the network. Only administrative '
              'users can specify a tenant ID other than their own.')),
        HOST_ROUTES:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of host route dictionaries for the subnet.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ROUTE_DESTINATION:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The destination for static route.'),
                        required=True,
                        constraints=[constraints.CustomConstraint('net_cidr')
                                     ]),
                    ROUTE_NEXTHOP:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The next hop for the destination.'),
                        required=True,
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                },
            ),
            update_allowed=True),
        IPV6_RA_MODE:
        properties.Schema(
            properties.Schema.STRING,
            _('IPv6 RA (Router Advertisement) mode.'),
            constraints=[
                constraints.AllowedValues(
                    [DHCPV6_STATEFUL, DHCPV6_STATELESS, SLAAC]),
            ],
            support_status=support.SupportStatus(version='2015.1')),
        IPV6_ADDRESS_MODE:
        properties.Schema(
            properties.Schema.STRING,
            _('IPv6 address mode.'),
            constraints=[
                constraints.AllowedValues(
                    [DHCPV6_STATEFUL, DHCPV6_STATELESS, SLAAC]),
            ],
            support_status=support.SupportStatus(version='2015.1')),
        SEGMENT:
        properties.Schema(
            properties.Schema.STRING,
            _('The name/ID of the segment to associate.'),
            constraints=[constraints.CustomConstraint('neutron.segment')],
            support_status=support.SupportStatus(version='9.0.0')),
        TAGS:
        properties.Schema(
            properties.Schema.LIST,
            _('The tags to be added to the subnet.'),
            schema=properties.Schema(properties.Schema.STRING),
            update_allowed=True,
            support_status=support.SupportStatus(version='9.0.0')),
    }

    attributes_schema = {
        NAME_ATTR:
        attributes.Schema(_("Friendly name of the subnet."),
                          type=attributes.Schema.STRING),
        NETWORK_ID_ATTR:
        attributes.Schema(_("Parent network of the subnet."),
                          type=attributes.Schema.STRING),
        TENANT_ID_ATTR:
        attributes.Schema(_("Tenant owning the subnet."),
                          type=attributes.Schema.STRING),
        ALLOCATION_POOLS_ATTR:
        attributes.Schema(_("Ip allocation pools and their ranges."),
                          type=attributes.Schema.LIST),
        GATEWAY_IP_ATTR:
        attributes.Schema(_("Ip of the subnet's gateway."),
                          type=attributes.Schema.STRING),
        HOST_ROUTES_ATTR:
        attributes.Schema(_("Additional routes for this subnet."),
                          type=attributes.Schema.LIST),
        IP_VERSION_ATTR:
        attributes.Schema(_("Ip version for the subnet."),
                          type=attributes.Schema.STRING),
        CIDR_ATTR:
        attributes.Schema(_("CIDR block notation for this subnet."),
                          type=attributes.Schema.STRING),
        DNS_NAMESERVERS_ATTR:
        attributes.Schema(_("List of dns nameservers."),
                          type=attributes.Schema.LIST),
        ENABLE_DHCP_ATTR:
        attributes.Schema(
            _("'true' if DHCP is enabled for this subnet; 'false' otherwise."),
            type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        """Translate deprecated/named properties into their resolved forms."""
        return [
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.NETWORK],
                                        value_path=[self.NETWORK_ID]),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.NETWORK],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='network'),
            translation.TranslationRule(props,
                                        translation.TranslationRule.RESOLVE,
                                        [self.SUBNETPOOL],
                                        client_plugin=self.client_plugin(),
                                        finder='find_resourceid_by_name_or_id',
                                        entity='subnetpool'),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE, [self.SEGMENT],
                client_plugin=self.client_plugin('openstack'),
                finder='find_network_segment')
        ]

    @classmethod
    def _null_gateway_ip(cls, props):
        """Convert an empty-string gateway_ip back to None in place."""
        if cls.GATEWAY_IP not in props:
            return
        # Specifying null in the gateway_ip will result in
        # a property containing an empty string.
        # A null gateway_ip has special meaning in the API
        # so this needs to be set back to None.
        # See bug https://bugs.launchpad.net/heat/+bug/1226666
        if props.get(cls.GATEWAY_IP) == '':
            props[cls.GATEWAY_IP] = None

    def validate(self):
        """Validate subnet properties and their mutual constraints."""
        super(Subnet, self).validate()
        subnetpool = self.properties[self.SUBNETPOOL]
        prefixlen = self.properties[self.PREFIXLEN]
        cidr = self.properties[self.CIDR]
        if subnetpool is not None and cidr:
            raise exception.ResourcePropertyConflict(self.SUBNETPOOL,
                                                     self.CIDR)
        if subnetpool is None and not cidr:
            raise exception.PropertyUnspecifiedError(self.SUBNETPOOL,
                                                     self.CIDR)
        if prefixlen and cidr:
            raise exception.ResourcePropertyConflict(self.PREFIXLEN, self.CIDR)
        ra_mode = self.properties[self.IPV6_RA_MODE]
        address_mode = self.properties[self.IPV6_ADDRESS_MODE]

        if (self.properties[self.IP_VERSION] == 4) and (ra_mode
                                                        or address_mode):
            msg = _('ipv6_ra_mode and ipv6_address_mode are not supported '
                    'for ipv4.')
            raise exception.StackValidationFailed(message=msg)
        if ra_mode and address_mode and (ra_mode != address_mode):
            msg = _('When both ipv6_ra_mode and ipv6_address_mode are set, '
                    'they must be equal.')
            raise exception.StackValidationFailed(message=msg)

        gateway_ip = self.properties.get(self.GATEWAY_IP)
        if (gateway_ip and gateway_ip not in ['~', '']
                and not netutils.is_valid_ip(gateway_ip)):
            # BUGFIX: this previously built a (format-string, value) tuple
            # and passed it as the message, so the placeholder was never
            # substituted; apply the formatting instead.
            msg = _('Gateway IP address "%(gateway)s" is in '
                    'invalid format.') % {'gateway': gateway_ip}
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the subnet in neutron and apply any tags."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        props['network_id'] = props.pop(self.NETWORK)
        if self.SEGMENT in props and props[self.SEGMENT]:
            props['segment_id'] = props.pop(self.SEGMENT)

        # Tags are applied in a separate API call after creation.
        tags = props.pop(self.TAGS, [])

        if self.SUBNETPOOL in props and props[self.SUBNETPOOL]:
            props['subnetpool_id'] = props.pop('subnetpool')
        self._null_gateway_ip(props)

        subnet = self.client().create_subnet({'subnet': props})['subnet']
        self.resource_id_set(subnet['id'])

        if tags:
            self.set_tags(tags)

    def handle_delete(self):
        """Delete the subnet, ignoring the case where it is already gone."""
        try:
            self.client().delete_subnet(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update changed subnet properties and tags in neutron."""
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            # Clearing allocation pools requires passing an empty list.
            if (self.ALLOCATION_POOLS in prop_diff
                    and prop_diff[self.ALLOCATION_POOLS] is None):
                prop_diff[self.ALLOCATION_POOLS] = []

            # If the new value is '', set to None
            self._null_gateway_ip(prop_diff)
            if self.TAGS in prop_diff:
                tags = prop_diff.pop(self.TAGS)
                self.set_tags(tags)
            self.client().update_subnet(self.resource_id,
                                        {'subnet': prop_diff})
Пример #30
0
class Workflow(signal_responder.SignalResponder,
               resource.Resource):
    """A resource that manages a Mistral workflow.

    Creating the resource registers a workflow definition with Mistral;
    signalling the resource creates new executions of that workflow, whose
    details are exposed through the resource attributes.
    """

    support_status = support.SupportStatus(version='2015.1')

    # Client plugin used for all Mistral API calls.
    default_client_name = 'mistral'

    # Entity name used by the generic show/list machinery.
    entity = 'workflows'

    PROPERTIES = (
        NAME, TYPE, DESCRIPTION, INPUT, OUTPUT, TASKS, PARAMS, TASK_DEFAULTS
    ) = (
        'name', 'type', 'description', 'input', 'output', 'tasks', 'params',
        'task_defaults'
    )

    # Keys allowed inside each entry of the TASKS property.
    _TASKS_KEYS = (
        TASK_NAME, TASK_DESCRIPTION, ON_ERROR, ON_COMPLETE, ON_SUCCESS,
        POLICIES, ACTION, WORKFLOW, PUBLISH, TASK_INPUT, REQUIRES,
        RETRY, WAIT_BEFORE, WAIT_AFTER, PAUSE_BEFORE, TIMEOUT,
        WITH_ITEMS, KEEP_RESULT, TARGET
    ) = (
        'name', 'description', 'on_error', 'on_complete', 'on_success',
        'policies', 'action', 'workflow', 'publish', 'input', 'requires',
        'retry', 'wait_before', 'wait_after', 'pause_before', 'timeout',
        'with_items', 'keep_result', 'target'
    )

    # Subset of task keys that may also appear in TASK_DEFAULTS.
    _TASKS_TASK_DEFAULTS = [
        ON_ERROR, ON_COMPLETE, ON_SUCCESS,
        REQUIRES, RETRY, WAIT_BEFORE, WAIT_AFTER, PAUSE_BEFORE, TIMEOUT
    ]

    # Keys recognized in an incoming signal payload.
    _SIGNAL_DATA_KEYS = (
        SIGNAL_DATA_INPUT, SIGNAL_DATA_PARAMS
    ) = (
        'input', 'params'
    )

    ATTRIBUTES = (
        WORKFLOW_DATA, ALARM_URL, EXECUTIONS
    ) = (
        'data', 'alarm_url', 'executions'
    )

    # Schema of the resource properties; TASKS entries mirror the Mistral
    # DSL v2 task attributes (underscored here, hyphenated in the DSL).
    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Workflow name.')
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Workflow type.'),
            constraints=[
                constraints.AllowedValues(['direct', 'reverse'])
            ],
            required=True,
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Workflow description.'),
            update_allowed=True
        ),
        INPUT: properties.Schema(
            properties.Schema.MAP,
            _('Dictionary which contains input for workflow.'),
            update_allowed=True
        ),
        OUTPUT: properties.Schema(
            properties.Schema.MAP,
            _('Any data structure arbitrarily containing YAQL '
              'expressions that defines workflow output. May be '
              'nested.'),
            update_allowed=True
        ),
        PARAMS: properties.Schema(
            properties.Schema.MAP,
            _("Workflow additional parameters. If Workflow is reverse typed, "
              "params requires 'task_name', which defines initial task."),
            update_allowed=True
        ),
        TASK_DEFAULTS: properties.Schema(
            properties.Schema.MAP,
            _("Default settings for some of task "
              "attributes defined "
              "at workflow level."),
            support_status=support.SupportStatus(version='5.0.0'),
            schema={
                ON_SUCCESS: properties.Schema(
                    properties.Schema.LIST,
                    _('List of tasks which will run after '
                      'the task has completed successfully.')
                ),
                ON_ERROR: properties.Schema(
                    properties.Schema.LIST,
                    _('List of tasks which will run after '
                      'the task has completed with an error.')
                ),
                ON_COMPLETE: properties.Schema(
                    properties.Schema.LIST,
                    _('List of tasks which will run after '
                      'the task has completed regardless of whether '
                      'it is successful or not.')
                ),
                REQUIRES: properties.Schema(
                    properties.Schema.LIST,
                    _('List of tasks which should be executed before '
                      'this task. Used only in reverse workflows.')
                ),
                RETRY: properties.Schema(
                    properties.Schema.MAP,
                    _('Defines a pattern how task should be repeated in '
                      'case of an error.')
                ),
                WAIT_BEFORE: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Defines a delay in seconds that Mistral Engine'
                      ' should wait before starting a task.')
                ),
                WAIT_AFTER: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Defines a delay in seconds that Mistral Engine'
                      ' should wait after a task has completed before'
                      ' starting next tasks defined in '
                      'on-success, on-error or on-complete.')
                ),
                PAUSE_BEFORE: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('Defines whether Mistral Engine should put the '
                      'workflow on hold or not before starting a task')
                ),
                TIMEOUT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Defines a period of time in seconds after which '
                      'a task will be failed automatically '
                      'by engine if hasn\'t completed.')
                ),
            },
            update_allowed=True
        ),
        TASKS: properties.Schema(
            properties.Schema.LIST,
            _('Dictionary containing workflow tasks.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    TASK_NAME: properties.Schema(
                        properties.Schema.STRING,
                        _('Task name.'),
                        required=True
                    ),
                    TASK_DESCRIPTION: properties.Schema(
                        properties.Schema.STRING,
                        _('Task description.')
                    ),
                    TASK_INPUT: properties.Schema(
                        properties.Schema.MAP,
                        _('Actual input parameter values of the task.')
                    ),
                    ACTION: properties.Schema(
                        properties.Schema.STRING,
                        _('Name of the action associated with the task. '
                          'Either action or workflow may be defined in the '
                          'task.')
                    ),
                    WORKFLOW: properties.Schema(
                        properties.Schema.STRING,
                        _('Name of the workflow associated with the task. '
                          'Can be defined by intrinsic function get_resource '
                          'or by name of the referenced workflow, i.e. '
                          '{ workflow: wf_name } or '
                          '{ workflow: { get_resource: wf_name }}. Either '
                          'action or workflow may be defined in the task.')
                    ),
                    PUBLISH: properties.Schema(
                        properties.Schema.MAP,
                        _('Dictionary of variables to publish to '
                          'the workflow context.')
                    ),
                    ON_SUCCESS: properties.Schema(
                        properties.Schema.LIST,
                        _('List of tasks which will run after '
                          'the task has completed successfully.')
                    ),
                    ON_ERROR: properties.Schema(
                        properties.Schema.LIST,
                        _('List of tasks which will run after '
                          'the task has completed with an error.')
                    ),
                    ON_COMPLETE: properties.Schema(
                        properties.Schema.LIST,
                        _('List of tasks which will run after '
                          'the task has completed regardless of whether '
                          'it is successful or not.')
                    ),
                    POLICIES: properties.Schema(
                        properties.Schema.MAP,
                        _('Dictionary-like section defining task policies '
                          'that influence how Mistral Engine runs tasks. Must '
                          'satisfy Mistral DSL v2.'),
                        support_status=support.SupportStatus(
                            status=support.DEPRECATED,
                            version='5.0.0',
                            message=_('Add needed policies directly to '
                                      'the task, Policy keyword is not '
                                      'needed'),
                            previous_status=support.SupportStatus(
                                version='2015.1'))
                    ),
                    REQUIRES: properties.Schema(
                        properties.Schema.LIST,
                        _('List of tasks which should be executed before '
                          'this task. Used only in reverse workflows.')
                    ),
                    RETRY: properties.Schema(
                        properties.Schema.MAP,
                        _('Defines a pattern how task should be repeated in '
                          'case of an error.'),
                        support_status=support.SupportStatus(version='5.0.0')
                    ),
                    WAIT_BEFORE: properties.Schema(
                        properties.Schema.INTEGER,
                        _('Defines a delay in seconds that Mistral Engine '
                          'should wait before starting a task.'),
                        support_status=support.SupportStatus(version='5.0.0')
                    ),
                    WAIT_AFTER: properties.Schema(
                        properties.Schema.INTEGER,
                        _('Defines a delay in seconds that Mistral '
                          'Engine should wait after '
                          'a task has completed before starting next tasks '
                          'defined in on-success, on-error or on-complete.'),
                        support_status=support.SupportStatus(version='5.0.0')
                    ),
                    PAUSE_BEFORE: properties.Schema(
                        properties.Schema.BOOLEAN,
                        _('Defines whether Mistral Engine should '
                          'put the workflow on hold '
                          'or not before starting a task.'),
                        support_status=support.SupportStatus(version='5.0.0')
                    ),
                    TIMEOUT: properties.Schema(
                        properties.Schema.INTEGER,
                        _('Defines a period of time in seconds after which a '
                          'task will be failed automatically by engine '
                          'if hasn\'t completed.'),
                        support_status=support.SupportStatus(version='5.0.0')
                    ),
                    WITH_ITEMS: properties.Schema(
                        properties.Schema.STRING,
                        _('If configured, it allows to run action or workflow '
                          'associated with a task multiple times '
                          'on a provided list of items.'),
                        support_status=support.SupportStatus(version='5.0.0')
                    ),
                    KEEP_RESULT: properties.Schema(
                        properties.Schema.BOOLEAN,
                        _('Allowing not to store action results '
                          'after task completion.'),
                        support_status=support.SupportStatus(version='5.0.0')
                    ),
                    TARGET: properties.Schema(
                        properties.Schema.STRING,
                        _('It defines an executor to which task action '
                          'should be sent to.'),
                        support_status=support.SupportStatus(version='5.0.0')
                    ),
                },
            ),
            required=True,
            update_allowed=True
        )
    }

    # Schema of the resource attributes resolved in _resolve_attribute().
    attributes_schema = {
        WORKFLOW_DATA: attributes.Schema(
            _('A dictionary which contains name and input of the workflow.'),
            type=attributes.Schema.MAP
        ),
        ALARM_URL: attributes.Schema(
            _("A signed url to create executions for workflows specified in "
              "Workflow resource."),
            type=attributes.Schema.STRING
        ),
        EXECUTIONS: attributes.Schema(
            _("List of workflows' executions, each of them is a dictionary "
              "with information about execution. Each dictionary returns "
              "values for next keys: id, workflow_name, created_at, "
              "updated_at, state for current execution state, input, output."),
            type=attributes.Schema.LIST
        )
    }

    def get_reference_id(self):
        """Return the workflow name used when this resource is referenced."""
        return self._workflow_name()

    def _validate_signal_data(self, data):
        """Validate the payload of an incoming signal.

        When present, 'input' must be a map whose keys are all declared in
        the INPUT property, and 'params' must be a map.

        :param data: signal payload (dict) or None
        :raises exception.StackValidationFailed: if the payload is malformed
        """
        if data is None:
            return
        input_value = data.get(self.SIGNAL_DATA_INPUT)
        params_value = data.get(self.SIGNAL_DATA_PARAMS)
        if input_value is not None:
            if not isinstance(input_value, dict):
                message = (_('Input in signal data must be a map, '
                             'find a %s') % type(input_value))
                raise exception.StackValidationFailed(
                    error=_('Signal data error'),
                    message=message)
            # Hoisted out of the loop: the declared inputs do not change
            # while validating the payload keys.
            declared_inputs = self.properties.get(self.INPUT)
            for key in input_value:
                if declared_inputs is None or key not in declared_inputs:
                    message = _('Unknown input %s') % key
                    raise exception.StackValidationFailed(
                        error=_('Signal data error'),
                        message=message)
        # The body below was previously over-indented, which misleadingly
        # suggested an extra level of nesting; behavior is unchanged.
        if params_value is not None and not isinstance(params_value, dict):
            message = (_('Params must be a map, find a '
                         '%s') % type(params_value))
            raise exception.StackValidationFailed(
                error=_('Signal data error'),
                message=message)

    def validate(self):
        """Validate the workflow properties.

        Checks that a reverse workflow defines params.task_name, that each
        task defines exactly one of 'action'/'workflow', that 'requires' is
        only used in reverse workflows, and that deprecated 'policies' keys
        do not duplicate task-level attributes.

        :raises exception.StackValidationFailed: on any violation
        :raises exception.ResourcePropertyConflict: if a task defines both
            'action' and 'workflow'
        :raises exception.PropertyUnspecifiedError: if a task defines
            neither 'action' nor 'workflow'
        """
        super(Workflow, self).validate()
        if self.properties.get(self.TYPE) == 'reverse':
            params = self.properties.get(self.PARAMS)
            if params is None or not params.get('task_name'):
                raise exception.StackValidationFailed(
                    error=_('Mistral resource validation error'),
                    path=[self.name,
                          ('properties'
                           if self.stack.t.VERSION == 'heat_template_version'
                           else 'Properties'),
                          self.PARAMS],
                    message=_("'task_name' is not assigned in 'params' "
                              "in case of reverse type workflow.")
                )
        for task in self.properties.get(self.TASKS):
            wf_value = task.get(self.WORKFLOW)
            action_value = task.get(self.ACTION)
            if wf_value and action_value:
                raise exception.ResourcePropertyConflict(self.WORKFLOW,
                                                         self.ACTION)
            if not wf_value and not action_value:
                raise exception.PropertyUnspecifiedError(self.WORKFLOW,
                                                         self.ACTION)
            # The closing parenthesis was previously misplaced so that
            # 'direct' was compared against the whole and-expression
            # (accidentally equivalent because the left operand is a
            # boolean); the intended condition is spelled out here.
            if (task.get(self.REQUIRES) is not None
                    and self.properties.get(self.TYPE) == 'direct'):
                msg = _("task %(task)s contains property 'requires' "
                        "in case of direct workflow. Only reverse workflows "
                        "can contain property 'requires'.") % {
                    'task': task.get(self.TASK_NAME)
                }
                raise exception.StackValidationFailed(
                    error=_('Mistral resource validation error'),
                    path=[self.name,
                          ('properties'
                           if self.stack.t.VERSION == 'heat_template_version'
                           else 'Properties'),
                          self.TASKS,
                          task.get(self.TASK_NAME),
                          self.REQUIRES],
                    message=msg)

            # Deprecated 'policies' keys must not duplicate the same key
            # specified directly on the task.
            if task.get(self.POLICIES) is not None:
                for task_item in task.get(self.POLICIES):
                    if task.get(task_item) is not None:
                        msg = _('Property %(policies)s and %(item)s cannot be '
                                'used both at one time.') % {
                            'policies': self.POLICIES,
                            'item': task_item
                        }
                        raise exception.StackValidationFailed(message=msg)

    def _workflow_name(self):
        """Return the NAME property if set, else the physical resource name."""
        configured_name = self.properties.get(self.NAME)
        if configured_name:
            return configured_name
        return self.physical_resource_name()

    def build_tasks(self, props):
        """Yield Mistral DSL task definitions built from the TASKS property.

        Each yielded item is a single-entry dict mapping the task name to
        its hyphen-keyed attribute map.

        :param props: resource properties containing the TASKS list
        :raises ValueError: if a referenced workflow is not a resource of
            this stack
        """
        # Collect the known resource ids once, instead of rebuilding the
        # whole list for every task (was O(tasks * resources)).
        stack_resource_ids = set(
            res.resource_id for res in six.itervalues(self.stack))
        # Keys that are handled specially and must not be copied verbatim.
        excluded_keys = (self.WORKFLOW, self.TASK_NAME, self.POLICIES)

        for task in props[self.TASKS]:
            current_task = {}
            wf_value = task.get(self.WORKFLOW)
            if wf_value is not None:
                if wf_value in stack_resource_ids:
                    current_task.update({self.WORKFLOW: wf_value})
                else:
                    msg = _("No such workflow %s") % wf_value
                    raise ValueError(msg)

            # backward support for kilo: merge deprecated 'policies' keys
            # into the task itself.
            if task.get(self.POLICIES) is not None:
                task.update(task.get(self.POLICIES))

            for task_prop in self._TASKS_KEYS:
                if task_prop in excluded_keys:
                    continue
                if task.get(task_prop) is not None:
                    # The Mistral DSL uses hyphenated key names.
                    current_task.update(
                        {task_prop.replace('_', '-'): task[task_prop]})

            yield {task[self.TASK_NAME]: current_task}

    def prepare_properties(self, props):
        """Prepare correct YAML-formatted definition for Mistral."""
        defn_name = self._workflow_name()

        # Collect only the attributes that are actually set.
        body = {}
        for key in (self.TYPE, self.DESCRIPTION, self.OUTPUT):
            value = props.get(key)
            if value is not None:
                body[key] = value

        inputs = props.get(self.INPUT)
        if inputs is not None:
            # Mistral expects the input names only, not their values.
            body[self.INPUT] = list(inputs.keys())

        tasks = {}
        for task in self.build_tasks(props):
            tasks.update(task)
        body[self.TASKS] = tasks

        task_defaults = props.get(self.TASK_DEFAULTS)
        if task_defaults is not None:
            # The DSL uses hyphenated key names.
            body[self.TASK_DEFAULTS.replace('_', '-')] = {
                k.replace('_', '-'): v
                for k, v in six.iteritems(task_defaults) if v}

        definition = {'version': '2.0', defn_name: body}
        dumper = (yaml.CSafeDumper if hasattr(yaml, 'CSafeDumper')
                  else yaml.SafeDumper)
        return yaml.dump(definition, Dumper=dumper)

    def handle_create(self):
        """Register the workflow definition with Mistral."""
        super(Workflow, self).handle_create()
        definition_yaml = self.prepare_properties(self.properties)
        try:
            created = self.client().workflows.create(definition_yaml)
        except Exception as ex:
            raise exception.ResourceFailure(ex, self)
        # NOTE(prazumovsky): Mistral uses unique names for resource
        # identification, so the name is stored as the resource id.
        self.resource_id_set(created[0].name)

    def handle_signal(self, details=None):
        """Create a new execution of the workflow.

        The signal payload may carry 'input' and/or 'params' maps whose
        values override the template-defined ones. The new execution id is
        prepended to the comma-separated list stored in resource data.

        :param details: signal payload (dict) or None
        :raises exception.ResourceFailure: if the execution cannot be created
        """
        self._validate_signal_data(details)

        result_input = {}
        result_params = {}

        if details is not None:
            # Use the SIGNAL_DATA_* constants consistently for payload
            # access (previously self.INPUT was used for the same key).
            signal_input = details.get(self.SIGNAL_DATA_INPUT)
            signal_params = details.get(self.SIGNAL_DATA_PARAMS)
            if signal_input is not None:
                # NOTE(prazumovsky): a signal can contain data of interest
                # to the workflow, e.g. inputs. If the signal data contains
                # inputs, they override the template-defined ones; the rest
                # keep their template values.
                # NOTE(review): a falsy signal value (0, '', False) falls
                # back to the template value because of the 'or' below --
                # preserved as-is; confirm whether that is intended.
                for key, value in six.iteritems(
                        self.properties.get(self.INPUT)):
                    result_input[key] = signal_input.get(key) or value
            if signal_params is not None:
                if self.properties.get(self.PARAMS) is not None:
                    result_params.update(self.properties.get(self.PARAMS))
                result_params.update(signal_params)

        # Fall back entirely to the template values when the signal
        # carried nothing usable.
        if not result_input and self.properties.get(self.INPUT):
            result_input.update(self.properties.get(self.INPUT))
        if not result_params and self.properties.get(self.PARAMS):
            result_params.update(self.properties.get(self.PARAMS))

        try:
            execution = self.client().executions.create(
                self._workflow_name(),
                jsonutils.dumps(result_input),
                **result_params)
        except Exception as ex:
            raise exception.ResourceFailure(ex, self)
        executions = [execution.id]
        if self.EXECUTIONS in self.data():
            executions.extend(self.data().get(self.EXECUTIONS).split(','))
        self.data_set(self.EXECUTIONS, ','.join(executions))

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push an updated workflow definition to Mistral if needed."""
        # INPUT, PARAMS and DESCRIPTION changes do not require pushing a
        # new definition, so they are stripped from the diff first.
        for prop in (self.INPUT, self.PARAMS, self.DESCRIPTION):
            prop_diff.pop(prop, None)
        if not prop_diff:
            return
        new_props = self.prepare_properties(tmpl_diff['Properties'])
        try:
            workflow = self.client().workflows.update(new_props)
        except Exception as ex:
            raise exception.ResourceFailure(ex, self)
        self.data_set(self.NAME, workflow[0].name)
        self.resource_id_set(workflow[0].name)

    def _delete_executions(self):
        """Delete every execution recorded in resource data, if any."""
        if self.data().get(self.EXECUTIONS):
            # 'execution_id' instead of 'id' to avoid shadowing the builtin.
            for execution_id in self.data().get(self.EXECUTIONS).split(','):
                with self.client_plugin().ignore_not_found:
                    self.client().executions.delete(execution_id)

            # Use the EXECUTIONS constant instead of repeating the
            # 'executions' literal (same value, consistent with the rest
            # of the class).
            self.data_delete(self.EXECUTIONS)

    def handle_delete(self):
        """Delete the workflow, removing its recorded executions first."""
        self._delete_executions()
        return super(Workflow, self).handle_delete()

    def _resolve_attribute(self, name):
        """Resolve one of the resource attributes.

        EXECUTIONS returns a detail dict per stored execution id,
        WORKFLOW_DATA returns the workflow name and input map, and
        ALARM_URL returns the EC2-signed signal URL.
        """
        if name == self.EXECUTIONS:
            if self.EXECUTIONS not in self.data():
                return []

            details = []
            for exec_id in self.data().get(self.EXECUTIONS).split(','):
                execution = self.client().executions.get(exec_id)
                details.append({
                    'id': execution.id,
                    'workflow_name': execution.workflow_name,
                    'created_at': execution.created_at,
                    'updated_at': execution.updated_at,
                    'state': execution.state,
                    'input': jsonutils.loads(six.text_type(execution.input)),
                    'output': jsonutils.loads(
                        six.text_type(execution.output)),
                })
            return details

        if name == self.WORKFLOW_DATA:
            return {self.NAME: self.resource_id,
                    self.INPUT: self.properties.get(self.INPUT)}

        if name == self.ALARM_URL:
            return six.text_type(self._get_ec2_signed_url())

    # TODO(tlashchova): remove this method when mistralclient>1.0.0 is used.
    def _show_resource(self):
        """Return the raw data of this workflow.

        Falls back to the private _data attribute for old mistralclient
        resources that lack to_dict().
        """
        workflow = self.client().workflows.get(self.resource_id)
        if hasattr(workflow, 'to_dict'):
            # The superclass result was previously computed and then
            # discarded; return it when the client supports to_dict().
            return super(Workflow, self)._show_resource()
        return workflow._data