Example #1
0
class SoftwareComponent(sc.SoftwareConfig):
    '''
    A resource for describing and storing a software component.

    This resource is similar to OS::Heat::SoftwareConfig. In contrast to
    SoftwareConfig which allows for storing only one configuration (e.g. one
    script), SoftwareComponent allows for storing multiple configurations to
    address handling of all lifecycle hooks (CREATE, UPDATE, SUSPEND, RESUME,
    DELETE) for a software component in one place.

    This resource is backed by the persistence layer and the API of the
    SoftwareConfig resource, and only adds handling for the additional
    'configs' property and attribute.
    '''

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        CONFIGS,
        INPUTS,
        OUTPUTS,
        OPTIONS,
    ) = ('configs', 'inputs', 'outputs', 'options')

    CONFIG_PROPERTIES = (
        CONFIG_ACTIONS,
        CONFIG_CONFIG,
        CONFIG_TOOL,
    ) = (
        'actions',
        'config',
        'tool',
    )

    ATTRIBUTES = (CONFIGS_ATTR, ) = ('configs', )

    # properties schema for one entry in the 'configs' list
    config_schema = properties.Schema(
        properties.Schema.MAP,
        schema={
            CONFIG_ACTIONS:
            properties.Schema(
                # Note: This properties schema allows for custom actions to be
                # specified, which will however require special handling in
                # in-instance hooks. By default, only the standard actions
                # stated below will be handled.
                properties.Schema.LIST,
                _('Lifecycle actions to which the configuration applies. '
                  'The string values provided for this property can include '
                  'the standard resource actions CREATE, DELETE, UPDATE, '
                  'SUSPEND and RESUME supported by Heat.'),
                default=[resource.Resource.CREATE, resource.Resource.UPDATE],
                schema=properties.Schema(properties.Schema.STRING),
                constraints=[
                    constr.Length(min=1),
                ]),
            CONFIG_CONFIG:
            sc.SoftwareConfig.properties_schema[sc.SoftwareConfig.CONFIG],
            CONFIG_TOOL:
            properties.Schema(
                properties.Schema.STRING,
                _('The configuration tool used to actually apply the '
                  'configuration on a server. This string property has '
                  'to be understood by in-instance tools running inside '
                  'deployed servers.'),
                required=True)
        })

    properties_schema = {
        CONFIGS:
        properties.Schema(
            properties.Schema.LIST,
            _('The list of configurations for the different lifecycle actions '
              'of the represented software component.'),
            schema=config_schema,
            constraints=[constr.Length(min=1)],
            required=True),
        INPUTS:
        sc.SoftwareConfig.properties_schema[sc.SoftwareConfig.INPUTS],
        OUTPUTS:
        sc.SoftwareConfig.properties_schema[sc.SoftwareConfig.OUTPUTS],
        OPTIONS:
        sc.SoftwareConfig.properties_schema[sc.SoftwareConfig.OPTIONS],
    }

    def handle_create(self):
        '''Create the backing SoftwareConfig via the RPC API.

        The 'configs' list is packed into the parent class's 'config'
        property, and 'group' is set so in-instance hooks recognize the
        component format.
        '''
        props = dict(self.properties)
        props[self.NAME] = self.physical_resource_name()
        # use config property of SoftwareConfig to store configs list
        configs = self.properties[self.CONFIGS]
        props[self.CONFIG] = {self.CONFIGS: configs}
        # set 'group' to enable component processing by in-instance hook
        props[self.GROUP] = 'component'
        # 'configs' is not a property understood by the SoftwareConfig API;
        # it has been folded into 'config' above.
        del props[self.CONFIGS]

        # NOTE: use a name other than 'sc' here so the module alias used
        # by the schema definitions above is not shadowed.
        config = self.rpc_client().create_software_config(self.context,
                                                          **props)
        self.resource_id_set(config[rpc_api.SOFTWARE_CONFIG_ID])

    def _resolve_attribute(self, name):
        '''
        Retrieve attributes of the SoftwareComponent resource.

        'configs' returns the list of configurations for the software
        component's lifecycle actions. If the attribute does not exist,
        an empty list is returned.
        '''
        if name == self.CONFIGS_ATTR and self.resource_id:
            try:
                config = self.rpc_client().show_software_config(
                    self.context, self.resource_id)
                # configs list is stored in 'config' property of parent class
                # (see handle_create)
                return config[rpc_api.SOFTWARE_CONFIG_CONFIG].get(self.CONFIGS)
            except Exception as ex:
                # a missing backing config resolves to None rather than
                # failing the whole stack operation
                self.rpc_client().ignore_error_named(ex, 'NotFound')

    def validate(self):
        '''Validate SoftwareComponent properties consistency.'''
        super(SoftwareComponent, self).validate()

        # One lifecycle action (e.g. CREATE) can only be associated with one
        # config; otherwise a way to define ordering would be required.
        configs = self.properties.get(self.CONFIGS, [])
        config_actions = set()
        for config in configs:
            # guard against a null 'actions' entry so validation reports a
            # clean error instead of raising TypeError
            actions = config.get(self.CONFIG_ACTIONS) or []
            if any(action in config_actions for action in actions):
                msg = _('Defining more than one configuration for the same '
                        'action in SoftwareComponent "%s" is not allowed.'
                        ) % self.name
                raise exception.StackValidationFailed(message=msg)
            config_actions.update(actions)
Example #2
0
class DockerContainer(resource.Resource):
    """A Heat resource managing the lifecycle of a single Docker container.

    Creation pulls the image, creates and starts the container; deletion
    kills and removes it; suspend/resume map to docker stop/start.
    """

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    PROPERTIES = (DOCKER_ENDPOINT, HOSTNAME, USER, MEMORY, PORT_SPECS,
                  PRIVILEGED, TTY, OPEN_STDIN, STDIN_ONCE, ENV, CMD, DNS,
                  IMAGE, VOLUMES, VOLUMES_FROM, PORT_BINDINGS, LINKS, NAME,
                  RESTART_POLICY, CAP_ADD, CAP_DROP, READ_ONLY, CPU_SHARES,
                  DEVICES,
                  CPU_SET) = ('docker_endpoint', 'hostname', 'user', 'memory',
                              'port_specs', 'privileged', 'tty', 'open_stdin',
                              'stdin_once', 'env', 'cmd', 'dns', 'image',
                              'volumes', 'volumes_from', 'port_bindings',
                              'links', 'name', 'restart_policy', 'cap_add',
                              'cap_drop', 'read_only', 'cpu_shares', 'devices',
                              'cpu_set')

    ATTRIBUTES = (
        INFO,
        NETWORK_INFO,
        NETWORK_IP,
        NETWORK_GATEWAY,
        NETWORK_TCP_PORTS,
        NETWORK_UDP_PORTS,
        LOGS,
        LOGS_HEAD,
        LOGS_TAIL,
    ) = (
        'info',
        'network_info',
        'network_ip',
        'network_gateway',
        'network_tcp_ports',
        'network_udp_ports',
        'logs',
        'logs_head',
        'logs_tail',
    )

    _RESTART_POLICY_KEYS = (
        POLICY_NAME,
        POLICY_MAXIMUM_RETRY_COUNT,
    ) = (
        'Name',
        'MaximumRetryCount',
    )

    _DEVICES_KEYS = (PATH_ON_HOST, PATH_IN_CONTAINER,
                     PERMISSIONS) = ('path_on_host', 'path_in_container',
                                     'permissions')

    _CAPABILITIES = [
        'SETPCAP', 'SYS_MODULE', 'SYS_RAWIO', 'SYS_PACCT', 'SYS_ADMIN',
        'SYS_NICE', 'SYS_RESOURCE', 'SYS_TIME', 'SYS_TTY_CONFIG', 'MKNOD',
        'AUDIT_WRITE', 'AUDIT_CONTROL', 'MAC_OVERRIDE', 'MAC_ADMIN',
        'NET_ADMIN', 'SYSLOG', 'CHOWN', 'NET_RAW', 'DAC_OVERRIDE', 'FOWNER',
        'DAC_READ_SEARCH', 'FSETID', 'KILL', 'SETGID', 'SETUID',
        'LINUX_IMMUTABLE', 'NET_BIND_SERVICE', 'NET_BROADCAST', 'IPC_LOCK',
        'IPC_OWNER', 'SYS_CHROOT', 'SYS_PTRACE', 'SYS_BOOT', 'LEASE',
        'SETFCAP', 'WAKE_ALARM', 'BLOCK_SUSPEND', 'ALL'
    ]

    properties_schema = {
        DOCKER_ENDPOINT:
        properties.Schema(
            properties.Schema.STRING,
            _('Docker daemon endpoint (by default the local docker daemon '
              'will be used).'),
            default=None),
        HOSTNAME:
        properties.Schema(properties.Schema.STRING,
                          _('Hostname of the container.'),
                          default=''),
        USER:
        properties.Schema(properties.Schema.STRING,
                          _('Username or UID.'),
                          default=''),
        MEMORY:
        properties.Schema(properties.Schema.INTEGER,
                          _('Memory limit (Bytes).')),
        PORT_SPECS:
        properties.Schema(properties.Schema.LIST,
                          _('TCP/UDP ports mapping.'),
                          default=None),
        PORT_BINDINGS:
        properties.Schema(
            properties.Schema.MAP,
            _('TCP/UDP ports bindings.'),
        ),
        LINKS:
        properties.Schema(
            properties.Schema.MAP,
            _('Links to other containers.'),
        ),
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Name of the container.'),
        ),
        PRIVILEGED:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('Enable extended privileges.'),
                          default=False),
        TTY:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('Allocate a pseudo-tty.'),
                          default=False),
        OPEN_STDIN:
        properties.Schema(properties.Schema.BOOLEAN,
                          _('Open stdin.'),
                          default=False),
        STDIN_ONCE:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('If true, close stdin after the 1 attached client disconnects.'),
            default=False),
        ENV:
        properties.Schema(
            properties.Schema.LIST,
            _('Set environment variables.'),
        ),
        CMD:
        properties.Schema(properties.Schema.LIST,
                          _('Command to run after spawning the container.'),
                          default=[]),
        DNS:
        properties.Schema(
            properties.Schema.LIST,
            _('Set custom dns servers.'),
        ),
        IMAGE:
        properties.Schema(properties.Schema.STRING, _('Image name.')),
        VOLUMES:
        properties.Schema(properties.Schema.MAP,
                          _('Create a bind mount.'),
                          default={}),
        VOLUMES_FROM:
        properties.Schema(properties.Schema.LIST,
                          _('Mount all specified volumes.'),
                          default=''),
        RESTART_POLICY:
        properties.Schema(
            properties.Schema.MAP,
            _('Restart policies (only supported for API version >= 1.2.0).'),
            schema={
                POLICY_NAME:
                properties.Schema(
                    properties.Schema.STRING,
                    _('The behavior to apply when the container exits.'),
                    default='no',
                    constraints=[
                        constraints.AllowedValues(
                            ['no', 'on-failure', 'always']),
                    ]),
                POLICY_MAXIMUM_RETRY_COUNT:
                properties.Schema(properties.Schema.INTEGER,
                                  _('A maximum restart count for the '
                                    'on-failure policy.'),
                                  default=0)
            },
            default={},
            support_status=support.SupportStatus(version='2015.1')),
        CAP_ADD:
        properties.Schema(
            properties.Schema.LIST,
            _('Be used to add kernel capabilities (only supported for '
              'API version >= 1.2.0).'),
            schema=properties.Schema(
                properties.Schema.STRING,
                _('The security features provided by Linux kernels.'),
                constraints=[
                    constraints.AllowedValues(_CAPABILITIES),
                ]),
            default=[],
            support_status=support.SupportStatus(version='2015.1')),
        CAP_DROP:
        properties.Schema(
            properties.Schema.LIST,
            _('Be used to drop kernel capabilities (only supported for '
              'API version >= 1.2.0).'),
            schema=properties.Schema(
                properties.Schema.STRING,
                _('The security features provided by Linux kernels.'),
                constraints=[
                    constraints.AllowedValues(_CAPABILITIES),
                ]),
            default=[],
            support_status=support.SupportStatus(version='2015.1')),
        READ_ONLY:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('If true, mount the container\'s root filesystem '
              'as read only (only supported for API version >= %s).') %
            MIN_API_VERSION_MAP['read_only'],
            default=False,
            support_status=support.SupportStatus(version='2015.1'),
        ),
        CPU_SHARES:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Relative weight which determines the allocation of the CPU '
              'processing power(only supported for API version >= %s).') %
            MIN_API_VERSION_MAP['cpu_shares'],
            default=0,
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        DEVICES:
        properties.Schema(
            properties.Schema.LIST,
            _('Device mappings (only supported for API version >= %s).') %
            MIN_API_VERSION_MAP['devices'],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    PATH_ON_HOST:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The device path on the host.'),
                        constraints=[
                            constraints.Length(max=255),
                            constraints.AllowedPattern(DEVICE_PATH_REGEX),
                        ],
                        required=True),
                    PATH_IN_CONTAINER:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The device path of the container'
                          ' mappings to the host.'),
                        constraints=[
                            constraints.Length(max=255),
                            constraints.AllowedPattern(DEVICE_PATH_REGEX),
                        ],
                    ),
                    PERMISSIONS:
                    properties.Schema(properties.Schema.STRING,
                                      _('The permissions of the container to'
                                        ' read/write/create the devices.'),
                                      constraints=[
                                          constraints.AllowedValues([
                                              'r', 'w', 'm', 'rw', 'rm', 'wm',
                                              'rwm'
                                          ]),
                                      ],
                                      default='rwm')
                }),
            default=[],
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        CPU_SET:
        properties.Schema(
            properties.Schema.STRING,
            _('The CPUs in which to allow execution '
              '(only supported for API version >= %s).') %
            MIN_API_VERSION_MAP['cpu_set'],
            support_status=support.SupportStatus(version='5.0.0'),
        )
    }

    attributes_schema = {
        INFO: attributes.Schema(_('Container info.')),
        NETWORK_INFO: attributes.Schema(_('Container network info.')),
        NETWORK_IP: attributes.Schema(_('Container ip address.')),
        NETWORK_GATEWAY: attributes.Schema(_('Container ip gateway.')),
        NETWORK_TCP_PORTS: attributes.Schema(_('Container TCP ports.')),
        NETWORK_UDP_PORTS: attributes.Schema(_('Container UDP ports.')),
        LOGS: attributes.Schema(_('Container logs.')),
        LOGS_HEAD: attributes.Schema(_('Container first logs line.')),
        LOGS_TAIL: attributes.Schema(_('Container last logs line.')),
    }

    def get_client(self):
        """Return a docker API client, or None if docker-py is missing.

        Uses the 'docker_endpoint' property when set, otherwise the
        library's default (local daemon) endpoint.
        """
        client = None
        if DOCKER_INSTALLED:
            endpoint = self.properties.get(self.DOCKER_ENDPOINT)
            if endpoint:
                client = docker.APIClient(endpoint)
            else:
                client = docker.APIClient()
        return client

    def _parse_networkinfo_ports(self, networkinfo):
        """Split the 'Ports' mapping into comma-joined TCP and UDP lists.

        :param networkinfo: the 'NetworkSettings' dict from inspect_container
        :returns: tuple of (tcp_ports, udp_ports) as comma-separated strings
        """
        tcp = []
        udp = []
        for port, info in six.iteritems(networkinfo['Ports']):
            p = port.split('/')
            # skip entries without a host binding or with an unexpected key
            if not info or len(p) != 2 or 'HostPort' not in info[0]:
                continue
            port = info[0]['HostPort']
            if p[1] == 'tcp':
                tcp.append(port)
            elif p[1] == 'udp':
                udp.append(port)
        return (','.join(tcp), ','.join(udp))

    def _container_networkinfo(self, client, resource_id):
        """Return the container's network settings, with TCP/UDP port
        summaries added under 'TcpPorts'/'UdpPorts'.
        """
        # use the resource_id argument; previously this ignored the
        # parameter and always read self.resource_id
        info = client.inspect_container(resource_id)
        networkinfo = info['NetworkSettings']
        ports = self._parse_networkinfo_ports(networkinfo)
        networkinfo['TcpPorts'] = ports[0]
        networkinfo['UdpPorts'] = ports[1]
        return networkinfo

    def _resolve_attribute(self, name):
        """Resolve a resource attribute by querying the docker daemon."""
        if not self.resource_id:
            return
        if name == self.INFO:
            return self.get_client().inspect_container(self.resource_id)
        # attributes derived from the container's network settings
        network_keys = {
            self.NETWORK_IP: 'IPAddress',
            self.NETWORK_GATEWAY: 'Gateway',
            self.NETWORK_TCP_PORTS: 'TcpPorts',
            self.NETWORK_UDP_PORTS: 'UdpPorts',
        }
        if name == self.NETWORK_INFO or name in network_keys:
            networkinfo = self._container_networkinfo(self.get_client(),
                                                      self.resource_id)
            if name == self.NETWORK_INFO:
                return networkinfo
            return networkinfo[network_keys[name]]
        # attributes derived from the container logs
        if name in (self.LOGS, self.LOGS_HEAD, self.LOGS_TAIL):
            logs = self.get_client().logs(self.resource_id)
            if name == self.LOGS:
                return logs
            lines = logs.split('\n')
            return lines[0] if name == self.LOGS_HEAD else lines[-1]

    def handle_create(self):
        """Pull the image, create the container and start it.

        :returns: the new container id, passed to check_create_complete
        """
        create_args = {
            'image': self.properties[self.IMAGE],
            'command': self.properties[self.CMD],
            'hostname': self.properties[self.HOSTNAME],
            'user': self.properties[self.USER],
            'stdin_open': self.properties[self.OPEN_STDIN],
            'tty': self.properties[self.TTY],
            'mem_limit': self.properties[self.MEMORY],
            'ports': self.properties[self.PORT_SPECS],
            'environment': self.properties[self.ENV],
            'dns': self.properties[self.DNS],
            'volumes': self.properties[self.VOLUMES],
            'name': self.properties[self.NAME],
            'cpu_shares': self.properties[self.CPU_SHARES],
            'cpuset': self.properties[self.CPU_SET]
        }
        client = self.get_client()
        client.pull(self.properties[self.IMAGE])
        result = client.create_container(**create_args)
        container_id = result['Id']
        # record the id early so delete can clean up a partial create
        self.resource_id_set(container_id)

        start_args = {}

        if self.properties[self.PRIVILEGED]:
            start_args[self.PRIVILEGED] = True
        if self.properties[self.VOLUMES]:
            start_args['binds'] = self.properties[self.VOLUMES]
        if self.properties[self.VOLUMES_FROM]:
            start_args['volumes_from'] = self.properties[self.VOLUMES_FROM]
        if self.properties[self.PORT_BINDINGS]:
            start_args['port_bindings'] = self.properties[self.PORT_BINDINGS]
        if self.properties[self.LINKS]:
            start_args['links'] = self.properties[self.LINKS]
        if self.properties[self.RESTART_POLICY]:
            start_args['restart_policy'] = self.properties[self.RESTART_POLICY]
        if self.properties[self.CAP_ADD]:
            start_args['cap_add'] = self.properties[self.CAP_ADD]
        if self.properties[self.CAP_DROP]:
            start_args['cap_drop'] = self.properties[self.CAP_DROP]
        if self.properties[self.READ_ONLY]:
            start_args[self.READ_ONLY] = True
        # device mappings are redundant in privileged mode, which already
        # exposes all host devices
        if (self.properties[self.DEVICES]
                and not self.properties[self.PRIVILEGED]):
            start_args['devices'] = self._get_mapping_devices(
                self.properties[self.DEVICES])

        client.start(container_id, **start_args)
        return container_id

    def _get_mapping_devices(self, devices):
        """Convert device property maps to docker 'host:container:perms'
        mapping strings; a missing container path defaults to the host path.
        """
        actual_devices = []
        for device in devices:
            host_path = device[self.PATH_ON_HOST]
            container_path = device[self.PATH_IN_CONTAINER] or host_path
            actual_devices.append(':'.join(
                [host_path, container_path, device[self.PERMISSIONS]]))
        return actual_devices

    def _get_container_status(self, container_id):
        """Return the 'State' dict from inspect_container."""
        client = self.get_client()
        info = client.inspect_container(container_id)
        return info['State']

    def check_create_complete(self, container_id):
        """Poll until the container is running; raise if it exited non-zero,
        attaching its logs as the failure reason.
        """
        status = self._get_container_status(container_id)
        exit_status = status.get('ExitCode')
        if exit_status is not None and exit_status != 0:
            logs = self.get_client().logs(self.resource_id)
            raise exception.ResourceInError(resource_status=self.FAILED,
                                            status_reason=logs)
        return status['Running']

    def handle_delete(self):
        """Kill the container; a 404 from the daemon means already gone."""
        if self.resource_id is None:
            return
        client = self.get_client()
        try:
            client.kill(self.resource_id)
        except docker.errors.APIError as ex:
            if ex.response.status_code != 404:
                raise
        return self.resource_id

    def check_delete_complete(self, container_id):
        """Remove the container once it has stopped; complete when the
        daemon reports it gone (404).
        """
        if container_id is None:
            return True
        try:
            status = self._get_container_status(container_id)
            if not status['Running']:
                client = self.get_client()
                client.remove_container(container_id)
        except docker.errors.APIError as ex:
            if ex.response.status_code == 404:
                return True
            raise

        return False

    def handle_suspend(self):
        """Stop the container (suspend maps to docker stop)."""
        if not self.resource_id:
            return
        client = self.get_client()
        client.stop(self.resource_id)
        return self.resource_id

    def check_suspend_complete(self, container_id):
        """Suspend is complete once the container stopped running."""
        status = self._get_container_status(container_id)
        return (not status['Running'])

    def handle_resume(self):
        """Restart the container (resume maps to docker start)."""
        if not self.resource_id:
            return
        client = self.get_client()
        client.start(self.resource_id)
        return self.resource_id

    def check_resume_complete(self, container_id):
        """Resume is complete once the container is running again."""
        status = self._get_container_status(container_id)
        return status['Running']

    def validate(self):
        super(DockerContainer, self).validate()
        self._validate_arg_for_api_version()

    def _validate_arg_for_api_version(self):
        """Reject properties that need a newer docker API than the daemon
        provides; the daemon version is queried lazily, at most once.
        """
        version = None
        for key in MIN_API_VERSION_MAP:
            if self.properties[key]:
                if not version:
                    client = self.get_client()
                    version = client.version()['ApiVersion']
                min_version = MIN_API_VERSION_MAP[key]
                if compare_version(min_version, version) < 0:
                    raise InvalidArgForVersion(arg=key,
                                               min_version=min_version)
Example #3
0
class CloudLoadBalancer(resource.Resource):
    """Represents a Rackspace Cloud Loadbalancer."""

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    PROPERTIES = (
        NAME,
        NODES,
        PROTOCOL,
        ACCESS_LIST,
        HALF_CLOSED,
        ALGORITHM,
        CONNECTION_LOGGING,
        METADATA,
        PORT,
        TIMEOUT,
        CONNECTION_THROTTLE,
        SESSION_PERSISTENCE,
        VIRTUAL_IPS,
        CONTENT_CACHING,
        HEALTH_MONITOR,
        SSL_TERMINATION,
        ERROR_PAGE,
        HTTPS_REDIRECT,
    ) = (
        'name',
        'nodes',
        'protocol',
        'accessList',
        'halfClosed',
        'algorithm',
        'connectionLogging',
        'metadata',
        'port',
        'timeout',
        'connectionThrottle',
        'sessionPersistence',
        'virtualIps',
        'contentCaching',
        'healthMonitor',
        'sslTermination',
        'errorPage',
        'httpsRedirect',
    )

    LB_UPDATE_PROPS = (NAME, ALGORITHM, PROTOCOL, HALF_CLOSED, PORT, TIMEOUT,
                       HTTPS_REDIRECT)

    _NODE_KEYS = (
        NODE_ADDRESSES,
        NODE_PORT,
        NODE_CONDITION,
        NODE_TYPE,
        NODE_WEIGHT,
    ) = (
        'addresses',
        'port',
        'condition',
        'type',
        'weight',
    )

    _ACCESS_LIST_KEYS = (
        ACCESS_LIST_ADDRESS,
        ACCESS_LIST_TYPE,
    ) = (
        'address',
        'type',
    )

    _CONNECTION_THROTTLE_KEYS = (
        CONNECTION_THROTTLE_MAX_CONNECTION_RATE,
        CONNECTION_THROTTLE_MIN_CONNECTIONS,
        CONNECTION_THROTTLE_MAX_CONNECTIONS,
        CONNECTION_THROTTLE_RATE_INTERVAL,
    ) = (
        'maxConnectionRate',
        'minConnections',
        'maxConnections',
        'rateInterval',
    )

    _VIRTUAL_IP_KEYS = (VIRTUAL_IP_TYPE, VIRTUAL_IP_IP_VERSION,
                        VIRTUAL_IP_ID) = ('type', 'ipVersion', 'id')

    _HEALTH_MONITOR_KEYS = (
        HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION,
        HEALTH_MONITOR_DELAY,
        HEALTH_MONITOR_TIMEOUT,
        HEALTH_MONITOR_TYPE,
        HEALTH_MONITOR_BODY_REGEX,
        HEALTH_MONITOR_HOST_HEADER,
        HEALTH_MONITOR_PATH,
        HEALTH_MONITOR_STATUS_REGEX,
    ) = (
        'attemptsBeforeDeactivation',
        'delay',
        'timeout',
        'type',
        'bodyRegex',
        'hostHeader',
        'path',
        'statusRegex',
    )
    _HEALTH_MONITOR_CONNECT_KEYS = (
        HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION,
        HEALTH_MONITOR_DELAY,
        HEALTH_MONITOR_TIMEOUT,
        HEALTH_MONITOR_TYPE,
    )

    _SSL_TERMINATION_KEYS = (
        SSL_TERMINATION_SECURE_PORT,
        SSL_TERMINATION_PRIVATEKEY,
        SSL_TERMINATION_CERTIFICATE,
        SSL_TERMINATION_INTERMEDIATE_CERTIFICATE,
        SSL_TERMINATION_SECURE_TRAFFIC_ONLY,
    ) = (
        'securePort',
        'privatekey',
        'certificate',
        'intermediateCertificate',
        'secureTrafficOnly',
    )

    ATTRIBUTES = (PUBLIC_IP, VIPS) = ('PublicIp', 'virtualIps')

    ALGORITHMS = [
        "LEAST_CONNECTIONS", "RANDOM", "ROUND_ROBIN",
        "WEIGHTED_LEAST_CONNECTIONS", "WEIGHTED_ROUND_ROBIN"
    ]

    _health_monitor_schema = {
        HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION:
        properties.Schema(properties.Schema.NUMBER,
                          required=True,
                          constraints=[
                              constraints.Range(1, 10),
                          ]),
        HEALTH_MONITOR_DELAY:
        properties.Schema(properties.Schema.NUMBER,
                          required=True,
                          constraints=[
                              constraints.Range(1, 3600),
                          ]),
        HEALTH_MONITOR_TIMEOUT:
        properties.Schema(properties.Schema.NUMBER,
                          required=True,
                          constraints=[
                              constraints.Range(1, 300),
                          ]),
        HEALTH_MONITOR_TYPE:
        properties.Schema(properties.Schema.STRING,
                          required=True,
                          constraints=[
                              constraints.AllowedValues(
                                  ['CONNECT', 'HTTP', 'HTTPS']),
                          ]),
        HEALTH_MONITOR_BODY_REGEX:
        properties.Schema(properties.Schema.STRING),
        HEALTH_MONITOR_HOST_HEADER:
        properties.Schema(properties.Schema.STRING),
        HEALTH_MONITOR_PATH:
        properties.Schema(properties.Schema.STRING),
        HEALTH_MONITOR_STATUS_REGEX:
        properties.Schema(properties.Schema.STRING),
    }

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING, update_allowed=True),
        NODES:
        properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NODE_ADDRESSES:
                    properties.Schema(
                        properties.Schema.LIST,
                        required=True,
                        description=(_("IP addresses for the load balancer "
                                       "node. Must have at least one "
                                       "address.")),
                        schema=properties.Schema(properties.Schema.STRING)),
                    NODE_PORT:
                    properties.Schema(properties.Schema.INTEGER,
                                      required=True),
                    NODE_CONDITION:
                    properties.Schema(properties.Schema.STRING,
                                      default='ENABLED',
                                      constraints=[
                                          constraints.AllowedValues([
                                              'ENABLED', 'DISABLED', 'DRAINING'
                                          ]),
                                      ]),
                    NODE_TYPE:
                    properties.Schema(properties.Schema.STRING,
                                      constraints=[
                                          constraints.AllowedValues(
                                              ['PRIMARY', 'SECONDARY']),
                                      ]),
                    NODE_WEIGHT:
                    properties.Schema(properties.Schema.NUMBER,
                                      constraints=[
                                          constraints.Range(1, 100),
                                      ]),
                },
            ),
            required=True,
            update_allowed=True),
        PROTOCOL:
        properties.Schema(properties.Schema.STRING,
                          required=True,
                          constraints=[
                              constraints.AllowedValues([
                                  'DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS',
                                  'IMAPS', 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL',
                                  'POP3', 'POP3S', 'SMTP', 'TCP',
                                  'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM',
                                  'SFTP'
                              ]),
                          ],
                          update_allowed=True),
        ACCESS_LIST:
        properties.Schema(properties.Schema.LIST,
                          schema=properties.Schema(
                              properties.Schema.MAP,
                              schema={
                                  ACCESS_LIST_ADDRESS:
                                  properties.Schema(properties.Schema.STRING,
                                                    required=True),
                                  ACCESS_LIST_TYPE:
                                  properties.Schema(
                                      properties.Schema.STRING,
                                      required=True,
                                      constraints=[
                                          constraints.AllowedValues(
                                              ['ALLOW', 'DENY']),
                                      ]),
                              },
                          )),
        HALF_CLOSED:
        properties.Schema(properties.Schema.BOOLEAN, update_allowed=True),
        ALGORITHM:
        properties.Schema(properties.Schema.STRING,
                          constraints=[constraints.AllowedValues(ALGORITHMS)],
                          update_allowed=True),
        CONNECTION_LOGGING:
        properties.Schema(properties.Schema.BOOLEAN, update_allowed=True),
        METADATA:
        properties.Schema(properties.Schema.MAP, update_allowed=True),
        PORT:
        properties.Schema(properties.Schema.INTEGER,
                          required=True,
                          update_allowed=True),
        TIMEOUT:
        properties.Schema(properties.Schema.NUMBER,
                          constraints=[
                              constraints.Range(1, 120),
                          ],
                          update_allowed=True),
        CONNECTION_THROTTLE:
        properties.Schema(properties.Schema.MAP,
                          schema={
                              CONNECTION_THROTTLE_MAX_CONNECTION_RATE:
                              properties.Schema(properties.Schema.NUMBER,
                                                constraints=[
                                                    constraints.Range(
                                                        0, 100000),
                                                ]),
                              CONNECTION_THROTTLE_MIN_CONNECTIONS:
                              properties.Schema(properties.Schema.INTEGER,
                                                constraints=[
                                                    constraints.Range(1, 1000),
                                                ]),
                              CONNECTION_THROTTLE_MAX_CONNECTIONS:
                              properties.Schema(properties.Schema.INTEGER,
                                                constraints=[
                                                    constraints.Range(
                                                        1, 100000),
                                                ]),
                              CONNECTION_THROTTLE_RATE_INTERVAL:
                              properties.Schema(properties.Schema.NUMBER,
                                                constraints=[
                                                    constraints.Range(1, 3600),
                                                ]),
                          },
                          update_allowed=True),
        SESSION_PERSISTENCE:
        properties.Schema(properties.Schema.STRING,
                          constraints=[
                              constraints.AllowedValues(
                                  ['HTTP_COOKIE', 'SOURCE_IP']),
                          ],
                          update_allowed=True),
        VIRTUAL_IPS:
        properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    VIRTUAL_IP_TYPE:
                    properties.Schema(
                        properties.Schema.STRING,
                        "The type of VIP (public or internal). This property"
                        " cannot be specified if 'id' is specified. This "
                        "property must be specified if id is not specified.",
                        constraints=[
                            constraints.AllowedValues(['SERVICENET',
                                                       'PUBLIC']),
                        ]),
                    VIRTUAL_IP_IP_VERSION:
                    properties.Schema(
                        properties.Schema.STRING,
                        "IP version of the VIP. This property cannot be "
                        "specified if 'id' is specified. This property must "
                        "be specified if id is not specified.",
                        constraints=[
                            constraints.AllowedValues(['IPV6', 'IPV4']),
                        ]),
                    VIRTUAL_IP_ID:
                    properties.Schema(
                        properties.Schema.NUMBER,
                        "ID of a shared VIP to use instead of creating a "
                        "new one. This property cannot be specified if type"
                        " or version is specified.")
                },
            ),
            required=True,
            constraints=[constraints.Length(min=1)]),
        CONTENT_CACHING:
        properties.Schema(properties.Schema.STRING,
                          constraints=[
                              constraints.AllowedValues(
                                  ['ENABLED', 'DISABLED']),
                          ],
                          update_allowed=True),
        HEALTH_MONITOR:
        properties.Schema(properties.Schema.MAP,
                          schema=_health_monitor_schema,
                          update_allowed=True),
        SSL_TERMINATION:
        properties.Schema(
            properties.Schema.MAP,
            schema={
                SSL_TERMINATION_SECURE_PORT:
                properties.Schema(properties.Schema.INTEGER, default=443),
                SSL_TERMINATION_PRIVATEKEY:
                properties.Schema(properties.Schema.STRING, required=True),
                SSL_TERMINATION_CERTIFICATE:
                properties.Schema(properties.Schema.STRING, required=True),
                # only required if configuring intermediate ssl termination
                # add to custom validation
                SSL_TERMINATION_INTERMEDIATE_CERTIFICATE:
                properties.Schema(properties.Schema.STRING),
                # pyrax will default to false
                SSL_TERMINATION_SECURE_TRAFFIC_ONLY:
                properties.Schema(properties.Schema.BOOLEAN, default=False),
            },
            update_allowed=True),
        ERROR_PAGE:
        properties.Schema(properties.Schema.STRING, update_allowed=True),
        HTTPS_REDIRECT:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _("Enables or disables HTTP to HTTPS redirection for the load "
              "balancer. When enabled, any HTTP request returns status code "
              "301 (Moved Permanently), and the requester is redirected to "
              "the requested URL via the HTTPS protocol on port 443. Only "
              "available for HTTPS protocol (port=443), or HTTP protocol with "
              "a properly configured SSL termination (secureTrafficOnly=true, "
              "securePort=443)."),
            update_allowed=True,
            default=False,
            support_status=support.SupportStatus(version="2015.1"))
    }

    # Attributes exposed to templates via get_attr.
    attributes_schema = {
        PUBLIC_IP:
        attributes.Schema(_('Public IP address of the specified instance.')),
        VIPS:
        attributes.Schema(_("A list of assigned virtual ip addresses"))
    }

    # Loadbalancer status strings reported by the cloud loadbalancer API.
    ACTIVE_STATUS = 'ACTIVE'
    DELETED_STATUS = 'DELETED'
    PENDING_DELETE_STATUS = 'PENDING_DELETE'

    def __init__(self, name, json_snippet, stack):
        """Initialize the resource and cache a cloud loadbalancer client."""
        super(CloudLoadBalancer, self).__init__(name, json_snippet, stack)
        # Client handle used by all subsequent loadbalancer API calls.
        self.clb = self.cloud_lb()

    def cloud_lb(self):
        """Return the 'cloud_lb' service client."""
        return self.client('cloud_lb')

    def _setup_properties(self, properties, function):
        """Use defined schema properties as kwargs for loadbalancer objects."""
        if properties and function:
            return [
                function(**self._remove_none(item_dict))
                for item_dict in properties
            ]
        elif function:
            return [function()]

    def _alter_properties_for_api(self):
        """Build the wrapper structures required by the API.

        A few properties must be wrapped in otherwise redundant key/value
        structures before being handed to the API; constructing those
        wrappers here keeps template definitions simple.
        """
        props = self.properties
        session_persistence = None
        connection_logging = None
        metadata = None

        if self.SESSION_PERSISTENCE in props.data:
            session_persistence = {
                'persistenceType': props[self.SESSION_PERSISTENCE]}

        if self.CONNECTION_LOGGING in props.data:
            connection_logging = {"enabled": props[self.CONNECTION_LOGGING]}

        if self.METADATA in props.data:
            metadata = [{'key': k, 'value': v}
                        for k, v in six.iteritems(props[self.METADATA])]

        return (session_persistence, connection_logging, metadata)

    def _check_active(self):
        """Refresh the loadbalancer and report whether it is ACTIVE."""
        loadbalancer = self.clb.get(self.resource_id)
        return loadbalancer.status == self.ACTIVE_STATUS

    def _valid_HTTPS_redirect_with_HTTP_prot(self):
        """Return True if HTTPS redirect is valid with the HTTP protocol.

        Redirect over plain HTTP is only acceptable when SSL termination
        is configured with secureTrafficOnly on port 443.
        """
        if not self.properties[self.HTTPS_REDIRECT]:
            return False
        if self.properties[self.PROTOCOL] != "HTTP":
            return False
        termcfg = self.properties.get(self.SSL_TERMINATION) or {}
        secure_only = termcfg.get(self.SSL_TERMINATION_SECURE_TRAFFIC_ONLY,
                                  False)
        secure_port = termcfg.get(self.SSL_TERMINATION_SECURE_PORT, 0)
        return bool(secure_only and secure_port == 443)

    def _configure_post_creation(self, loadbalancer):
        """Configure all load balancer properties post creation.

        These properties can only be set after the load balancer is created.

        This is a task generator: each ``yield`` suspends the task until
        the loadbalancer returns to ACTIVE, because the API rejects
        configuration calls while a previous change is still in flight.
        """
        if self.properties[self.ACCESS_LIST]:
            while not self._check_active():
                yield
            loadbalancer.add_access_list(self.properties[self.ACCESS_LIST])

        if self.properties[self.ERROR_PAGE]:
            while not self._check_active():
                yield
            loadbalancer.set_error_page(self.properties[self.ERROR_PAGE])

        if self.properties[self.SSL_TERMINATION]:
            while not self._check_active():
                yield
            ssl_term = self.properties[self.SSL_TERMINATION]
            loadbalancer.add_ssl_termination(
                ssl_term[self.SSL_TERMINATION_SECURE_PORT],
                ssl_term[self.SSL_TERMINATION_PRIVATEKEY],
                ssl_term[self.SSL_TERMINATION_CERTIFICATE],
                intermediateCertificate=ssl_term[
                    self.SSL_TERMINATION_INTERMEDIATE_CERTIFICATE],
                enabled=True,
                secureTrafficOnly=ssl_term[
                    self.SSL_TERMINATION_SECURE_TRAFFIC_ONLY])

        # Redirect on plain HTTP is only legal once the matching SSL
        # termination (above) is in place, so it is applied here rather
        # than at create time.
        if self._valid_HTTPS_redirect_with_HTTP_prot():
            while not self._check_active():
                yield
            loadbalancer.update(httpsRedirect=True)

        if self.CONTENT_CACHING in self.properties:
            enabled = self.properties[self.CONTENT_CACHING] == 'ENABLED'
            while not self._check_active():
                yield
            loadbalancer.content_caching = enabled

    def _process_node(self, node):
        """Yield one single-address node dict per address in ``node``.

        A node without an address list is yielded unchanged; otherwise a
        deep copy is produced per address, with the address list replaced
        by a single 'address' key.
        """
        addresses = node.get(self.NODE_ADDRESSES)
        if not addresses:
            yield node
            return
        for address in addresses:
            expanded = copy.deepcopy(node)
            expanded['address'] = address
            del expanded[self.NODE_ADDRESSES]
            yield expanded

    def _process_nodes(self, node_list):
        """Lazily flatten node_list, expanding multi-address nodes."""
        expanded = (self._process_node(node) for node in node_list)
        return itertools.chain.from_iterable(expanded)

    def _validate_https_redirect(self):
        """Raise StackValidationFailed if HTTPS redirect is misconfigured."""
        if not self.properties[self.HTTPS_REDIRECT]:
            return
        if self.properties[self.PROTOCOL] == "HTTPS":
            return
        if self._valid_HTTPS_redirect_with_HTTP_prot():
            return
        message = _("HTTPS redirect is only available for the HTTPS "
                    "protocol (port=443), or the HTTP protocol with "
                    "a properly configured SSL termination "
                    "(secureTrafficOnly=true, securePort=443).")
        raise exception.StackValidationFailed(message=message)

    def handle_create(self):
        """Create the loadbalancer and run its post-creation configuration."""
        node_list = self._process_nodes(self.properties.get(self.NODES))
        nodes = [self.clb.Node(**node) for node in node_list]
        vips = self.properties.get(self.VIRTUAL_IPS)

        virtual_ips = self._setup_properties(vips, self.clb.VirtualIP)

        (session_persistence, connection_logging,
         metadata) = self._alter_properties_for_api()

        lb_body = {
            'port': self.properties[self.PORT],
            'protocol': self.properties[self.PROTOCOL],
            'nodes': nodes,
            'virtual_ips': virtual_ips,
            'algorithm': self.properties.get(self.ALGORITHM),
            'halfClosed': self.properties.get(self.HALF_CLOSED),
            'connectionThrottle':
            self.properties.get(self.CONNECTION_THROTTLE),
            'metadata': metadata,
            'healthMonitor': self.properties.get(self.HEALTH_MONITOR),
            'sessionPersistence': session_persistence,
            'timeout': self.properties.get(self.TIMEOUT),
            'connectionLogging': connection_logging,
            self.HTTPS_REDIRECT: self.properties[self.HTTPS_REDIRECT]
        }
        # With HTTP + SSL termination, the redirect flag is applied after
        # SSL termination is configured (see _configure_post_creation), so
        # the loadbalancer is created with it disabled here.
        if self._valid_HTTPS_redirect_with_HTTP_prot():
            lb_body[self.HTTPS_REDIRECT] = False
        self._validate_https_redirect()

        lb_name = (self.properties.get(self.NAME)
                   or self.physical_resource_name())
        LOG.debug("Creating loadbalancer: %s" % {lb_name: lb_body})
        loadbalancer = self.clb.create(lb_name, **lb_body)
        self.resource_id_set(str(loadbalancer.id))

        # Apply access list, error page, SSL termination etc. as a task,
        # bounded by a 10-minute timeout.
        post_create = scheduler.TaskRunner(self._configure_post_creation,
                                           loadbalancer)
        post_create(timeout=600)
        return loadbalancer

    def check_create_complete(self, loadbalancer):
        """Return True once the loadbalancer has reached ACTIVE."""
        return self._check_active()

    def handle_check(self):
        """Raise an Error unless the loadbalancer is in the ACTIVE state."""
        loadbalancer = self.clb.get(self.resource_id)
        if self._check_active():
            return
        raise exception.Error(
            _("Cloud LoadBalancer is not ACTIVE "
              "(was: %s)") % loadbalancer.status)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Schedule update tasks for every property changed in prop_diff.

        Returns a list of TaskRunners, driven to completion by
        check_update_complete in list order.
        """
        lb = self.clb.get(self.resource_id)
        checkers = []

        # Node membership changes can produce several tasks at once.
        if self.NODES in prop_diff:
            checkers.extend(self._update_nodes(lb, prop_diff[self.NODES]))

        # Simple scalar properties are pushed in a single bulk update.
        updated_props = dict((k, v) for k, v in six.iteritems(prop_diff)
                             if k in self.LB_UPDATE_PROPS)
        if updated_props:
            checkers.append(self._update_lb_properties(lb, updated_props))

        # Remaining properties each have a dedicated update helper; the
        # tuple preserves the original scheduling order.
        prop_updaters = (
            (self.HEALTH_MONITOR, self._update_health_monitor),
            (self.SESSION_PERSISTENCE, self._update_session_persistence),
            (self.SSL_TERMINATION, self._update_ssl_termination),
            (self.METADATA, self._update_metadata),
            (self.ERROR_PAGE, self._update_errorpage),
            (self.CONNECTION_LOGGING, self._update_connection_logging),
            (self.CONNECTION_THROTTLE, self._update_connection_throttle),
            (self.CONTENT_CACHING, self._update_content_caching),
        )
        for prop, updater in prop_updaters:
            if prop in prop_diff:
                checkers.append(updater(lb, prop_diff[prop]))

        return checkers

    def _update_nodes(self, lb, updated_nodes):
        """Reconcile the loadbalancer's nodes with ``updated_nodes``.

        Returns a list of TaskRunners that add new nodes, delete removed
        nodes and push attribute changes for nodes present in both sets.
        """
        @retry_if_immutable
        def add_nodes(lb, new_nodes):
            lb.add_nodes(new_nodes)

        @retry_if_immutable
        def remove_node(known, node):
            known[node].delete()

        @retry_if_immutable
        def update_node(known, node):
            # Changed attributes were already set on the node object in the
            # loop below; update() pushes them to the API.
            known[node].update()

        checkers = []
        current_nodes = lb.nodes
        diff_nodes = self._process_nodes(updated_nodes)
        # Loadbalancers can be uniquely identified by address and
        # port.  Old is a dict of all nodes the loadbalancer
        # currently knows about.
        old = dict(("{0.address}{0.port}".format(node), node)
                   for node in current_nodes)
        # New is a dict of the nodes the loadbalancer will know
        # about after this update.
        new = dict(("%s%s" % (node["address"], node[self.NODE_PORT]), node)
                   for node in diff_nodes)

        old_set = set(six.iterkeys(old))
        new_set = set(six.iterkeys(new))

        deleted = old_set.difference(new_set)
        added = new_set.difference(old_set)
        updated = new_set.intersection(old_set)

        # A loadbalancer must never be left without at least one node.
        if len(current_nodes) + len(added) - len(deleted) < 1:
            raise ValueError(
                _("The loadbalancer:%s requires at least one "
                  "node.") % self.name)
        """
        Add loadbalancers in the new map that are not in the old map.
        Add before delete to avoid deleting the last node and getting in
        an invalid state.
        """
        new_nodes = [self.clb.Node(**new[lb_node]) for lb_node in added]
        if new_nodes:
            checkers.append(scheduler.TaskRunner(add_nodes, lb, new_nodes))

        # Delete loadbalancers in the old dict that are not in the
        # new dict.
        for node in deleted:
            checkers.append(scheduler.TaskRunner(remove_node, old, node))

        # Update nodes that have been changed
        for node in updated:
            node_changed = False
            for attribute in six.iterkeys(new[node]):
                new_value = new[node][attribute]
                if new_value and new_value != getattr(old[node], attribute):
                    node_changed = True
                    setattr(old[node], attribute, new_value)
            if node_changed:
                checkers.append(scheduler.TaskRunner(update_node, old, node))

        return checkers

    def _update_lb_properties(self, lb, updated_props):
        """Schedule a bulk update of simple loadbalancer properties."""
        @retry_if_immutable
        def apply_props():
            lb.update(**updated_props)

        return scheduler.TaskRunner(apply_props)

    def _update_health_monitor(self, lb, updated_hm):
        """Schedule adding or removing the health monitor."""
        @retry_if_immutable
        def set_monitor():
            # Adding a health monitor overwrites any existing one, so
            # there is no need to delete before adding.
            lb.add_health_monitor(**updated_hm)

        @retry_if_immutable
        def clear_monitor():
            lb.delete_health_monitor()

        if updated_hm is None:
            return scheduler.TaskRunner(clear_monitor)
        return scheduler.TaskRunner(set_monitor)

    def _update_session_persistence(self, lb, updated_sp):
        """Schedule setting or clearing session persistence."""
        @retry_if_immutable
        def set_persistence():
            # Assigning the attribute replaces any existing configuration.
            lb.session_persistence = updated_sp

        @retry_if_immutable
        def clear_persistence():
            lb.session_persistence = ''

        if updated_sp is None:
            return scheduler.TaskRunner(clear_persistence)
        return scheduler.TaskRunner(set_persistence)

    def _update_ssl_termination(self, lb, updated_ssl_term):
        """Schedule adding or removing SSL termination."""
        @retry_if_immutable
        def set_ssl_termination():
            # add_ssl_termination replaces any existing configuration.
            lb.add_ssl_termination(**updated_ssl_term)

        @retry_if_immutable
        def clear_ssl_termination():
            lb.delete_ssl_termination()

        if updated_ssl_term is None:
            return scheduler.TaskRunner(clear_ssl_termination)
        return scheduler.TaskRunner(set_ssl_termination)

    def _update_metadata(self, lb, updated_metadata):
        """Schedule replacing or deleting the loadbalancer metadata."""
        @retry_if_immutable
        def set_meta():
            lb.set_metadata(updated_metadata)

        @retry_if_immutable
        def clear_meta():
            lb.delete_metadata()

        if updated_metadata is None:
            return scheduler.TaskRunner(clear_meta)
        return scheduler.TaskRunner(set_meta)

    def _update_errorpage(self, lb, updated_errorpage):
        """Schedule setting or clearing the custom error page."""
        @retry_if_immutable
        def set_errorpage():
            lb.set_error_page(updated_errorpage)

        @retry_if_immutable
        def clear_errorpage():
            lb.clear_error_page()

        if updated_errorpage is None:
            return scheduler.TaskRunner(clear_errorpage)
        return scheduler.TaskRunner(set_errorpage)

    def _update_connection_logging(self, lb, updated_cl):
        """Schedule turning connection logging on or off."""
        @retry_if_immutable
        def logging_on():
            lb.connection_logging = True

        @retry_if_immutable
        def logging_off():
            lb.connection_logging = False

        if updated_cl:
            return scheduler.TaskRunner(logging_on)
        return scheduler.TaskRunner(logging_off)

    def _update_connection_throttle(self, lb, updated_ct):
        """Schedule adding or removing connection throttling."""
        @retry_if_immutable
        def set_throttle():
            lb.add_connection_throttle(**updated_ct)

        @retry_if_immutable
        def clear_throttle():
            lb.delete_connection_throttle()

        if updated_ct is None:
            return scheduler.TaskRunner(clear_throttle)
        return scheduler.TaskRunner(set_throttle)

    def _update_content_caching(self, lb, updated_cc):
        """Schedule enabling or disabling content caching."""
        @retry_if_immutable
        def caching_on():
            lb.content_caching = True

        @retry_if_immutable
        def caching_off():
            lb.content_caching = False

        if updated_cc == 'ENABLED':
            return scheduler.TaskRunner(caching_on)
        return scheduler.TaskRunner(caching_off)

    def check_update_complete(self, checkers):
        """Drive all checker tasks to completion in list order."""
        def advance(task):
            # Start lazily, then push the task one step forward; all()
            # below short-circuits at the first unfinished task, exactly
            # like an early return.
            if not task.started():
                task.start()
            return task.step()

        return all(advance(task) for task in checkers)

    def check_delete_complete(self, *args):
        """Poll deletion progress; True once the loadbalancer is gone."""
        if self.resource_id is None:
            return True

        try:
            loadbalancer = self.clb.get(self.resource_id)
        except NotFound:
            # Already removed server-side; nothing left to wait for.
            return True

        status = loadbalancer.status
        if status == self.DELETED_STATUS:
            return True
        if status == self.PENDING_DELETE_STATUS:
            return False

        # Deletion has not started yet; (re)issue it.  An immutable
        # loadbalancer just means we need to retry on a later poll.
        try:
            loadbalancer.delete()
        except Exception as exc:
            if not lb_immutable(exc):
                raise
        return False

    def _remove_none(self, property_dict):
        """Remove None values that would cause schema validation problems.

        These are values that may be initialized to None.
        """
        return dict((key, value)
                    for (key, value) in six.iteritems(property_dict)
                    if value is not None)

    def validate(self):
        """Validate any of the provided params."""
        res = super(CloudLoadBalancer, self).validate()
        if res:
            return res

        props = self.properties

        # half-closed is only meaningful for the raw TCP protocols
        if props.get(self.HALF_CLOSED):
            if props[self.PROTOCOL] not in ('TCP', 'TCP_CLIENT_FIRST'):
                message = (_('The %s property is only available for the TCP '
                             'or TCP_CLIENT_FIRST protocols') %
                           self.HALF_CLOSED)
                raise exception.StackValidationFailed(message=message)

        # health_monitor connect and http types require completely different
        # schema
        if props.get(self.HEALTH_MONITOR):
            health_monitor = self._remove_none(props[self.HEALTH_MONITOR])
            schema = self._health_monitor_schema
            if health_monitor[self.HEALTH_MONITOR_TYPE] == 'CONNECT':
                schema = dict((k, v) for k, v in schema.items()
                              if k in self._HEALTH_MONITOR_CONNECT_KEYS)
            properties.Properties(schema, health_monitor, function.resolve,
                                  self.name).validate()

        # validate if HTTPS_REDIRECT is true
        self._validate_https_redirect()

        # a vip that specifies an id may not also carry type/version;
        # without an id, both type and version are required
        for vip in props.get(self.VIRTUAL_IPS, []):
            has_id = vip.get(self.VIRTUAL_IP_ID) is not None
            has_version = vip.get(self.VIRTUAL_IP_IP_VERSION) is not None
            has_type = vip.get(self.VIRTUAL_IP_TYPE) is not None
            if has_id and (has_version or has_type):
                message = _("Cannot specify type or version if VIP id is"
                            " specified.")
                raise exception.StackValidationFailed(message=message)
            if not has_id and not (has_version and has_type):
                message = _("Must specify VIP type and version if no id "
                            "specified.")
                raise exception.StackValidationFailed(message=message)

    def _public_ip(self, lb):
        """Return the first PUBLIC virtual IP address, or None if absent."""
        for vip in lb.virtual_ips:
            if vip.type == 'PUBLIC':
                return six.text_type(vip.address)
        return None

    def _resolve_attribute(self, key):
        """Resolve the PUBLIC_IP or VIPS attribute for this loadbalancer.

        Returns None when the resource has no physical id yet; raises
        InvalidTemplateAttribute for an unknown key.
        """
        if not self.resource_id:
            return None
        lb = self.clb.get(self.resource_id)
        # Map each attribute to a zero-argument resolver so that only the
        # requested attribute is computed.  (The previous implementation
        # evaluated every attribute eagerly and bound the result to a
        # local named 'function', shadowing the imported 'function'
        # module used elsewhere in this class.)
        resolvers = {
            self.PUBLIC_IP: lambda: self._public_ip(lb),
            self.VIPS: lambda: [{"id": vip.id,
                                 "type": vip.type,
                                 "ip_version": vip.ip_version,
                                 "address": vip.address}
                                for vip in lb.virtual_ips],
        }
        if key not in resolvers:
            raise exception.InvalidTemplateAttribute(resource=self.name,
                                                     key=key)
        attribute = resolvers[key]()
        LOG.info(_LI('%(name)s.GetAtt(%(key)s) == %(function)s'), {
            'name': self.name,
            'key': key,
            'function': attribute
        })
        return attribute
예제 #4
0
 def test_length_max_schema(self):
     """A max-only Length constraint serializes to the expected dict."""
     expected = {'length': {'max': 10}, 'description': 'a length range'}
     constraint = constraints.Length(max=10, description='a length range')
     self.assertEqual(expected, dict(constraint))
예제 #5
0
class CloudNetwork(resource.Resource):
    """
    A resource for creating Rackspace Cloud Networks.

    See http://www.rackspace.com/cloud/networks/ for service
    documentation.
    """

    PROPERTIES = (LABEL, CIDR) = ("label", "cidr")

    properties_schema = {
        LABEL:
        properties.Schema(properties.Schema.STRING,
                          _("The name of the network."),
                          required=True,
                          constraints=[constraints.Length(min=3, max=64)]),
        CIDR:
        properties.Schema(
            properties.Schema.STRING,
            _("The IP block from which to allocate the network. For example, "
              "172.16.0.0/24 or 2001:DB8::/64."),
            required=True)
    }

    # Attributes exposed via get_attr; resolved from the live network
    # object in _resolve_attribute.
    attributes_schema = {
        "cidr": _("The CIDR for an isolated private network."),
        "label": _("The name of the network.")
    }

    def __init__(self, name, json_snippet, stack):
        """Initialize the resource; the network object is fetched lazily."""
        resource.Resource.__init__(self, name, json_snippet, stack)
        # Cached result of network(); populated on first successful lookup.
        self._network = None

    def network(self):
        """Return the backing cloud network object, caching the lookup.

        Returns None when the resource has no id yet, or when the network
        has disappeared server-side (a warning is logged in that case).
        """
        if self.resource_id and not self._network:
            try:
                self._network = self.cloud_networks().get(self.resource_id)
            except NotFound:
                # NOTE(review): logger.warn is a deprecated alias of
                # logger.warning.
                logger.warn(
                    _("Could not find network %s but resource id "
                      "is set.") % self.resource_id)
        return self._network

    def cloud_networks(self):
        """Return the cloud networks client from the stack's clients."""
        return self.stack.clients.cloud_networks()

    def handle_create(self):
        """Create the network and record its id on the resource."""
        cnw = self.cloud_networks().create(label=self.properties[self.LABEL],
                                           cidr=self.properties[self.CIDR])
        self.resource_id_set(cnw.id)

    def handle_delete(self):
        """Request deletion; return the network object for completion polls."""
        net = self.network()
        if net:
            net.delete()
        return net

    def check_delete_complete(self, network):
        """Deletion is complete once re-fetching the network raises NotFound."""
        if network:
            try:
                network.get()
            except NotFound:
                return True
            else:
                return False
        return True

    def validate(self):
        """Validate properties; reject a malformed CIDR."""
        super(CloudNetwork, self).validate()
        try:
            netaddr.IPNetwork(self.properties[self.CIDR])
        except netaddr.core.AddrFormatError:
            raise exception.StackValidationFailed(message=_("Invalid cidr"))

    def _resolve_attribute(self, name):
        """Resolve 'cidr'/'label' attributes from the live network object."""
        net = self.network()
        if net:
            # NOTE(review): unicode() is Python 2-only; a Python 3 port
            # needs six.text_type here — confirm project compat policy.
            return unicode(getattr(net, name))
        return ""
예제 #6
0
 def test_length_max_fail(self):
     """Validating a string longer than max raises ValueError."""
     constraint = constraints.Length(max=5, description='a range')
     self.assertRaises(ValueError, constraint.validate, 'abcdef')
예제 #7
0
 def test_schema_validate_good(self):
     """A schema whose default satisfies its constraints validates cleanly."""
     schema = constraints.Schema(constraints.Schema.STRING,
                                 'A string',
                                 default='wibble',
                                 constraints=[constraints.Length(4, 8)])
     self.assertIsNone(schema.validate())
예제 #8
0
class OSDBInstance(resource.Resource):
    """OpenStack cloud database instance resource.

    Trove is Database as a Service for OpenStack. It's designed to run entirely
    on OpenStack, with the goal of allowing users to quickly and easily utilize
    the features of a relational or non-relational database without the burden
    of handling complex administrative tasks.
    """

    support_status = support.SupportStatus(version='2014.1')

    # Trove instance states that this resource reacts to while polling.
    TROVE_STATUS = (
        ERROR,
        FAILED,
        ACTIVE,
    ) = (
        'ERROR',
        'FAILED',
        'ACTIVE',
    )

    # Human-readable explanations surfaced when the instance enters a
    # non-recoverable state (used as the status_reason of ResourceInError).
    TROVE_STATUS_REASON = {
        FAILED:
        _('The database instance was created, but heat failed to set '
          'up the datastore. If a database instance is in the FAILED '
          'state, it should be deleted and a new one should be '
          'created.'),
        ERROR:
        _('The last operation for the database instance failed due to '
          'an error.'),
    }

    # States from which the instance cannot recover.
    BAD_STATUSES = (ERROR, FAILED)
    PROPERTIES = (
        NAME,
        FLAVOR,
        SIZE,
        DATABASES,
        USERS,
        AVAILABILITY_ZONE,
        RESTORE_POINT,
        DATASTORE_TYPE,
        DATASTORE_VERSION,
        NICS,
        REPLICA_OF,
        REPLICA_COUNT,
    ) = ('name', 'flavor', 'size', 'databases', 'users', 'availability_zone',
         'restore_point', 'datastore_type', 'datastore_version', 'networks',
         'replica_of', 'replica_count')

    # Keys accepted inside each entry of the 'databases' list property.
    _DATABASE_KEYS = (
        DATABASE_CHARACTER_SET,
        DATABASE_COLLATE,
        DATABASE_NAME,
    ) = (
        'character_set',
        'collate',
        'name',
    )

    # Keys accepted inside each entry of the 'users' list property.
    _USER_KEYS = (
        USER_NAME,
        USER_PASSWORD,
        USER_HOST,
        USER_DATABASES,
    ) = (
        'name',
        'password',
        'host',
        'databases',
    )

    # Keys accepted inside each entry of the 'networks' list property.
    _NICS_KEYS = (NET, PORT, V4_FIXED_IP) = ('network', 'port', 'fixed_ip')

    ATTRIBUTES = (
        HOSTNAME,
        HREF,
    ) = (
        'hostname',
        'href',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the DB instance to create.'),
                          update_allowed=True,
                          constraints=[
                              constraints.Length(max=255),
                          ]),
        FLAVOR:
        properties.Schema(
            properties.Schema.STRING,
            _('Reference to a flavor for creating DB instance.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.CustomConstraint('trove.flavor')]),
        DATASTORE_TYPE:
        properties.Schema(properties.Schema.STRING,
                          _("Name of registered datastore type."),
                          constraints=[constraints.Length(max=255)]),
        DATASTORE_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _("Name of the registered datastore version. "
              "It must exist for provided datastore type. "
              "Defaults to using single active version. "
              "If several active versions exist for provided datastore type, "
              "explicit value for this parameter must be specified."),
            constraints=[constraints.Length(max=255)]),
        SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Database volume size in GB.'),
                          required=True,
                          update_allowed=True,
                          constraints=[
                              constraints.Range(1, 150),
                          ]),
        NICS:
        properties.Schema(
            properties.Schema.LIST,
            _("List of network interfaces to create on instance."),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NET:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of the network to attach this NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT,
                              'net': NET
                          },
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ]),
                    PORT:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of Neutron port to attach this '
                          'NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT,
                              'net': NET
                          },
                        constraints=[
                            constraints.CustomConstraint('neutron.port')
                        ],
                    ),
                    V4_FIXED_IP:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Fixed IPv4 address for this NIC.'),
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                },
            ),
        ),
        DATABASES:
        properties.Schema(
            properties.Schema.LIST,
            _('List of databases to be created on DB instance creation.'),
            default=[],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    DATABASE_CHARACTER_SET:
                    properties.Schema(properties.Schema.STRING,
                                      _('Set of symbols and encodings.'),
                                      default='utf8'),
                    DATABASE_COLLATE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Set of rules for comparing characters in a '
                          'character set.'),
                        default='utf8_general_ci'),
                    DATABASE_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Specifies database names for creating '
                          'databases on instance creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=64),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                },
            )),
        USERS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of users to be created on DB instance creation.'),
            default=[],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    USER_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('User name to create a user on instance '
                          'creation.'),
                        required=True,
                        update_allowed=True,
                        constraints=[
                            constraints.Length(max=16),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                    USER_PASSWORD:
                    properties.Schema(properties.Schema.STRING,
                                      _('Password for those users on instance '
                                        'creation.'),
                                      required=True,
                                      update_allowed=True,
                                      constraints=[
                                          constraints.AllowedPattern(
                                              r'[a-zA-Z0-9_]+'
                                              r'[a-zA-Z0-9_@?#\s]*'
                                              r'[a-zA-Z0-9_]+'),
                                      ]),
                    USER_HOST:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The host from which a user is allowed to '
                          'connect to the database.'),
                        default='%',
                        update_allowed=True),
                    USER_DATABASES:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('Names of databases that those users can '
                          'access on instance creation.'),
                        schema=properties.Schema(properties.Schema.STRING, ),
                        required=True,
                        update_allowed=True,
                        constraints=[
                            constraints.Length(min=1),
                        ]),
                },
            )),
        AVAILABILITY_ZONE:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the availability zone for DB instance.')),
        RESTORE_POINT:
        properties.Schema(properties.Schema.STRING,
                          _('DB instance restore point.')),
        REPLICA_OF:
        properties.Schema(
            properties.Schema.STRING,
            _('Identifier of the source instance to replicate.'),
            support_status=support.SupportStatus(version='5.0.0')),
        REPLICA_COUNT:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The number of replicas to be created.'),
            support_status=support.SupportStatus(version='5.0.0')),
    }

    attributes_schema = {
        HOSTNAME:
        attributes.Schema(_("Hostname of the instance."),
                          type=attributes.Schema.STRING),
        HREF:
        attributes.Schema(_("Api endpoint reference of the instance."),
                          type=attributes.Schema.STRING),
    }

    default_client_name = 'trove'

    entity = 'instances'

    def __init__(self, name, json_snippet, stack):
        super(OSDBInstance, self).__init__(name, json_snippet, stack)
        # Lazily-populated caches; see href() and the dbinstance property.
        self._href = None
        self._dbinstance = None

    @property
    def dbinstance(self):
        """Get the trove dbinstance."""
        # Fetch once and cache; only possible when a resource id exists.
        if not self._dbinstance and self.resource_id:
            self._dbinstance = self.client().instances.get(self.resource_id)

        return self._dbinstance

    def _dbinstance_name(self):
        """Return the user-supplied name or fall back to the generated one."""
        name = self.properties[self.NAME]
        if name:
            return name

        return self.physical_resource_name()

    def handle_create(self):
        """Create cloud database instance."""
        self.flavor = self.client_plugin().find_flavor_by_name_or_id(
            self.properties[self.FLAVOR])
        self.volume = {'size': self.properties[self.SIZE]}
        self.databases = self.properties[self.DATABASES]
        self.users = self.properties[self.USERS]
        restore_point = self.properties[self.RESTORE_POINT]
        if restore_point:
            restore_point = {"backupRef": restore_point}
        zone = self.properties[self.AVAILABILITY_ZONE]
        self.datastore_type = self.properties[self.DATASTORE_TYPE]
        self.datastore_version = self.properties[self.DATASTORE_VERSION]
        replica_of = self.properties[self.REPLICA_OF]
        replica_count = self.properties[self.REPLICA_COUNT]

        # convert user databases to format required for troveclient.
        # that is, list of database dictionaries
        for user in self.users:
            dbs = [{'name': db} for db in user.get(self.USER_DATABASES, [])]
            user[self.USER_DATABASES] = dbs

        # convert networks to format required by troveclient
        nics = []
        for nic in self.properties[self.NICS]:
            nic_dict = {}
            net = nic.get(self.NET)
            if net:
                # Resolve the network reference through whichever backend
                # (Neutron or Nova-network) this deployment uses.
                if self.is_using_neutron():
                    net_id = self.client_plugin(
                        'neutron').find_resourceid_by_name_or_id(
                            'network', net)
                else:
                    net_id = (
                        self.client_plugin('nova').get_nova_network_id(net))
                nic_dict['net-id'] = net_id
            port = nic.get(self.PORT)
            if port:
                neutron = self.client_plugin('neutron')
                nic_dict['port-id'] = neutron.find_resourceid_by_name_or_id(
                    'port', port)
            ip = nic.get(self.V4_FIXED_IP)
            if ip:
                nic_dict['v4-fixed-ip'] = ip
            nics.append(nic_dict)

        # create db instance
        instance = self.client().instances.create(
            self._dbinstance_name(),
            self.flavor,
            volume=self.volume,
            databases=self.databases,
            users=self.users,
            restorePoint=restore_point,
            availability_zone=zone,
            datastore=self.datastore_type,
            datastore_version=self.datastore_version,
            nics=nics,
            replica_of=replica_of,
            replica_count=replica_count)
        self.resource_id_set(instance.id)

        # Returned value becomes the cookie passed to check_create_complete.
        return instance.id

    def _refresh_instance(self, instance_id):
        """Fetch the instance, returning None on an over-limit response.

        Any other client error is re-raised; callers treat None as
        "retry later".
        """
        try:
            instance = self.client().instances.get(instance_id)
            return instance
        except Exception as exc:
            if self.client_plugin().is_over_limit(exc):
                LOG.warning(
                    _LW("Stack %(name)s (%(id)s) received an "
                        "OverLimit response during instance.get():"
                        " %(exception)s"), {
                            'name': self.stack.name,
                            'id': self.stack.id,
                            'exception': exc
                        })
                return None
            else:
                raise

    def check_create_complete(self, instance_id):
        """Check if cloud DB instance creation is complete."""
        instance = self._refresh_instance(instance_id)  # refresh attributes
        if instance is None:
            # Rate-limited; poll again on the next invocation.
            return False
        if instance.status in self.BAD_STATUSES:
            raise exception.ResourceInError(
                resource_status=instance.status,
                status_reason=self.TROVE_STATUS_REASON.get(
                    instance.status, _("Unknown")))

        if instance.status != self.ACTIVE:
            return False
        LOG.info(
            _LI("Database instance %(database)s created (flavor:%("
                "flavor)s,volume:%(volume)s, datastore:%("
                "datastore_type)s, datastore_version:%("
                "datastore_version)s)"), {
                    'database': self._dbinstance_name(),
                    'flavor': self.flavor,
                    'volume': self.volume,
                    'datastore_type': self.datastore_type,
                    'datastore_version': self.datastore_version
                })
        return True

    def handle_check(self):
        """Verify the instance is ACTIVE; raise via the shared check helper."""
        instance = self.client().instances.get(self.resource_id)
        status = instance.status
        checks = [
            {
                'attr': 'status',
                'expected': self.ACTIVE,
                'current': status
            },
        ]
        self._verify_check_conditions(checks)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Compute the set of pending updates from the property diff.

        For databases and users, entries are tagged with an 'ACTION' key
        (CREATE/DELETE) that check_update_complete's helpers consume.
        Returns the dict of updates to apply (the update cookie).
        """
        updates = {}
        if prop_diff:
            instance = self.client().instances.get(self.resource_id)
            if self.NAME in prop_diff:
                updates.update({self.NAME: prop_diff[self.NAME]})
            if self.FLAVOR in prop_diff:
                flvid = prop_diff[self.FLAVOR]
                flv = self.client_plugin().get_flavor_id(flvid)
                updates.update({self.FLAVOR: flv})
            if self.SIZE in prop_diff:
                updates.update({self.SIZE: prop_diff[self.SIZE]})
            if self.DATABASES in prop_diff:
                # Diff desired databases against what exists on the instance.
                current = [
                    d.name for d in self.client().databases.list(instance)
                ]
                desired = [
                    d[self.DATABASE_NAME] for d in prop_diff[self.DATABASES]
                ]
                for db in prop_diff[self.DATABASES]:
                    dbname = db[self.DATABASE_NAME]
                    if dbname not in current:
                        db['ACTION'] = self.CREATE
                for dbname in current:
                    if dbname not in desired:
                        deleted = {
                            self.DATABASE_NAME: dbname,
                            'ACTION': self.DELETE
                        }
                        prop_diff[self.DATABASES].append(deleted)
                updates.update({self.DATABASES: prop_diff[self.DATABASES]})
            if self.USERS in prop_diff:
                # Same diffing approach for users.
                current = [u.name for u in self.client().users.list(instance)]
                desired = [u[self.USER_NAME] for u in prop_diff[self.USERS]]
                for usr in prop_diff[self.USERS]:
                    if usr[self.USER_NAME] not in current:
                        usr['ACTION'] = self.CREATE
                for usr in current:
                    if usr not in desired:
                        prop_diff[self.USERS].append({
                            self.USER_NAME: usr,
                            'ACTION': self.DELETE
                        })
                updates.update({self.USERS: prop_diff[self.USERS]})
        return updates

    def check_update_complete(self, updates):
        """Apply pending updates one at a time until all report done.

        Each _update_* helper returns False while its change is still in
        flight; short-circuit evaluation ensures only one resize-type
        operation is issued per polling cycle.
        """
        instance = self.client().instances.get(self.resource_id)
        if instance.status in self.BAD_STATUSES:
            raise exception.ResourceInError(
                resource_status=instance.status,
                status_reason=self.TROVE_STATUS_REASON.get(
                    instance.status, _("Unknown")))
        if updates:
            if instance.status != self.ACTIVE:
                dmsg = ("Instance is in status %(now)s. Waiting on status"
                        " %(stat)s")
                LOG.debug(dmsg % {"now": instance.status, "stat": self.ACTIVE})
                return False
            try:
                return (
                    self._update_name(instance, updates.get(self.NAME))
                    and self._update_flavor(instance, updates.get(self.FLAVOR))
                    and self._update_size(instance, updates.get(self.SIZE))
                    and self._update_databases(instance,
                                               updates.get(self.DATABASES))
                    and self._update_users(instance, updates.get(self.USERS)))
            except Exception as exc:
                if self.client_plugin().is_client_exception(exc):
                    # the instance could have updated between the time
                    # we retrieve it and try to update it so check again
                    if self.client_plugin().is_over_limit(exc):
                        LOG.debug("API rate limit: %(ex)s. Retrying." %
                                  {'ex': six.text_type(exc)})
                        return False
                    if "No change was requested" in six.text_type(exc):
                        LOG.warning(
                            _LW("Unexpected instance state change "
                                "during update. Retrying."))
                        return False
                raise
        return True

    def _update_name(self, instance, name):
        """Rename the instance if needed; False means change was issued."""
        if name and instance.name != name:
            self.client().instances.edit(instance, name=name)
            return False
        return True

    def _update_flavor(self, instance, new_flavor):
        """Resize the instance flavor if needed; False means resize issued."""
        if new_flavor:
            current_flav = six.text_type(instance.flavor['id'])
            new_flav = six.text_type(new_flavor)
            if new_flav != current_flav:
                dmsg = "Resizing instance flavor from %(old)s to %(new)s"
                LOG.debug(dmsg % {"old": current_flav, "new": new_flav})
                self.client().instances.resize_instance(instance, new_flavor)
                return False
        return True

    def _update_size(self, instance, new_size):
        """Resize the instance volume if needed; False means resize issued."""
        if new_size and instance.volume['size'] != new_size:
            dmsg = "Resizing instance storage from %(old)s to %(new)s"
            LOG.debug(dmsg % {"old": instance.volume['size'], "new": new_size})
            self.client().instances.resize_volume(instance, new_size)
            return False
        return True

    def _update_databases(self, instance, databases):
        """Create/delete databases per the 'ACTION' tags set by handle_update."""
        if databases:
            for db in databases:
                if db.get("ACTION") == self.CREATE:
                    db.pop("ACTION", None)
                    dmsg = "Adding new database %(db)s to instance"
                    LOG.debug(dmsg % {"db": db})
                    self.client().databases.create(instance, [db])
                elif db.get("ACTION") == self.DELETE:
                    dmsg = ("Deleting existing database %(db)s from "
                            "instance")
                    LOG.debug(dmsg % {"db": db['name']})
                    self.client().databases.delete(instance, db['name'])
        return True

    def _update_users(self, instance, users):
        """Create/delete/modify users per the 'ACTION' tags set by handle_update.

        Untagged entries are treated as attribute updates (host/password)
        plus database grant/revoke reconciliation.
        """
        if users:
            for usr in users:
                # Normalise database names to troveclient's dict form.
                dbs = [{'name': db} for db in usr.get(self.USER_DATABASES, [])]
                usr[self.USER_DATABASES] = dbs
                if usr.get("ACTION") == self.CREATE:
                    usr.pop("ACTION", None)
                    dmsg = "Adding new user %(u)s to instance"
                    LOG.debug(dmsg % {"u": usr})
                    self.client().users.create(instance, [usr])
                elif usr.get("ACTION") == self.DELETE:
                    dmsg = ("Deleting existing user %(u)s from " "instance")
                    LOG.debug(dmsg % {"u": usr['name']})
                    self.client().users.delete(instance, usr['name'])
                else:
                    newattrs = {}
                    if usr.get(self.USER_HOST):
                        newattrs[self.USER_HOST] = usr[self.USER_HOST]
                    if usr.get(self.USER_PASSWORD):
                        newattrs[self.USER_PASSWORD] = usr[self.USER_PASSWORD]
                    if newattrs:
                        self.client().users.update_attributes(
                            instance,
                            usr['name'],
                            newuserattr=newattrs,
                            hostname=instance.hostname)
                    # Reconcile database access: grant missing, revoke extra.
                    current = self.client().users.get(instance,
                                                      usr[self.USER_NAME])
                    dbs = [db['name'] for db in current.databases]
                    desired = [
                        db['name'] for db in usr.get(self.USER_DATABASES, [])
                    ]
                    grants = [db for db in desired if db not in dbs]
                    revokes = [db for db in dbs if db not in desired]
                    if grants:
                        self.client().users.grant(instance,
                                                  usr[self.USER_NAME], grants)
                    if revokes:
                        self.client().users.revoke(instance,
                                                   usr[self.USER_NAME],
                                                   revokes)
        return True

    def handle_delete(self):
        """Delete a cloud database instance."""
        if not self.resource_id:
            return

        try:
            instance = self.client().instances.get(self.resource_id)
        except Exception as ex:
            # Already gone: nothing to delete.
            self.client_plugin().ignore_not_found(ex)
        else:
            instance.delete()
            return instance.id

    def check_delete_complete(self, instance_id):
        """Check for completion of cloud DB instance deletion."""
        if not instance_id:
            return True

        try:
            # For some time trove instance may continue to live
            self._refresh_instance(instance_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return True

        return False

    def validate(self):
        """Validate any of the provided params."""
        res = super(OSDBInstance, self).validate()
        if res:
            return res

        datastore_type = self.properties[self.DATASTORE_TYPE]
        datastore_version = self.properties[self.DATASTORE_VERSION]

        self.client_plugin().validate_datastore(datastore_type,
                                                datastore_version,
                                                self.DATASTORE_TYPE,
                                                self.DATASTORE_VERSION)

        # check validity of user and databases
        users = self.properties[self.USERS]
        if users:
            databases = self.properties[self.DATABASES]
            if not databases:
                msg = _('Databases property is required if users property '
                        'is provided for resource %s.') % self.name
                raise exception.StackValidationFailed(message=msg)

            db_names = set([db[self.DATABASE_NAME] for db in databases])
            for user in users:
                missing_db = [
                    db_name for db_name in user[self.USER_DATABASES]
                    if db_name not in db_names
                ]

                if missing_db:
                    msg = (_('Database %(dbs)s specified for user does '
                             'not exist in databases for resource %(name)s.') %
                           {
                               'dbs': missing_db,
                               'name': self.name
                           })
                    raise exception.StackValidationFailed(message=msg)

        # check validity of NICS
        is_neutron = self.is_using_neutron()
        nics = self.properties[self.NICS]
        for nic in nics:
            if not is_neutron and nic.get(self.PORT):
                msg = _("Can not use %s property on Nova-network.") % self.PORT
                raise exception.StackValidationFailed(message=msg)

            # Exactly one of network/port must be supplied per NIC.
            if bool(nic.get(self.NET)) == bool(nic.get(self.PORT)):
                msg = _("Either %(net)s or %(port)s must be provided.") % {
                    'net': self.NET,
                    'port': self.PORT
                }
                raise exception.StackValidationFailed(message=msg)

    def href(self):
        """Return (and cache) the instance's 'self' link from the API, if any."""
        if not self._href and self.dbinstance:
            if not self.dbinstance.links:
                self._href = None
            else:
                for link in self.dbinstance.links:
                    if link['rel'] == 'self':
                        self._href = link[self.HREF]
                        break

        return self._href

    def _resolve_attribute(self, name):
        """Resolve the 'hostname' and 'href' attributes from the live instance."""
        if name == self.HOSTNAME:
            return self.dbinstance.hostname
        elif name == self.HREF:
            return self.href()
예제 #9
0
class CloudNetwork(resource.Resource):
    """A resource for creating Rackspace Cloud Networks.

    See http://www.rackspace.com/cloud/networks/ for service
    documentation.
    """

    support_status = support.SupportStatus(
        status=support.DEPRECATED,
        message=_('Use OS::Neutron::Net instead.'),
        version='2015.1',
        previous_status=support.SupportStatus(version='2014.1'))

    PROPERTIES = (LABEL, CIDR) = ("label", "cidr")

    ATTRIBUTES = (
        CIDR_ATTR,
        LABEL_ATTR,
    ) = (
        'cidr',
        'label',
    )

    properties_schema = {
        LABEL:
        properties.Schema(properties.Schema.STRING,
                          _("The name of the network."),
                          required=True,
                          constraints=[constraints.Length(min=3, max=64)]),
        CIDR:
        properties.Schema(
            properties.Schema.STRING,
            _("The IP block from which to allocate the network. For example, "
              "172.16.0.0/24 or 2001:DB8::/64."),
            required=True,
            constraints=[constraints.CustomConstraint('net_cidr')])
    }

    attributes_schema = {
        CIDR_ATTR:
        attributes.Schema(_("The CIDR for an isolated private network.")),
        LABEL_ATTR:
        attributes.Schema(_("The name of the network.")),
    }

    def __init__(self, name, json_snippet, stack):
        resource.Resource.__init__(self, name, json_snippet, stack)
        # Cached network object (see network()) and a flag recording
        # whether the delete call has been issued yet (the network may
        # stay in use for a while, so deletion is retried from
        # check_delete_complete).
        self._network = None
        self._delete_issued = False

    def network(self):
        """Return the backing network, fetched lazily and cached.

        Returns None if no resource id is set or the lookup raises
        NotFound (logged as a warning).
        """
        if self.resource_id and not self._network:
            try:
                self._network = self.cloud_networks().get(self.resource_id)
            except NotFound:
                LOG.warn(
                    _LW("Could not find network %s but resource id is"
                        " set."), self.resource_id)
        return self._network

    def cloud_networks(self):
        """Return the cloud-networks client."""
        return self.client('cloud_networks')

    def handle_create(self):
        """Create the network and record its id as this resource's id."""
        cnw = self.cloud_networks().create(label=self.properties[self.LABEL],
                                           cidr=self.properties[self.CIDR])
        self.resource_id_set(cnw.id)

    def handle_check(self):
        """Health check: a plain GET must succeed for the network."""
        self.cloud_networks().get(self.resource_id)

    def check_delete_complete(self, cookie):
        """Drive deletion to completion.

        The delete call itself happens here (not in handle_delete) because
        the network can be in use; delete is retried on NetworkInUse until
        it is accepted, then we poll until the GET raises NotFound.
        """
        try:
            network = self.cloud_networks().get(self.resource_id)
        except NotFound:
            return True

        if not network:
            return True

        if not self._delete_issued:
            try:
                network.delete()
            except NetworkInUse:
                LOG.warn(_LW("Network '%s' still in use."), network.id)
            else:
                self._delete_issued = True
            return False

        # Delete issued but GET still succeeds: keep polling.
        return False

    def validate(self):
        """Only the base-class property validation applies here."""
        super(CloudNetwork, self).validate()

    def _resolve_attribute(self, name):
        """Resolve attributes by reading them off the live network object."""
        net = self.network()
        if net:
            return six.text_type(getattr(net, name))
        return ""
예제 #10
0
class SaharaNodeGroupTemplate(resource.Resource):
    """A resource for managing Sahara node group templates.

    A node group template describes a group of cluster nodes: the flavor,
    attached volumes, security groups, availability zones, floating IP
    pool and the Sahara node processes to run on each node.
    """

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (NAME, PLUGIN_NAME, HADOOP_VERSION, FLAVOR, DESCRIPTION,
                  VOLUMES_PER_NODE, VOLUMES_SIZE, VOLUME_TYPE, SECURITY_GROUPS,
                  AUTO_SECURITY_GROUP, AVAILABILITY_ZONE,
                  VOLUMES_AVAILABILITY_ZONE, NODE_PROCESSES, FLOATING_IP_POOL,
                  NODE_CONFIGS, IMAGE_ID, IS_PROXY_GATEWAY,
                  VOLUME_LOCAL_TO_INSTANCE, USE_AUTOCONFIG) = (
                      'name', 'plugin_name', 'hadoop_version', 'flavor',
                      'description', 'volumes_per_node', 'volumes_size',
                      'volume_type', 'security_groups', 'auto_security_group',
                      'availability_zone', 'volumes_availability_zone',
                      'node_processes', 'floating_ip_pool', 'node_configs',
                      'image_id', 'is_proxy_gateway',
                      'volume_local_to_instance', 'use_autoconfig')

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _("Name for the Sahara Node Group Template."),
            constraints=[
                constraints.Length(min=1, max=50),
                constraints.AllowedPattern(SAHARA_NAME_REGEX),
            ],
        ),
        DESCRIPTION:
        properties.Schema(
            properties.Schema.STRING,
            _('Description of the Node Group Template.'),
            default="",
        ),
        PLUGIN_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
        ),
        HADOOP_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
        ),
        FLAVOR:
        properties.Schema(
            properties.Schema.STRING,
            _('Name or ID Nova flavor for the nodes.'),
            required=True,
            constraints=[constraints.CustomConstraint('nova.flavor')]),
        VOLUMES_PER_NODE:
        properties.Schema(
            properties.Schema.INTEGER,
            _("Volumes per node."),
            constraints=[
                constraints.Range(min=0),
            ],
        ),
        VOLUMES_SIZE:
        properties.Schema(
            properties.Schema.INTEGER,
            _("Size of the volumes, in GB."),
            constraints=[
                constraints.Range(min=1),
            ],
        ),
        VOLUME_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _("Type of the volume to create on Cinder backend."),
            constraints=[constraints.CustomConstraint('cinder.vtype')]),
        SECURITY_GROUPS:
        properties.Schema(
            properties.Schema.LIST,
            _("List of security group names or IDs to assign to this "
              "Node Group template."),
            schema=properties.Schema(properties.Schema.STRING, ),
        ),
        AUTO_SECURITY_GROUP:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _("Defines whether auto-assign security group to this "
              "Node Group template."),
        ),
        AVAILABILITY_ZONE:
        properties.Schema(
            properties.Schema.STRING,
            _("Availability zone to create servers in."),
        ),
        VOLUMES_AVAILABILITY_ZONE:
        properties.Schema(
            properties.Schema.STRING,
            _("Availability zone to create volumes in."),
        ),
        NODE_PROCESSES:
        properties.Schema(
            properties.Schema.LIST,
            _("List of processes to run on every node."),
            required=True,
            constraints=[
                constraints.Length(min=1),
            ],
            schema=properties.Schema(properties.Schema.STRING, ),
        ),
        FLOATING_IP_POOL:
        properties.Schema(
            properties.Schema.STRING,
            _("Name or UUID of the Neutron floating IP network or "
              "name of the Nova floating ip pool to use. "
              "Should not be provided when used with Nova-network "
              "that auto-assign floating IPs."),
        ),
        NODE_CONFIGS:
        properties.Schema(
            properties.Schema.MAP,
            _("Dictionary of node configurations."),
        ),
        IMAGE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _("ID of the image to use for the template."),
            constraints=[
                constraints.CustomConstraint('sahara.image'),
            ],
        ),
        IS_PROXY_GATEWAY:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _("Provide access to nodes using other nodes of the cluster "
              "as proxy gateways."),
            support_status=support.SupportStatus(version='5.0.0')),
        VOLUME_LOCAL_TO_INSTANCE:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _("Create volumes on the same physical port as an instance."),
            support_status=support.SupportStatus(version='5.0.0')),
        USE_AUTOCONFIG:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _("Configure most important configs automatically."),
            support_status=support.SupportStatus(version='5.0.0'))
    }

    default_client_name = 'sahara'

    physical_resource_name_limit = 50

    entity = 'node_group_templates'

    def _ngt_name(self):
        """Return the configured name or a sanitized generated name."""
        name = self.properties[self.NAME]
        if name:
            return name
        # Sahara names are restricted; strip disallowed characters from
        # the generated physical resource name.
        return re.sub('[^a-zA-Z0-9-]', '', self.physical_resource_name())

    def handle_create(self):
        """Create the node group template in Sahara and record its id."""
        plugin_name = self.properties[self.PLUGIN_NAME]
        hadoop_version = self.properties[self.HADOOP_VERSION]
        node_processes = self.properties[self.NODE_PROCESSES]
        description = self.properties[self.DESCRIPTION]
        flavor_id = self.client_plugin("nova").get_flavor_id(
            self.properties[self.FLAVOR])
        volumes_per_node = self.properties[self.VOLUMES_PER_NODE]
        volumes_size = self.properties[self.VOLUMES_SIZE]
        volume_type = self.properties[self.VOLUME_TYPE]
        floating_ip_pool = self.properties[self.FLOATING_IP_POOL]
        security_groups = self.properties[self.SECURITY_GROUPS]
        auto_security_group = self.properties[self.AUTO_SECURITY_GROUP]
        availability_zone = self.properties[self.AVAILABILITY_ZONE]
        vol_availability_zone = self.properties[self.VOLUMES_AVAILABILITY_ZONE]
        image_id = self.properties[self.IMAGE_ID]
        # On Neutron, the pool property names a network; resolve it to an id.
        if floating_ip_pool and self.is_using_neutron():
            floating_ip_pool = self.client_plugin(
                'neutron').find_neutron_resource(self.properties,
                                                 self.FLOATING_IP_POOL,
                                                 'network')
        node_configs = self.properties[self.NODE_CONFIGS]
        is_proxy_gateway = self.properties[self.IS_PROXY_GATEWAY]
        volume_local_to_instance = self.properties[
            self.VOLUME_LOCAL_TO_INSTANCE]
        use_autoconfig = self.properties[self.USE_AUTOCONFIG]

        node_group_template = self.client().node_group_templates.create(
            self._ngt_name(),
            plugin_name,
            hadoop_version,
            flavor_id,
            description=description,
            volumes_per_node=volumes_per_node,
            volumes_size=volumes_size,
            volume_type=volume_type,
            node_processes=node_processes,
            floating_ip_pool=floating_ip_pool,
            node_configs=node_configs,
            security_groups=security_groups,
            auto_security_group=auto_security_group,
            availability_zone=availability_zone,
            volumes_availability_zone=vol_availability_zone,
            image_id=image_id,
            is_proxy_gateway=is_proxy_gateway,
            volume_local_to_instance=volume_local_to_instance,
            use_autoconfig=use_autoconfig)
        LOG.info(_LI("Node Group Template '%s' has been created"),
                 node_group_template.name)
        self.resource_id_set(node_group_template.id)
        return self.resource_id

    def validate(self):
        """Validate the floating IP pool against Neutron or Nova."""
        res = super(SaharaNodeGroupTemplate, self).validate()
        if res:
            return res
        pool = self.properties[self.FLOATING_IP_POOL]
        if pool:
            if self.is_using_neutron():
                try:
                    self.client_plugin('neutron').find_neutron_resource(
                        self.properties, self.FLOATING_IP_POOL, 'network')
                except Exception as ex:
                    if (self.client_plugin('neutron').is_not_found(ex)
                            or self.client_plugin('neutron').is_no_unique(ex)):
                        # ex.message does not exist on Python 3 exceptions;
                        # stringify the exception instead.
                        raise exception.StackValidationFailed(
                            message=six.text_type(ex))
                    raise
            else:
                try:
                    self.client('nova').floating_ip_pools.find(name=pool)
                except Exception as ex:
                    if self.client_plugin('nova').is_not_found(ex):
                        raise exception.StackValidationFailed(
                            message=six.text_type(ex))
                    raise
예제 #11
0
class SubnetPool(neutron.NeutronResource):
    """A resource that implements neutron subnet pool.

    This resource can be used to create a subnet pool with a large block
    of addresses and create subnets from it.
    """

    support_status = support.SupportStatus(version='6.0.0')

    required_service_extension = 'subnet_allocation'

    entity = 'subnetpool'

    PROPERTIES = (
        NAME,
        PREFIXES,
        ADDRESS_SCOPE,
        DEFAULT_QUOTA,
        DEFAULT_PREFIXLEN,
        MIN_PREFIXLEN,
        MAX_PREFIXLEN,
        IS_DEFAULT,
        TENANT_ID,
        SHARED,
        TAGS,
    ) = (
        'name',
        'prefixes',
        'address_scope',
        'default_quota',
        'default_prefixlen',
        'min_prefixlen',
        'max_prefixlen',
        'is_default',
        'tenant_id',
        'shared',
        'tags',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the subnet pool.'),
                          update_allowed=True),
        PREFIXES:
        properties.Schema(
            properties.Schema.LIST,
            _('List of subnet prefixes to assign.'),
            schema=properties.Schema(
                properties.Schema.STRING,
                constraints=[
                    constraints.CustomConstraint('net_cidr'),
                ],
            ),
            constraints=[constraints.Length(min=1)],
            required=True,
            update_allowed=True,
        ),
        ADDRESS_SCOPE:
        properties.Schema(
            properties.Schema.STRING,
            _('An address scope ID to assign to the subnet pool.'),
            constraints=[
                constraints.CustomConstraint('neutron.address_scope')
            ],
            update_allowed=True,
        ),
        DEFAULT_QUOTA:
        properties.Schema(
            properties.Schema.INTEGER,
            _('A per-tenant quota on the prefix space that can be allocated '
              'from the subnet pool for tenant subnets.'),
            constraints=[constraints.Range(min=0)],
            update_allowed=True,
        ),
        DEFAULT_PREFIXLEN:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The size of the prefix to allocate when the cidr or '
              'prefixlen attributes are not specified while creating '
              'a subnet.'),
            constraints=[constraints.Range(min=0)],
            update_allowed=True,
        ),
        MIN_PREFIXLEN:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Smallest prefix size that can be allocated '
              'from the subnet pool.'),
            constraints=[constraints.Range(min=0)],
            update_allowed=True,
        ),
        MAX_PREFIXLEN:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Maximum prefix size that can be allocated '
              'from the subnet pool.'),
            constraints=[constraints.Range(min=0)],
            update_allowed=True,
        ),
        IS_DEFAULT:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether this is default IPv4/IPv6 subnet pool. '
              'There can only be one default subnet pool for each IP family. '
              'Note that the default policy setting restricts administrative '
              'users to set this to True.'),
            default=False,
            update_allowed=True,
        ),
        TENANT_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID of the tenant who owns the subnet pool. Only '
              'administrative users can specify a tenant ID '
              'other than their own.')),
        SHARED:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether the subnet pool will be shared across all tenants. '
              'Note that the default policy setting restricts usage of this '
              'attribute to administrative users only.'),
            default=False,
        ),
        TAGS:
        properties.Schema(
            properties.Schema.LIST,
            _('The tags to be added to the subnetpool.'),
            schema=properties.Schema(properties.Schema.STRING),
            update_allowed=True,
            support_status=support.SupportStatus(version='9.0.0')),
    }

    def validate(self):
        """Validate properties, including prefix-length ordering."""
        super(SubnetPool, self).validate()
        self._validate_prefix_bounds()

    def _validate_prefix_bounds(self):
        """Check min <= default <= max for the prefix length properties."""
        min_prefixlen = self.properties[self.MIN_PREFIXLEN]
        default_prefixlen = self.properties[self.DEFAULT_PREFIXLEN]
        max_prefixlen = self.properties[self.MAX_PREFIXLEN]
        msg_fmt = _('Illegal prefix bounds: %(key1)s=%(value1)s, '
                    '%(key2)s=%(value2)s.')
        # min_prefixlen can not be greater than max_prefixlen
        if min_prefixlen and max_prefixlen and min_prefixlen > max_prefixlen:
            msg = msg_fmt % dict(key1=self.MAX_PREFIXLEN,
                                 value1=max_prefixlen,
                                 key2=self.MIN_PREFIXLEN,
                                 value2=min_prefixlen)
            raise exception.StackValidationFailed(message=msg)

        if default_prefixlen:
            # default_prefixlen can not be greater than max_prefixlen
            if max_prefixlen and default_prefixlen > max_prefixlen:
                msg = msg_fmt % dict(key1=self.MAX_PREFIXLEN,
                                     value1=max_prefixlen,
                                     key2=self.DEFAULT_PREFIXLEN,
                                     value2=default_prefixlen)
                raise exception.StackValidationFailed(message=msg)
            # min_prefixlen can not be greater than default_prefixlen
            if min_prefixlen and min_prefixlen > default_prefixlen:
                msg = msg_fmt % dict(key1=self.MIN_PREFIXLEN,
                                     value1=min_prefixlen,
                                     key2=self.DEFAULT_PREFIXLEN,
                                     value2=default_prefixlen)
                raise exception.StackValidationFailed(message=msg)

    def _validate_prefixes_for_update(self, prop_diff):
        """Reject prefix updates that shrink the existing prefix set."""
        old_prefixes = self.properties[self.PREFIXES]
        new_prefixes = prop_diff[self.PREFIXES]
        # check new_prefixes is a superset of old_prefixes
        if not netutils.is_prefix_subset(old_prefixes, new_prefixes):
            msg = (_('Property %(key)s updated value %(new)s should '
                     'be superset of existing value '
                     '%(old)s.') % dict(key=self.PREFIXES,
                                        new=sorted(new_prefixes),
                                        old=sorted(old_prefixes)))
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the subnet pool, resolving the address scope and tags."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        if self.ADDRESS_SCOPE in props and props[self.ADDRESS_SCOPE]:
            client_plugin = self.client_plugin()
            scope_id = client_plugin.find_resourceid_by_name_or_id(
                client_plugin.RES_TYPE_ADDRESS_SCOPE,
                props.pop(self.ADDRESS_SCOPE))
            props['address_scope_id'] = scope_id
        tags = props.pop(self.TAGS, [])
        subnetpool = self.client().create_subnetpool({'subnetpool':
                                                      props})['subnetpool']
        self.resource_id_set(subnetpool['id'])

        if tags:
            self.set_tags(tags)

    def handle_delete(self):
        """Delete the subnet pool, tolerating an already-gone pool."""
        if self.resource_id is not None:
            with self.client_plugin().ignore_not_found:
                self.client().delete_subnetpool(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply property updates, translating scope names and tags."""
        # check that new prefixes are superset of existing prefixes
        if self.PREFIXES in prop_diff:
            self._validate_prefixes_for_update(prop_diff)
        if self.ADDRESS_SCOPE in prop_diff:
            if prop_diff[self.ADDRESS_SCOPE]:
                client_plugin = self.client_plugin()
                # Resolve the scope name/ID with the same two-argument call
                # used in handle_create; the previous extra self.client()
                # argument was inconsistent with that call.
                scope_id = client_plugin.find_resourceid_by_name_or_id(
                    client_plugin.RES_TYPE_ADDRESS_SCOPE,
                    prop_diff.pop(self.ADDRESS_SCOPE))
            else:
                scope_id = prop_diff.pop(self.ADDRESS_SCOPE)
            prop_diff['address_scope_id'] = scope_id
        if self.TAGS in prop_diff:
            tags = prop_diff.pop(self.TAGS)
            self.set_tags(tags)
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_subnetpool(self.resource_id,
                                            {'subnetpool': prop_diff})
예제 #12
0
class SaharaClusterTemplate(resource.Resource):
    """A resource for managing Sahara cluster templates.

    A cluster template combines node groups, cluster configs, an optional
    default image and a management network into a template from which
    Sahara clusters can be launched.
    """

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        NAME,
        PLUGIN_NAME,
        HADOOP_VERSION,
        DESCRIPTION,
        ANTI_AFFINITY,
        MANAGEMENT_NETWORK,
        CLUSTER_CONFIGS,
        NODE_GROUPS,
        IMAGE_ID,
    ) = (
        'name',
        'plugin_name',
        'hadoop_version',
        'description',
        'anti_affinity',
        'neutron_management_network',
        'cluster_configs',
        'node_groups',
        'default_image_id',
    )

    # Keys for each entry of the NODE_GROUPS list property.
    _NODE_GROUP_KEYS = (
        NG_NAME,
        COUNT,
        NG_TEMPLATE_ID,
    ) = (
        'name',
        'count',
        'node_group_template_id',
    )

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _("Name for the Sahara Cluster Template."),
            constraints=[
                constraints.Length(min=1, max=50),
                constraints.AllowedPattern(SAHARA_NAME_REGEX),
            ],
        ),
        DESCRIPTION:
        properties.Schema(
            properties.Schema.STRING,
            _('Description of the Sahara Group Template.'),
            default="",
        ),
        PLUGIN_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
        ),
        HADOOP_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
        ),
        IMAGE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _("ID of the default image to use for the template."),
            constraints=[
                constraints.CustomConstraint('sahara.image'),
            ],
        ),
        MANAGEMENT_NETWORK:
        properties.Schema(
            properties.Schema.STRING,
            _('Name or UUID of network.'),
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        ANTI_AFFINITY:
        properties.Schema(
            properties.Schema.LIST,
            _("List of processes to enable anti-affinity for."),
            schema=properties.Schema(properties.Schema.STRING, ),
        ),
        CLUSTER_CONFIGS:
        properties.Schema(
            properties.Schema.MAP,
            _('Cluster configs dictionary.'),
        ),
        NODE_GROUPS:
        properties.Schema(
            properties.Schema.LIST,
            _('Node groups.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NG_NAME:
                    properties.Schema(properties.Schema.STRING,
                                      _('Name of the Node group.'),
                                      required=True),
                    COUNT:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _("Number of instances in the Node group."),
                        required=True,
                        constraints=[constraints.Range(min=1)]),
                    NG_TEMPLATE_ID:
                    properties.Schema(properties.Schema.STRING,
                                      _("ID of the Node Group Template."),
                                      required=True),
                }),
        ),
    }

    default_client_name = 'sahara'

    physical_resource_name_limit = 50

    entity = 'cluster_templates'

    def _cluster_template_name(self):
        """Return the configured name or a sanitized generated name."""
        name = self.properties[self.NAME]
        if name:
            return name
        # Sahara names are restricted; strip disallowed characters from
        # the generated physical resource name.
        return re.sub('[^a-zA-Z0-9-]', '', self.physical_resource_name())

    def handle_create(self):
        """Create the cluster template in Sahara and record its id."""
        plugin_name = self.properties[self.PLUGIN_NAME]
        hadoop_version = self.properties[self.HADOOP_VERSION]
        description = self.properties[self.DESCRIPTION]
        image_id = self.properties[self.IMAGE_ID]
        net_id = self.properties[self.MANAGEMENT_NETWORK]
        if net_id:
            # Resolve the network name/UUID via the backend in use.
            if self.is_using_neutron():
                net_id = self.client_plugin('neutron').find_neutron_resource(
                    self.properties, self.MANAGEMENT_NETWORK, 'network')
            else:
                net_id = self.client_plugin('nova').get_nova_network_id(net_id)
        anti_affinity = self.properties[self.ANTI_AFFINITY]
        cluster_configs = self.properties[self.CLUSTER_CONFIGS]
        node_groups = self.properties[self.NODE_GROUPS]
        cluster_template = self.client().cluster_templates.create(
            self._cluster_template_name(),
            plugin_name,
            hadoop_version,
            description=description,
            default_image_id=image_id,
            anti_affinity=anti_affinity,
            net_id=net_id,
            cluster_configs=cluster_configs,
            node_groups=node_groups)
        LOG.info(_LI("Cluster Template '%s' has been created"),
                 cluster_template.name)
        self.resource_id_set(cluster_template.id)
        return self.resource_id

    def validate(self):
        """Require a management network when running on Neutron."""
        res = super(SaharaClusterTemplate, self).validate()
        if res:
            return res
        # check if running on neutron and MANAGEMENT_NETWORK missing
        if (self.is_using_neutron()
                and not self.properties[self.MANAGEMENT_NETWORK]):
            msg = _("%s must be provided") % self.MANAGEMENT_NETWORK
            raise exception.StackValidationFailed(message=msg)
예제 #13
0
class KeyPair(resource.Resource):
    """A resource for creating Nova key pairs.

    A keypair is a ssh key that can be injected into a server on launch.

    **Note** that if a new key is generated setting `save_private_key` to
    `True` results in the system saving the private key which can then be
    retrieved via the `private_key` attribute of this resource.

    Setting the `public_key` property means that the `private_key` attribute
    of this resource will always return an empty string regardless of the
    `save_private_key` setting since there will be no private key data to
    save.
    """

    support_status = support.SupportStatus(version='2014.1')

    required_service_extension = 'os-keypairs'

    PROPERTIES = (
        NAME,
        SAVE_PRIVATE_KEY,
        PUBLIC_KEY,
    ) = (
        'name',
        'save_private_key',
        'public_key',
    )

    ATTRIBUTES = (
        PUBLIC_KEY_ATTR,
        PRIVATE_KEY_ATTR,
    ) = (
        'public_key',
        'private_key',
    )

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('The name of the key pair.'),
            required=True,
            constraints=[constraints.Length(min=1, max=255)],
        ),
        SAVE_PRIVATE_KEY:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('True if the system should remember a generated private key; '
              'False otherwise.'),
            default=False,
        ),
        PUBLIC_KEY:
        properties.Schema(
            properties.Schema.STRING,
            _('The optional public key. This allows users to supply the '
              'public key from a pre-existing key pair. If not supplied, a '
              'new key pair will be generated.'),
        ),
    }

    attributes_schema = {
        PUBLIC_KEY_ATTR:
        attributes.Schema(
            _('The public key.'),
            type=attributes.Schema.STRING,
        ),
        PRIVATE_KEY_ATTR:
        attributes.Schema(
            _('The private key if it has been saved.'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING,
        ),
    }

    default_client_name = 'nova'

    entity = 'keypairs'

    def __init__(self, name, json_snippet, stack):
        super(KeyPair, self).__init__(name, json_snippet, stack)
        # Lazily-populated cache for the public_key property.
        self._public_key = None

    @property
    def private_key(self):
        """Return the saved private SSH key, or '' when none was saved."""
        if not self.properties[self.SAVE_PRIVATE_KEY]:
            return ''
        return self.data().get('private_key', '')

    @property
    def public_key(self):
        """Return the public SSH key for the resource."""
        if self._public_key:
            return self._public_key
        if self.properties[self.PUBLIC_KEY]:
            self._public_key = self.properties[self.PUBLIC_KEY]
        elif self.resource_id:
            nova_key = self.client_plugin().get_keypair(self.resource_id)
            self._public_key = nova_key.public_key
        return self._public_key

    def handle_create(self):
        """Create the keypair, persisting a generated private key if asked."""
        supplied_key = self.properties[self.PUBLIC_KEY] or None
        keypair = self.client().keypairs.create(self.properties[self.NAME],
                                                public_key=supplied_key)
        wants_save = self.properties[self.SAVE_PRIVATE_KEY]
        if wants_save and hasattr(keypair, 'private_key'):
            self.data_set('private_key', keypair.private_key, True)
        self.resource_id_set(keypair.id)

    def handle_check(self):
        """Verify the keypair still exists in Nova."""
        self.client().keypairs.get(self.resource_id)

    def _resolve_attribute(self, key):
        values = {self.PRIVATE_KEY_ATTR: self.private_key,
                  self.PUBLIC_KEY_ATTR: self.public_key}
        return six.text_type(values[key])

    def get_reference_id(self):
        return self.resource_id
예제 #14
0
class MonascaNotification(resource.Resource):
    """Heat Template Resource for Monasca Notification.

    A resource which is used to notificate if there is some alarm.
    Monasca Notification helps to declare the hook points, which will be
    invoked once alarm is generated. This plugin helps to create, update and
    delete the notification.
    """

    support_status = support.SupportStatus(
        version='7.0.0',
        previous_status=support.SupportStatus(
            version='5.0.0',
            status=support.UNSUPPORTED
        ))

    default_client_name = 'monasca'

    entity = 'notifications'

    # NOTE(sirushti): To conform to the autoscaling behaviour in heat, we set
    # the default period interval during create/update to 60 for webhooks only.
    _default_period_interval = 60

    NOTIFICATION_TYPES = (
        EMAIL, WEBHOOK, PAGERDUTY
    ) = (
        'email', 'webhook', 'pagerduty'
    )

    PROPERTIES = (
        NAME, TYPE, ADDRESS, PERIOD
    ) = (
        'name', 'type', 'address', 'period'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the notification. By default, physical resource name '
              'is used.'),
            update_allowed=True
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Type of the notification.'),
            update_allowed=True,
            required=True,
            constraints=[constraints.AllowedValues(
                NOTIFICATION_TYPES
            )]
        ),
        ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('Address of the notification. It could be a valid email '
              'address, url or service key based on notification type.'),
            update_allowed=True,
            required=True,
            constraints=[constraints.Length(max=512)]
        ),
        PERIOD: properties.Schema(
            properties.Schema.INTEGER,
            _('Interval in seconds to invoke webhooks if the alarm state '
              'does not transition away from the defined trigger state. A '
              'value of 0 will disable continuous notifications. This '
              'property is only applicable for the webhook notification '
              'type and has default period interval of 60 seconds.'),
            support_status=support.SupportStatus(version='7.0.0'),
            update_allowed=True,
            constraints=[constraints.AllowedValues([0, 60])]
        )
    }

    def _period_interval(self):
        """Return the configured period, falling back to the default."""
        period = self.properties[self.PERIOD]
        if period is None:
            period = self._default_period_interval
        return period

    def validate(self):
        """Validate the address format against the notification type."""
        super(MonascaNotification, self).validate()
        if self.properties[self.PERIOD] is not None and (
                self.properties[self.TYPE] != self.WEBHOOK):
            msg = _('The period property can only be specified against a '
                    'Webhook Notification type.')
            raise exception.StackValidationFailed(message=msg)

        address = self.properties[self.ADDRESS]
        if not address:
            return

        if self.properties[self.TYPE] == self.WEBHOOK:
            try:
                parsed_address = urllib.parse.urlparse(address)
            except Exception:
                msg = _('Address "%(addr)s" should have correct format '
                        'required by "%(wh)s" type of "%(type)s" '
                        'property') % {
                    'addr': address,
                    'wh': self.WEBHOOK,
                    'type': self.TYPE}
                raise exception.StackValidationFailed(message=msg)
            if not parsed_address.scheme:
                msg = _('Address "%s" doesn\'t have required URL '
                        'scheme') % address
                raise exception.StackValidationFailed(message=msg)
            if not parsed_address.netloc:
                msg = _('Address "%s" doesn\'t have required network '
                        'location') % address
                raise exception.StackValidationFailed(message=msg)
            if parsed_address.scheme not in ['http', 'https']:
                msg = _('Address "%(addr)s" doesn\'t satisfies '
                        'allowed schemes: %(schemes)s') % {
                    'addr': address,
                    'schemes': ', '.join(['http', 'https'])
                }
                raise exception.StackValidationFailed(message=msg)
        elif (self.properties[self.TYPE] == self.EMAIL and
              # Raw string: '\S' in a plain literal is an invalid escape
              # sequence (DeprecationWarning on recent Python).
              not re.match(r'^\S+@\S+$', address)):
            msg = _('Address "%(addr)s" doesn\'t satisfies allowed format for '
                    '"%(email)s" type of "%(type)s" property') % {
                'addr': address,
                'email': self.EMAIL,
                'type': self.TYPE}
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the notification; webhooks get a period interval."""
        args = dict(
            name=(self.properties[self.NAME] or
                  self.physical_resource_name()),
            type=self.properties[self.TYPE],
            address=self.properties[self.ADDRESS],
        )
        if args['type'] == self.WEBHOOK:
            args['period'] = self._period_interval()

        notification = self.client().notifications.create(**args)
        self.resource_id_set(notification['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update the notification, merging changed and current values."""
        args = dict(notification_id=self.resource_id)

        args['name'] = (prop_diff.get(self.NAME) or
                        self.properties[self.NAME])

        args['type'] = (prop_diff.get(self.TYPE) or
                        self.properties[self.TYPE])

        args['address'] = (prop_diff.get(self.ADDRESS) or
                           self.properties[self.ADDRESS])

        if args['type'] == self.WEBHOOK:
            updated_period = prop_diff.get(self.PERIOD)
            args['period'] = (updated_period if updated_period is not None
                              else self._period_interval())

        self.client().notifications.update(**args)

    def handle_delete(self):
        """Delete the notification, tolerating an already-gone one."""
        if self.resource_id is not None:
            with self.client_plugin().ignore_not_found:
                self.client().notifications.delete(
                    notification_id=self.resource_id)

    # FIXME(kanagaraj-manickam) Remove this method once monasca defect 1484900
    # is fixed.
    def _show_resource(self):
        return self.client().notifications.get(self.resource_id)
예제 #15
0
class SaharaClusterTemplate(resource.Resource):
    """A resource for managing Sahara cluster templates.

    The template bundles node groups and cluster-wide settings (plugin,
    hadoop version, management network, manila shares) and is stored and
    updated through the sahara client's ``cluster_templates`` API.
    """

    support_status = support.SupportStatus(version='2014.2')

    # Property names exposed in the template ...
    PROPERTIES = (
        NAME, PLUGIN_NAME, HADOOP_VERSION, DESCRIPTION,
        ANTI_AFFINITY, MANAGEMENT_NETWORK,
        CLUSTER_CONFIGS, NODE_GROUPS, IMAGE_ID, USE_AUTOCONFIG,
        SHARES
    ) = (
        'name', 'plugin_name', 'hadoop_version', 'description',
        'anti_affinity', 'neutron_management_network',
        'cluster_configs', 'node_groups', 'default_image_id', 'use_autoconfig',
        'shares'
    )

    # Keys of each entry in the NODE_GROUPS list property.
    _NODE_GROUP_KEYS = (
        NG_NAME, COUNT, NG_TEMPLATE_ID,
    ) = (
        'name', 'count', 'node_group_template_id',
    )

    # Keys of each entry in the SHARES list property.
    _SHARE_KEYS = (
        SHARE_ID, PATH, ACCESS_LEVEL
    ) = (
        'id', 'path', 'access_level'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _("Name for the Sahara Cluster Template."),
            constraints=[
                constraints.Length(min=1, max=50),
                constraints.AllowedPattern(SAHARA_NAME_REGEX),
            ],
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the Sahara Group Template.'),
            default="",
            update_allowed=True
        ),
        PLUGIN_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('sahara.plugin')
            ],
            update_allowed=True
        ),
        HADOOP_VERSION: properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
            update_allowed=True
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _("ID of the default image to use for the template."),
            constraints=[
                constraints.CustomConstraint('sahara.image'),
            ],
            update_allowed=True
        ),
        MANAGEMENT_NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('Name or UUID of network.'),
            constraints=[
                constraints.CustomConstraint('neutron.network')
            ],
            update_allowed=True
        ),
        ANTI_AFFINITY: properties.Schema(
            properties.Schema.LIST,
            _("List of processes to enable anti-affinity for."),
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
            update_allowed=True
        ),
        CLUSTER_CONFIGS: properties.Schema(
            properties.Schema.MAP,
            _('Cluster configs dictionary.'),
            update_allowed=True
        ),
        NODE_GROUPS: properties.Schema(
            properties.Schema.LIST,
            _('Node groups.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NG_NAME: properties.Schema(
                        properties.Schema.STRING,
                        _('Name of the Node group.'),
                        required=True
                    ),
                    COUNT: properties.Schema(
                        properties.Schema.INTEGER,
                        _("Number of instances in the Node group."),
                        required=True,
                        constraints=[
                            constraints.Range(min=1)
                        ]
                    ),
                    NG_TEMPLATE_ID: properties.Schema(
                        properties.Schema.STRING,
                        _("ID of the Node Group Template."),
                        required=True
                    ),
                }
            ),
            update_allowed=True
        ),
        USE_AUTOCONFIG: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Configure most important configs automatically."),
            support_status=support.SupportStatus(version='5.0.0')
        ),
        SHARES: properties.Schema(
            properties.Schema.LIST,
            _("List of manila shares to be mounted."),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    SHARE_ID: properties.Schema(
                        properties.Schema.STRING,
                        _("Id of the manila share."),
                        required=True
                    ),
                    PATH: properties.Schema(
                        properties.Schema.STRING,
                        _("Local path on each cluster node on which to mount "
                          "the share. Defaults to '/mnt/{share_id}'.")
                    ),
                    ACCESS_LEVEL: properties.Schema(
                        properties.Schema.STRING,
                        _("Governs permissions set in manila for the cluster "
                          "ips."),
                        constraints=[
                            constraints.AllowedValues(['rw', 'ro']),
                        ],
                        default='rw'
                    )
                }
            ),
            support_status=support.SupportStatus(version='6.0.0'),
            update_allowed=True
        )
    }

    default_client_name = 'sahara'

    # Sahara limits template names to 50 characters.
    physical_resource_name_limit = 50

    entity = 'cluster_templates'

    def _cluster_template_name(self):
        """Return the NAME property, or a sanitized physical resource name."""
        name = self.properties[self.NAME]
        if name:
            return name
        # Sahara names must match SAHARA_NAME_REGEX, so strip other chars.
        return re.sub('[^a-zA-Z0-9-]', '', self.physical_resource_name())

    def _prepare_properties(self):
        """Build the kwargs dict for the sahara cluster_templates API."""
        props = {
            'name': self._cluster_template_name(),
            'plugin_name': self.properties[self.PLUGIN_NAME],
            'hadoop_version': self.properties[self.HADOOP_VERSION],
            'description': self.properties[self.DESCRIPTION],
            'cluster_configs': self.properties[self.CLUSTER_CONFIGS],
            'node_groups': self.properties[self.NODE_GROUPS],
            'anti_affinity': self.properties[self.ANTI_AFFINITY],
            'net_id': self.properties[self.MANAGEMENT_NETWORK],
            'default_image_id': self.properties[self.IMAGE_ID],
            'use_autoconfig': self.properties[self.USE_AUTOCONFIG],
            'shares': self.properties[self.SHARES]
        }
        # Resolve the network name/UUID to an ID via whichever networking
        # service the deployment uses (neutron or nova-network).
        if props['net_id']:
            if self.is_using_neutron():
                props['net_id'] = self.client_plugin(
                    'neutron').find_neutron_resource(
                    self.properties, self.MANAGEMENT_NETWORK, 'network')
            else:
                props['net_id'] = self.client_plugin(
                    'nova').get_nova_network_id(props['net_id'])
        return props

    def handle_create(self):
        """Create the cluster template and record its ID."""
        args = self._prepare_properties()
        cluster_template = self.client().cluster_templates.create(**args)
        LOG.info(_LI("Cluster Template '%s' has been created"),
                 cluster_template.name)
        self.resource_id_set(cluster_template.id)
        return self.resource_id

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-resolve all properties and push the full update to sahara."""
        if prop_diff:
            # Rebuild self.properties from the new snippet so that
            # _prepare_properties sees the updated values.
            self.properties = json_snippet.properties(
                self.properties_schema,
                self.context)
            args = self._prepare_properties()
            self.client().cluster_templates.update(self.resource_id, **args)

    def validate(self):
        """Validate network presence on neutron and the plugin/version pair."""
        res = super(SaharaClusterTemplate, self).validate()
        if res:
            return res
        # check if running on neutron and MANAGEMENT_NETWORK missing
        if (self.is_using_neutron() and
                not self.properties[self.MANAGEMENT_NETWORK]):
            msg = _("%s must be provided"
                    ) % self.MANAGEMENT_NETWORK
            raise exception.StackValidationFailed(message=msg)

        self.client_plugin().validate_hadoop_version(
            self.properties[self.PLUGIN_NAME],
            self.properties[self.HADOOP_VERSION]
        )
예제 #16
0
class CloudDns(resource.Resource):
    """A resource for managing Rackspace Cloud DNS domains and their records.

    Domain-level properties and the list of DNS records are handled
    separately: records are replaced wholesale on update, while the top
    level domain attributes are patched in place.
    """

    PROPERTIES = (
        NAME,
        EMAIL_ADDRESS,
        TTL,
        COMMENT,
        RECORDS,
    ) = (
        'name',
        'emailAddress',
        'ttl',
        'comment',
        'records',
    )

    # Keys of each entry in the RECORDS list property.
    _RECORD_KEYS = (
        RECORD_COMMENT,
        RECORD_NAME,
        RECORD_DATA,
        RECORD_PRIORITY,
        RECORD_TTL,
        RECORD_TYPE,
    ) = (
        'comment',
        'name',
        'data',
        'priority',
        'ttl',
        'type',
    )

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Specifies the name for the domain or subdomain. Must be a '
              'valid domain name.'),
            required=True,
            constraints=[
                constraints.Length(min=3),
            ]),
        EMAIL_ADDRESS:
        properties.Schema(
            properties.Schema.STRING,
            _('Email address to use for contacting the domain administrator.'),
            required=True,
            update_allowed=True),
        TTL:
        properties.Schema(properties.Schema.INTEGER,
                          _('How long other servers should cache recorddata.'),
                          default=3600,
                          constraints=[
                              constraints.Range(min=301),
                          ],
                          update_allowed=True),
        COMMENT:
        properties.Schema(properties.Schema.STRING,
                          _('Optional free form text comment'),
                          constraints=[
                              constraints.Length(max=160),
                          ],
                          update_allowed=True),
        RECORDS:
        properties.Schema(
            properties.Schema.LIST,
            _('Domain records'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    RECORD_COMMENT:
                    properties.Schema(properties.Schema.STRING,
                                      _('Optional free form text comment'),
                                      constraints=[
                                          constraints.Length(max=160),
                                      ]),
                    RECORD_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Specifies the name for the domain or '
                          'subdomain. Must be a valid domain name.'),
                        required=True,
                        constraints=[
                            constraints.Length(min=3),
                        ]),
                    RECORD_DATA:
                    properties.Schema(properties.Schema.STRING,
                                      _('Type specific record data'),
                                      required=True),
                    RECORD_PRIORITY:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('Required for MX and SRV records, but '
                          'forbidden for other record types. If '
                          'specified, must be an integer from 0 to '
                          '65535.'),
                        constraints=[
                            constraints.Range(0, 65535),
                        ]),
                    RECORD_TTL:
                    properties.Schema(properties.Schema.INTEGER,
                                      _('How long other servers should cache '
                                        'recorddata.'),
                                      default=3600,
                                      constraints=[
                                          constraints.Range(min=301),
                                      ]),
                    RECORD_TYPE:
                    properties.Schema(properties.Schema.STRING,
                                      _('Specifies the record type.'),
                                      required=True,
                                      constraints=[
                                          constraints.AllowedValues([
                                              'A', 'AAAA', 'NS', 'MX', 'CNAME',
                                              'TXT', 'SRV'
                                          ]),
                                      ]),
                },
            ),
            update_allowed=True),
    }

    update_allowed_keys = ('Properties', )

    def cloud_dns(self):
        """Return the Cloud DNS client from the stack's clients."""
        return self.stack.clients.cloud_dns()

    def handle_create(self):
        """Create a Rackspace CloudDns Instance."""
        # There is no check_create_complete as the pyrax create for DNS is
        # synchronous.
        logger.debug(_("CloudDns handle_create called."))
        args = dict((k, v) for k, v in self.properties.items())
        for rec in args[self.RECORDS] or []:
            # only pop the priority for the correct types: priority is
            # forbidden for everything except MX and SRV records.
            rec_type = rec[self.RECORD_TYPE]
            if rec_type not in ('MX', 'SRV'):
                rec.pop(self.RECORD_PRIORITY, None)
        dom = self.cloud_dns().create(**args)
        self.resource_id_set(dom.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update a Rackspace CloudDns Instance."""
        logger.debug(_("CloudDns handle_update called."))
        if not self.resource_id:
            raise exception.Error(_('Update called on a non-existent domain'))
        if prop_diff:
            dom = self.cloud_dns().get(self.resource_id)

            # handle records separately
            records = prop_diff.pop(self.RECORDS, {})

            # Handle top level domain properties
            dom.update(**prop_diff)

            # handle records
            # NOTE: this block must stay inside the prop_diff guard —
            # 'records' and 'dom' are only bound when prop_diff is truthy,
            # and dereferencing them otherwise raised UnboundLocalError.
            if records:
                recs = dom.list_records()
                # 1. delete all the current records other than rackspace NS
                # records
                for rec in recs:
                    if rec.type != 'NS' or 'stabletransit.com' not in rec.data:
                        rec.delete()
                # 2. update with the new records in prop_diff
                dom.add_records(records)

    def handle_delete(self):
        """Delete a Rackspace CloudDns Instance."""
        logger.debug(_("CloudDns handle_delete called."))
        if self.resource_id:
            try:
                dom = self.cloud_dns().get(self.resource_id)
                dom.delete()
            except NotFound:
                # Already gone; treat delete as successful.
                pass
        self.resource_id_set(None)
예제 #17
0
class SaharaNodeGroupTemplate(resource.Resource):
    """A resource for managing Sahara node group templates.

    Stored and updated through the sahara client's ``node_group_templates``
    API; validation checks the floating IP pool, the plugin/hadoop version
    pair, and that every requested node process is supported by the plugin.
    """

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        NAME, PLUGIN_NAME, HADOOP_VERSION, FLAVOR, DESCRIPTION,
        VOLUMES_PER_NODE, VOLUMES_SIZE, VOLUME_TYPE,
        SECURITY_GROUPS, AUTO_SECURITY_GROUP,
        AVAILABILITY_ZONE, VOLUMES_AVAILABILITY_ZONE,
        NODE_PROCESSES, FLOATING_IP_POOL, NODE_CONFIGS, IMAGE_ID,
        IS_PROXY_GATEWAY, VOLUME_LOCAL_TO_INSTANCE, USE_AUTOCONFIG,
        SHARES

    ) = (
        'name', 'plugin_name', 'hadoop_version', 'flavor', 'description',
        'volumes_per_node', 'volumes_size', 'volume_type',
        'security_groups', 'auto_security_group',
        'availability_zone', 'volumes_availability_zone',
        'node_processes', 'floating_ip_pool', 'node_configs', 'image_id',
        'is_proxy_gateway', 'volume_local_to_instance', 'use_autoconfig',
        'shares'
    )

    # Keys of each entry in the SHARES list property.
    _SHARE_KEYS = (
        SHARE_ID, PATH, ACCESS_LEVEL
    ) = (
        'id', 'path', 'access_level'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _("Name for the Sahara Node Group Template."),
            constraints=[
                constraints.Length(min=1, max=50),
                constraints.AllowedPattern(SAHARA_NAME_REGEX),
            ],
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the Node Group Template.'),
            default="",
            update_allowed=True
        ),
        PLUGIN_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('sahara.plugin')
            ],
            update_allowed=True
        ),
        HADOOP_VERSION: properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
            update_allowed=True
        ),
        FLAVOR: properties.Schema(
            properties.Schema.STRING,
            _('Name or ID Nova flavor for the nodes.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('nova.flavor')
            ],
            update_allowed=True
        ),
        VOLUMES_PER_NODE: properties.Schema(
            properties.Schema.INTEGER,
            _("Volumes per node."),
            constraints=[
                constraints.Range(min=0),
            ],
            update_allowed=True
        ),
        VOLUMES_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _("Size of the volumes, in GB."),
            constraints=[
                constraints.Range(min=1),
            ],
            update_allowed=True
        ),
        VOLUME_TYPE: properties.Schema(
            properties.Schema.STRING,
            _("Type of the volume to create on Cinder backend."),
            constraints=[
                constraints.CustomConstraint('cinder.vtype')
            ],
            update_allowed=True
        ),
        SECURITY_GROUPS: properties.Schema(
            properties.Schema.LIST,
            _("List of security group names or IDs to assign to this "
              "Node Group template."),
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
            update_allowed=True
        ),
        AUTO_SECURITY_GROUP: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Defines whether auto-assign security group to this "
              "Node Group template."),
            update_allowed=True
        ),
        AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _("Availability zone to create servers in."),
            update_allowed=True
        ),
        VOLUMES_AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _("Availability zone to create volumes in."),
            update_allowed=True
        ),
        NODE_PROCESSES: properties.Schema(
            properties.Schema.LIST,
            _("List of processes to run on every node."),
            required=True,
            constraints=[
                constraints.Length(min=1),
            ],
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
            update_allowed=True
        ),
        FLOATING_IP_POOL: properties.Schema(
            properties.Schema.STRING,
            _("Name or UUID of the Neutron floating IP network or "
              "name of the Nova floating ip pool to use. "
              "Should not be provided when used with Nova-network "
              "that auto-assign floating IPs."),
            update_allowed=True
        ),
        NODE_CONFIGS: properties.Schema(
            properties.Schema.MAP,
            _("Dictionary of node configurations."),
            update_allowed=True
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _("ID of the image to use for the template."),
            constraints=[
                constraints.CustomConstraint('sahara.image'),
            ],
            update_allowed=True
        ),
        IS_PROXY_GATEWAY: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Provide access to nodes using other nodes of the cluster "
              "as proxy gateways."),
            support_status=support.SupportStatus(version='5.0.0'),
            update_allowed=True
        ),
        VOLUME_LOCAL_TO_INSTANCE: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Create volumes on the same physical port as an instance."),
            support_status=support.SupportStatus(version='5.0.0'),
            update_allowed=True
        ),
        USE_AUTOCONFIG: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Configure most important configs automatically."),
            support_status=support.SupportStatus(version='5.0.0'),
            update_allowed=True
        ),
        SHARES: properties.Schema(
            properties.Schema.LIST,
            _("List of manila shares to be mounted."),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    SHARE_ID: properties.Schema(
                        properties.Schema.STRING,
                        _("Id of the manila share."),
                        required=True
                    ),
                    PATH: properties.Schema(
                        properties.Schema.STRING,
                        _("Local path on each cluster node on which to mount "
                          "the share. Defaults to '/mnt/{share_id}'.")
                    ),
                    ACCESS_LEVEL: properties.Schema(
                        properties.Schema.STRING,
                        _("Governs permissions set in manila for the cluster "
                          "ips."),
                        constraints=[
                            constraints.AllowedValues(['rw', 'ro']),
                        ],
                        default='rw'
                    )
                }
            ),
            support_status=support.SupportStatus(version='6.0.0'),
            update_allowed=True
        )
    }

    default_client_name = 'sahara'

    # Sahara limits template names to 50 characters.
    physical_resource_name_limit = 50

    entity = 'node_group_templates'

    def _ngt_name(self):
        """Return the NAME property, or a sanitized physical resource name."""
        name = self.properties[self.NAME]
        if name:
            return name
        # Sahara names must match SAHARA_NAME_REGEX, so strip other chars.
        return re.sub('[^a-zA-Z0-9-]', '', self.physical_resource_name())

    def _prepare_properties(self):
        """Build the kwargs dict for the sahara node_group_templates API."""
        props = {
            'name': self._ngt_name(),
            'plugin_name': self.properties[self.PLUGIN_NAME],
            'hadoop_version': self.properties[self.HADOOP_VERSION],
            # Flavor may be given by name; resolve it to an ID via nova.
            'flavor_id': self.client_plugin("nova").find_flavor_by_name_or_id(
                self.properties[self.FLAVOR]),
            'description': self.properties[self.DESCRIPTION],
            'volumes_per_node': self.properties[self.VOLUMES_PER_NODE],
            'volumes_size': self.properties[self.VOLUMES_SIZE],
            'node_processes': self.properties[self.NODE_PROCESSES],
            'node_configs': self.properties[self.NODE_CONFIGS],
            'floating_ip_pool': self.properties[self.FLOATING_IP_POOL],
            'security_groups': self.properties[self.SECURITY_GROUPS],
            'auto_security_group': self.properties[self.AUTO_SECURITY_GROUP],
            'availability_zone': self.properties[self.AVAILABILITY_ZONE],
            'volumes_availability_zone': self.properties[
                self.VOLUMES_AVAILABILITY_ZONE],
            'volume_type': self.properties[self.VOLUME_TYPE],
            'image_id': self.properties[self.IMAGE_ID],
            'is_proxy_gateway': self.properties[self.IS_PROXY_GATEWAY],
            'volume_local_to_instance': self.properties[
                self.VOLUME_LOCAL_TO_INSTANCE],
            'use_autoconfig': self.properties[self.USE_AUTOCONFIG],
            'shares': self.properties[self.SHARES]
        }
        # On neutron deployments the pool is a network name/UUID that must
        # be resolved to an ID; on nova-network it is passed through as-is.
        if props['floating_ip_pool'] and self.is_using_neutron():
            props['floating_ip_pool'] = self.client_plugin(
                'neutron').find_neutron_resource(
                    self.properties, self.FLOATING_IP_POOL, 'network')
        return props

    def handle_create(self):
        """Create the node group template and record its ID."""
        args = self._prepare_properties()
        node_group_template = self.client().node_group_templates.create(**args)
        LOG.info(_LI("Node Group Template '%s' has been created"),
                 node_group_template.name)
        self.resource_id_set(node_group_template.id)
        return self.resource_id

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-resolve all properties and push the full update to sahara."""
        if prop_diff:
            # Rebuild self.properties from the new snippet so that
            # _prepare_properties sees the updated values.
            self.properties = json_snippet.properties(
                self.properties_schema,
                self.context)
            args = self._prepare_properties()
            self.client().node_group_templates.update(self.resource_id, **args)

    def validate(self):
        """Validate floating IP pool, plugin version and node processes."""
        res = super(SaharaNodeGroupTemplate, self).validate()
        if res:
            return res
        pool = self.properties[self.FLOATING_IP_POOL]
        if pool:
            if self.is_using_neutron():
                # Surface "not found"/"not unique" lookup failures as stack
                # validation errors; re-raise anything unexpected.
                try:
                    self.client_plugin('neutron').find_neutron_resource(
                        self.properties, self.FLOATING_IP_POOL, 'network')
                except Exception as ex:
                    # NOTE(review): ex.message is Python 2 only — confirm
                    # before running under Python 3.
                    if (self.client_plugin('neutron').is_not_found(ex)
                            or self.client_plugin('neutron').is_no_unique(ex)):
                        raise exception.StackValidationFailed(
                            message=ex.message)
                    raise
            else:
                try:
                    self.client('nova').floating_ip_pools.find(name=pool)
                except Exception as ex:
                    if self.client_plugin('nova').is_not_found(ex):
                        raise exception.StackValidationFailed(
                            message=ex.message)
                    raise

        self.client_plugin().validate_hadoop_version(
            self.properties[self.PLUGIN_NAME],
            self.properties[self.HADOOP_VERSION]
        )

        # validate node processes
        plugin = self.client().plugins.get_version_details(
            self.properties[self.PLUGIN_NAME],
            self.properties[self.HADOOP_VERSION])
        # Flatten the per-service process lists into one allowed set.
        allowed_processes = [item for sublist in
                             list(six.itervalues(plugin.node_processes))
                             for item in sublist]
        unsupported_processes = []
        for process in self.properties[self.NODE_PROCESSES]:
            if process not in allowed_processes:
                unsupported_processes.append(process)
        if unsupported_processes:
            msg = (_("Plugin %(plugin)s doesn't support the following "
                     "node processes: %(unsupported)s. Allowed processes are: "
                     "%(allowed)s") %
                   {'plugin': self.properties[self.PLUGIN_NAME],
                    'unsupported': ', '.join(unsupported_processes),
                    'allowed': ', '.join(allowed_processes)})
            raise exception.StackValidationFailed(
                path=[self.stack.t.get_section_name('resources'),
                      self.name,
                      self.stack.t.get_section_name('properties')],
                message=msg)
예제 #18
0
파일: recordset.py 프로젝트: odmanV2/heat
class DesignateRecordSet(resource.Resource):
    """Heat Template Resource for Designate RecordSet.

    Designate provides DNS-as-a-Service services for OpenStack. RecordSet
    helps to add more than one records.
    """

    support_status = support.SupportStatus(version='8.0.0')

    PROPERTIES = (NAME, TTL, DESCRIPTION, TYPE, RECORDS,
                  ZONE) = ('name', 'ttl', 'description', 'type', 'records',
                           'zone')

    # Record types accepted by the TYPE property.
    _ALLOWED_TYPES = (A, AAAA, CNAME, MX, SRV, TXT, SPF, NS, PTR, SSHFP,
                      SOA) = ('A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF',
                              'NS', 'PTR', 'SSHFP', 'SOA')

    properties_schema = {
        # Based on RFC 1035, length of name is set to max of 255
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('RecordSet name.'),
                          constraints=[constraints.Length(max=255)]),
        # Based on RFC 1035, range for ttl is set to 1 to signed 32 bit number
        TTL:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Time To Live (Seconds).'),
            update_allowed=True,
            constraints=[constraints.Range(min=1, max=2147483647)]),
        # designate mandates to the max length of 160 for description
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description of RecordSet.'),
                          update_allowed=True,
                          constraints=[constraints.Length(max=160)]),
        TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('DNS RecordSet type.'),
            required=True,
            constraints=[constraints.AllowedValues(_ALLOWED_TYPES)]),
        RECORDS:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of data for this RecordSet. Each item will be a '
              'separate record in Designate These items should conform to the '
              'DNS spec for the record type - e.g. A records must be IPv4 '
              'addresses, CNAME records must be a hostname. DNS record data '
              'varies based on the type of record. For more details, please '
              'refer rfc 1035.'),
            update_allowed=True,
            required=True),
        ZONE:
        properties.Schema(
            properties.Schema.STRING,
            _('DNS Zone id or name.'),
            required=True,
            constraints=[constraints.CustomConstraint('designate.zone')]),
    }

    default_client_name = 'designate'

    entity = 'recordsets'

    def handle_create(self):
        """Create the recordset in the configured zone and record its ID."""
        # Drop unset/empty properties; designate fills in its own defaults.
        args = dict((k, v) for k, v in six.iteritems(self.properties) if v)
        # The client uses 'type_' to avoid shadowing the builtin 'type'.
        args['type_'] = args.pop(self.TYPE)
        if not args.get(self.NAME):
            args[self.NAME] = self.physical_resource_name()

        rs = self.client().recordsets.create(**args)

        self.resource_id_set(rs['id'])

    def _check_status_complete(self):
        """Return True once the recordset leaves PENDING; raise on ERROR."""
        recordset = self.client().recordsets.get(
            recordset=self.resource_id, zone=self.properties[self.ZONE])

        if recordset['status'] == 'ERROR':
            raise exception.ResourceInError(
                resource_status=recordset['status'],
                status_reason=_('Error in RecordSet'))

        return recordset['status'] != 'PENDING'

    def check_create_complete(self, handler_data=None):
        return self._check_status_complete()

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Send only the changed updatable properties to designate."""
        args = dict()

        for prp in (self.TTL, self.DESCRIPTION, self.RECORDS):
            if prop_diff.get(prp):
                args[prp] = prop_diff.get(prp)

        if prop_diff.get(self.TYPE):
            args['type_'] = prop_diff.get(self.TYPE)

        # Skip the API call entirely when nothing relevant changed.
        if args:
            self.client().recordsets.update(recordset=self.resource_id,
                                            zone=self.properties[self.ZONE],
                                            values=args)

    def check_update_complete(self, handler_data=None):
        return self._check_status_complete()

    def handle_delete(self):
        """Request deletion; return the ID so completion can be polled."""
        if self.resource_id is not None:
            with self.client_plugin().ignore_not_found:
                self.client().recordsets.delete(
                    recordset=self.resource_id,
                    zone=self.properties[self.ZONE])
                return self.resource_id

    def check_delete_complete(self, handler_data=None):
        # Deletion is complete when the recordset is gone (not found) or
        # no delete was issued in the first place.
        if handler_data:
            with self.client_plugin().ignore_not_found:
                return self._check_status_complete()

        return True

    def _show_resource(self):
        return self.client().recordsets.get(recordset=self.resource_id,
                                            zone=self.properties[self.ZONE])
예제 #19
0
 def test_length_invalid_type(self):
     """A Length constraint on an Integer schema must fail validation."""
     length_constraint = constraints.Length(1, 10)
     schema = constraints.Schema('Integer',
                                 constraints=[length_constraint])
     error = self.assertRaises(exception.InvalidSchemaError,
                               schema.validate)
     self.assertIn('Length constraint invalid for Integer',
                   six.text_type(error))
예제 #20
0
class NetworkGateway(neutron.NeutronResource):
    '''
    A resource for the Network Gateway resource in Neutron Network Gateway.

    A network gateway joins internal (tenant) networks to an external L2
    segment through one or more gateway devices; connections may use
    'flat' or 'vlan' segmentation.
    '''

    PROPERTIES = (
        NAME,
        DEVICES,
        CONNECTIONS,
    ) = (
        'name',
        'devices',
        'connections',
    )

    # Keys accepted in each entry of the DEVICES list property.
    _DEVICES_KEYS = (
        ID,
        INTERFACE_NAME,
    ) = (
        'id',
        'interface_name',
    )

    # Keys accepted in each entry of the CONNECTIONS list property.
    _CONNECTIONS_KEYS = (
        NETWORK_ID,
        SEGMENTATION_TYPE,
        SEGMENTATION_ID,
    ) = (
        'network_id',
        'segmentation_type',
        'segmentation_id',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          description=_('The name of the network gateway.'),
                          update_allowed=True),
        DEVICES:
        properties.Schema(
            properties.Schema.LIST,
            description=_('Device info for this network gateway.'),
            required=True,
            constraints=[constraints.Length(min=1)],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ID:
                    properties.Schema(properties.Schema.STRING,
                                      description=_(
                                          'The device id for the network '
                                          'gateway.'),
                                      required=True),
                    INTERFACE_NAME:
                    properties.Schema(properties.Schema.STRING,
                                      description=_(
                                          'The interface name for the '
                                          'network gateway.'),
                                      required=True)
                })),
        CONNECTIONS:
        properties.Schema(
            properties.Schema.LIST,
            description=_('Connection info for this network gateway.'),
            # NOTE(review): default={} for a LIST property looks like it
            # should be [] -- an empty dict iterates the same as an empty
            # list, so behaviour is unchanged; confirm before changing.
            default={},
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NETWORK_ID:
                    properties.Schema(
                        properties.Schema.STRING,
                        description=_(
                            'The id of internal network to connect on '
                            'the network gateway.'),
                        required=True),
                    SEGMENTATION_TYPE:
                    properties.Schema(
                        properties.Schema.STRING,
                        description=_(
                            'L2 segmentation strategy on the external '
                            'side of the network gateway.'),
                        default='flat',
                        constraints=[
                            constraints.AllowedValues(('flat', 'vlan'))
                        ]),
                    SEGMENTATION_ID:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        description=_(
                            'The id for L2 segment on the external side '
                            'of the network gateway. Must be specified '
                            'when using vlan.'),
                        constraints=[constraints.Range(0, 4094)])
                }))
    }

    attributes_schema = {
        "default": _("A boolean value of default flag."),
        "show": _("All attributes.")
    }

    # Only the Properties section of the template may be updated in place.
    update_allowed_keys = ('Properties', )

    def _show_resource(self):
        # Fetch the current gateway from Neutron for attribute resolution.
        return self.neutron().show_network_gateway(
            self.resource_id)['network_gateway']

    def validate(self):
        '''
        Validate any of the provided params.

        Enforces the segmentation rules: 'vlan' connections must supply a
        segmentation_id, while 'flat' connections must not (0 excepted).
        '''
        super(NetworkGateway, self).validate()
        connections = self.properties[self.CONNECTIONS]

        for connection in connections:
            segmentation_type = connection[self.SEGMENTATION_TYPE]
            segmentation_id = connection.get(self.SEGMENTATION_ID)

            if segmentation_type == 'vlan' and segmentation_id is None:
                msg = _("segmentation_id must be specified for using vlan")
                raise exception.StackValidationFailed(message=msg)

            if segmentation_type == 'flat' and segmentation_id:
                msg = _("segmentation_id cannot be specified except 0 for "
                        "using flat")
                raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        # Create the gateway itself first, then attach each requested
        # network connection to it.
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())

        connections = props.pop(self.CONNECTIONS)
        ret = self.neutron().create_network_gateway({'network_gateway':
                                                     props})['network_gateway']

        for connection in connections:
            self.neutron().connect_network_gateway(ret['id'], connection)

        self.resource_id_set(ret['id'])

    def handle_delete(self):
        # Disconnect every known connection (ignoring those already gone)
        # before deleting the gateway itself.
        if not self.resource_id:
            return
        client = self.neutron()

        connections = self.properties[self.CONNECTIONS]
        for connection in connections:
            try:
                client.disconnect_network_gateway(self.resource_id, connection)
            except NeutronClientException as ex:
                self._handle_not_found_exception(ex)

        try:
            client.delete_network_gateway(self.resource_id)
        except NeutronClientException as ex:
            self._handle_not_found_exception(ex)
        else:
            return self._delete_task()

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        props = self.prepare_update_properties(json_snippet)
        connections = props.pop(self.CONNECTIONS)

        # Gateway devices cannot be updated in place: recreate the whole
        # gateway when the device list changes.
        if self.DEVICES in prop_diff:
            self.handle_delete()
            self.properties.data.update(props)
            self.handle_create()
            return
        else:
            props.pop(self.DEVICES, None)

        if self.NAME in prop_diff:
            self.neutron().update_network_gateway(self.resource_id,
                                                  {'network_gateway': props})

        # Connections are replaced wholesale: disconnect the old set, then
        # connect the new one.
        if self.CONNECTIONS in prop_diff:
            for connection in self.properties[self.CONNECTIONS]:
                try:
                    self.neutron().disconnect_network_gateway(
                        self.resource_id, connection)
                except NeutronClientException as ex:
                    self._handle_not_found_exception(ex)
            for connection in connections:
                self.neutron().connect_network_gateway(self.resource_id,
                                                       connection)
예제 #21
0
 def test_length_min_schema(self):
     """dict(Length) serializes the min bound and the description."""
     expected = {'length': {'min': 5}, 'description': 'a length range'}
     constraint = constraints.Length(min=5, description='a length range')
     self.assertEqual(expected, dict(constraint))
예제 #22
0
class DesignateDomain(resource.Resource):
    """Heat Template Resource for Designate Domain.

    Manages a DNS domain (name, contact email, TTL and description)
    through the Designate service.
    """

    support_status = support.SupportStatus(version='5.0.0')

    PROPERTIES = (NAME, TTL, DESCRIPTION, EMAIL) = ('name', 'ttl',
                                                    'description', 'email')

    ATTRIBUTES = (SERIAL, ) = ('serial', )

    properties_schema = {
        # Based on RFC 1035, length of name is set to max of 255
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Domain name.'),
                          required=True,
                          constraints=[constraints.Length(max=255)]),
        # Based on RFC 1035, range for ttl is set to 0 to signed 32 bit number
        TTL:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Time To Live (Seconds).'),
            update_allowed=True,
            constraints=[constraints.Range(min=0, max=2147483647)]),
        # designate mandates to the max length of 160 for description
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description of domain.'),
                          update_allowed=True,
                          constraints=[constraints.Length(max=160)]),
        EMAIL:
        properties.Schema(properties.Schema.STRING,
                          _('Domain email.'),
                          update_allowed=True,
                          required=True)
    }

    attributes_schema = {
        SERIAL:
        attributes.Schema(_("DNS domain serial."),
                          type=attributes.Schema.STRING),
    }

    default_client_name = 'designate'

    def handle_create(self):
        """Create the domain from the resource properties."""
        args = dict(name=self.properties[self.NAME],
                    email=self.properties[self.EMAIL],
                    description=self.properties[self.DESCRIPTION],
                    ttl=self.properties[self.TTL])

        domain = self.client_plugin().domain_create(**args)

        self.resource_id_set(domain.id)

    def handle_update(self, json_snippet=None, tmpl_diff=None, prop_diff=None):
        """Push changed update-allowed properties to Designate.

        Compares diff values against None rather than truthiness so that
        legitimate falsy values (ttl=0, description='') are still applied.
        """
        args = dict()

        # The property names map 1:1 onto domain_update keyword arguments.
        for prp in (self.EMAIL, self.TTL, self.DESCRIPTION):
            if prop_diff.get(prp) is not None:
                args[prp] = prop_diff[prp]

        if args:
            args['id'] = self.resource_id
            self.client_plugin().domain_update(**args)

    def handle_delete(self):
        """Delete the backing domain, tolerating an already-missing one."""
        if self.resource_id is not None:
            try:
                self.client().domains.delete(self.resource_id)
            except Exception as ex:
                # Delegate to the client plugin so only not-found errors
                # are swallowed.
                self.client_plugin().ignore_not_found(ex)

    def _resolve_attribute(self, name):
        """Return the domain serial for the 'serial' attribute."""
        if name == self.SERIAL:
            domain = self.client().domains.get(self.resource_id)
            return domain.serial
예제 #23
0
 def test_length_validate(self):
     """A value exactly at the min==max bound passes validation."""
     constraint = constraints.Length(min=5, max=5, description='a range')
     constraint.validate('abcde')
예제 #24
0
class NetworkGateway(neutron.NeutronResource):
    """Network Gateway resource in Neutron Network Gateway.

    Resource for connecting internal networks with specified devices.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        NAME,
        DEVICES,
        CONNECTIONS,
    ) = (
        'name',
        'devices',
        'connections',
    )

    ATTRIBUTES = (DEFAULT, ) = ('default', )

    # Keys accepted in each entry of the DEVICES list property.
    _DEVICES_KEYS = (
        ID,
        INTERFACE_NAME,
    ) = (
        'id',
        'interface_name',
    )

    # Keys accepted in each entry of the CONNECTIONS list property.
    # NETWORK_ID is the hidden/deprecated spelling of NETWORK.
    _CONNECTIONS_KEYS = (
        NETWORK_ID,
        NETWORK,
        SEGMENTATION_TYPE,
        SEGMENTATION_ID,
    ) = (
        'network_id',
        'network',
        'segmentation_type',
        'segmentation_id',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          description=_('The name of the network gateway.'),
                          update_allowed=True),
        DEVICES:
        properties.Schema(
            properties.Schema.LIST,
            description=_('Device info for this network gateway.'),
            required=True,
            constraints=[constraints.Length(min=1)],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    ID:
                    properties.Schema(properties.Schema.STRING,
                                      description=_(
                                          'The device id for the network '
                                          'gateway.'),
                                      required=True),
                    INTERFACE_NAME:
                    properties.Schema(properties.Schema.STRING,
                                      description=_(
                                          'The interface name for the '
                                          'network gateway.'),
                                      required=True)
                })),
        CONNECTIONS:
        properties.Schema(
            properties.Schema.LIST,
            description=_('Connection info for this network gateway.'),
            # NOTE(review): default={} for a LIST property looks like it
            # should be [] -- an empty dict iterates the same as an empty
            # list, so behaviour is unchanged; confirm before changing.
            default={},
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NETWORK_ID:
                    properties.Schema(
                        properties.Schema.STRING,
                        support_status=support.SupportStatus(
                            status=support.HIDDEN,
                            message=_('Use property %s.') % NETWORK,
                            version='5.0.0',
                            previous_status=support.SupportStatus(
                                status=support.DEPRECATED, version='2014.2')),
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ],
                    ),
                    NETWORK:
                    properties.Schema(
                        properties.Schema.STRING,
                        description=_('The internal network to connect on '
                                      'the network gateway.'),
                        support_status=support.SupportStatus(version='2014.2'),
                        required=True,
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ],
                    ),
                    SEGMENTATION_TYPE:
                    properties.Schema(
                        properties.Schema.STRING,
                        description=_(
                            'L2 segmentation strategy on the external '
                            'side of the network gateway.'),
                        default='flat',
                        constraints=[
                            constraints.AllowedValues(('flat', 'vlan'))
                        ]),
                    SEGMENTATION_ID:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        description=_(
                            'The id for L2 segment on the external side '
                            'of the network gateway. Must be specified '
                            'when using vlan.'),
                        constraints=[constraints.Range(0, 4094)])
                }))
    }

    attributes_schema = {
        DEFAULT:
        attributes.Schema(_("A boolean value of default flag."),
                          type=attributes.Schema.STRING),
    }

    def translation_rules(self, props):
        # Transparently map the deprecated 'network_id' key onto 'network'
        # so older templates keep working.
        return [
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.CONNECTIONS, self.NETWORK],
                                        value_name=self.NETWORK_ID)
        ]

    def _show_resource(self):
        # Fetch the current gateway from Neutron for attribute resolution.
        return self.client().show_network_gateway(
            self.resource_id)['network_gateway']

    def validate(self):
        """Validate any of the provided params.

        Enforces the segmentation rules: 'vlan' connections must supply a
        segmentation_id, while 'flat' connections must not (0 excepted).
        """
        super(NetworkGateway, self).validate()
        connections = self.properties[self.CONNECTIONS]

        for connection in connections:
            segmentation_type = connection[self.SEGMENTATION_TYPE]
            segmentation_id = connection.get(self.SEGMENTATION_ID)

            if segmentation_type == 'vlan' and segmentation_id is None:
                msg = _("segmentation_id must be specified for using vlan")
                raise exception.StackValidationFailed(message=msg)

            if segmentation_type == 'flat' and segmentation_id:
                msg = _("segmentation_id cannot be specified except 0 for "
                        "using flat")
                raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        # Create the gateway first, then attach each requested connection,
        # resolving the network name/id to a 'network_id' for Neutron.
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())

        connections = props.pop(self.CONNECTIONS)
        ret = self.client().create_network_gateway({'network_gateway':
                                                    props})['network_gateway']

        self.resource_id_set(ret['id'])

        for connection in connections:
            self.client_plugin().resolve_network(connection, self.NETWORK,
                                                 'network_id')
            if self.NETWORK in six.iterkeys(connection):
                connection.pop(self.NETWORK)
            self.client().connect_network_gateway(ret['id'], connection)

    def handle_delete(self):
        # Disconnect every known connection (ignoring those already gone)
        # before deleting the gateway itself.
        if not self.resource_id:
            return

        connections = self.properties[self.CONNECTIONS]
        for connection in connections:
            with self.client_plugin().ignore_not_found:
                self.client_plugin().resolve_network(connection, self.NETWORK,
                                                     'network_id')
                if self.NETWORK in six.iterkeys(connection):
                    connection.pop(self.NETWORK)
                self.client().disconnect_network_gateway(
                    self.resource_id, connection)

        try:
            self.client().delete_network_gateway(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        connections = None
        if self.CONNECTIONS in prop_diff:
            connections = prop_diff.pop(self.CONNECTIONS)

        # Gateway devices cannot be updated in place: recreate the whole
        # gateway when the device list changes.
        if self.DEVICES in prop_diff:
            self.handle_delete()
            self.properties.data.update(prop_diff)
            self.handle_create()
            return

        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_network_gateway(
                self.resource_id, {'network_gateway': prop_diff})

        # Connections are replaced wholesale: disconnect the old set, then
        # connect the new one.
        if connections:
            for connection in self.properties[self.CONNECTIONS]:
                with self.client_plugin().ignore_not_found:
                    self.client_plugin().resolve_network(
                        connection, self.NETWORK, 'network_id')
                    if self.NETWORK in six.iterkeys(connection):
                        connection.pop(self.NETWORK)
                    self.client().disconnect_network_gateway(
                        self.resource_id, connection)
            for connection in connections:
                self.client_plugin().resolve_network(connection, self.NETWORK,
                                                     'network_id')
                if self.NETWORK in six.iterkeys(connection):
                    connection.pop(self.NETWORK)
                self.client().connect_network_gateway(self.resource_id,
                                                      connection)
예제 #25
0
파일: os_database.py 프로젝트: mrhearn/heat
class OSDBInstance(resource.Resource):
    '''
    OpenStack cloud database instance resource.

    Provisions a Trove database instance with optional databases, users
    and network interfaces.
    '''

    support_status = support.SupportStatus(version='2014.1')

    # Trove instance statuses this resource reacts to while polling.
    TROVE_STATUS = (
        ERROR,
        FAILED,
        ACTIVE,
    ) = (
        'ERROR',
        'FAILED',
        'ACTIVE',
    )

    # Human-readable reasons reported when creation lands in a bad status.
    TROVE_STATUS_REASON = {
        FAILED:
        _('The database instance was created, but heat failed to set '
          'up the datastore. If a database instance is in the FAILED '
          'state, it should be deleted and a new one should be '
          'created.'),
        ERROR:
        _('The last operation for the database instance failed due to '
          'an error.'),
    }

    # Statuses that abort creation polling with ResourceInError.
    BAD_STATUSES = (ERROR, FAILED)
    PROPERTIES = (
        NAME,
        FLAVOR,
        SIZE,
        DATABASES,
        USERS,
        AVAILABILITY_ZONE,
        RESTORE_POINT,
        DATASTORE_TYPE,
        DATASTORE_VERSION,
        NICS,
    ) = (
        'name',
        'flavor',
        'size',
        'databases',
        'users',
        'availability_zone',
        'restore_point',
        'datastore_type',
        'datastore_version',
        'networks',
    )

    # Keys accepted in each entry of the DATABASES list property.
    _DATABASE_KEYS = (
        DATABASE_CHARACTER_SET,
        DATABASE_COLLATE,
        DATABASE_NAME,
    ) = (
        'character_set',
        'collate',
        'name',
    )

    # Keys accepted in each entry of the USERS list property.
    _USER_KEYS = (
        USER_NAME,
        USER_PASSWORD,
        USER_HOST,
        USER_DATABASES,
    ) = (
        'name',
        'password',
        'host',
        'databases',
    )

    # Keys accepted in each entry of the NICS list property.
    _NICS_KEYS = (NET, PORT, V4_FIXED_IP) = ('network', 'port', 'fixed_ip')

    ATTRIBUTES = (
        HOSTNAME,
        HREF,
    ) = (
        'hostname',
        'href',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the DB instance to create.'),
                          constraints=[
                              constraints.Length(max=255),
                          ]),
        FLAVOR:
        properties.Schema(
            properties.Schema.STRING,
            _('Reference to a flavor for creating DB instance.'),
            required=True,
            constraints=[constraints.CustomConstraint('trove.flavor')]),
        DATASTORE_TYPE:
        properties.Schema(properties.Schema.STRING,
                          _("Name of registered datastore type."),
                          constraints=[constraints.Length(max=255)]),
        DATASTORE_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _("Name of the registered datastore version. "
              "It must exist for provided datastore type. "
              "Defaults to using single active version. "
              "If several active versions exist for provided datastore type, "
              "explicit value for this parameter must be specified."),
            constraints=[constraints.Length(max=255)]),
        SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Database volume size in GB.'),
                          required=True,
                          constraints=[
                              constraints.Range(1, 150),
                          ]),
        NICS:
        properties.Schema(
            properties.Schema.LIST,
            _("List of network interfaces to create on instance."),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NET:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of the network to attach this NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT,
                              'net': NET
                          },
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ]),
                    PORT:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of Neutron port to attach this '
                          'NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT,
                              'net': NET
                          },
                        constraints=[
                            constraints.CustomConstraint('neutron.port')
                        ],
                    ),
                    V4_FIXED_IP:
                    properties.Schema(properties.Schema.STRING,
                                      _('Fixed IPv4 address for this NIC.')),
                },
            ),
        ),
        DATABASES:
        properties.Schema(
            properties.Schema.LIST,
            _('List of databases to be created on DB instance creation.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    DATABASE_CHARACTER_SET:
                    properties.Schema(properties.Schema.STRING,
                                      _('Set of symbols and encodings.'),
                                      default='utf8'),
                    DATABASE_COLLATE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Set of rules for comparing characters in a '
                          'character set.'),
                        default='utf8_general_ci'),
                    DATABASE_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Specifies database names for creating '
                          'databases on instance creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=64),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                },
            )),
        USERS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of users to be created on DB instance creation.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    USER_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('User name to create a user on instance '
                          'creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=16),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                    USER_PASSWORD:
                    properties.Schema(properties.Schema.STRING,
                                      _('Password for those users on instance '
                                        'creation.'),
                                      required=True,
                                      constraints=[
                                          constraints.AllowedPattern(
                                              r'[a-zA-Z0-9_]+'
                                              r'[a-zA-Z0-9_@?#\s]*'
                                              r'[a-zA-Z0-9_]+'),
                                      ]),
                    USER_HOST:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The host from which a user is allowed to '
                          'connect to the database.'),
                        default='%'),
                    USER_DATABASES:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('Names of databases that those users can '
                          'access on instance creation.'),
                        schema=properties.Schema(properties.Schema.STRING, ),
                        required=True,
                        constraints=[
                            constraints.Length(min=1),
                        ]),
                },
            )),
        AVAILABILITY_ZONE:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the availability zone for DB instance.')),
        RESTORE_POINT:
        properties.Schema(properties.Schema.STRING,
                          _('DB instance restore point.')),
    }

    attributes_schema = {
        HOSTNAME: attributes.Schema(_("Hostname of the instance.")),
        HREF: attributes.Schema(_("Api endpoint reference of the instance.")),
    }

    default_client_name = 'trove'

    def __init__(self, name, json_snippet, stack):
        """Initialize the resource and its lazily-populated caches."""
        super(OSDBInstance, self).__init__(name, json_snippet, stack)
        # Cached trove instance and href, fetched on first access.
        self._dbinstance = None
        self._href = None

    @property
    def dbinstance(self):
        """The trove dbinstance, fetched once and then cached."""
        cache_empty = not self._dbinstance
        if cache_empty and self.resource_id:
            self._dbinstance = self.trove().instances.get(self.resource_id)

        return self._dbinstance

    def _dbinstance_name(self):
        """Return the configured instance name, or a generated one."""
        return (self.properties.get(self.NAME) or
                self.physical_resource_name())

    def handle_create(self):
        '''
        Create cloud database instance.

        Returns the new instance id as handler data for
        check_create_complete.
        '''
        self.flavor = self.client_plugin().get_flavor_id(
            self.properties[self.FLAVOR])
        self.volume = {'size': self.properties[self.SIZE]}
        self.databases = self.properties.get(self.DATABASES)
        self.users = self.properties.get(self.USERS)
        restore_point = self.properties.get(self.RESTORE_POINT)
        if restore_point:
            restore_point = {"backupRef": restore_point}
        zone = self.properties.get(self.AVAILABILITY_ZONE)
        self.datastore_type = self.properties.get(self.DATASTORE_TYPE)
        self.datastore_version = self.properties.get(self.DATASTORE_VERSION)

        # convert user databases to format required for troveclient.
        # that is, list of database dictionaries
        for user in self.users:
            dbs = [{'name': db} for db in user.get(self.USER_DATABASES, [])]
            user[self.USER_DATABASES] = dbs

        # convert networks to format required by troveclient
        nics = []
        for nic in self.properties.get(self.NICS):
            nic_dict = {}
            net = nic.get(self.NET)
            if net:
                if self.is_using_neutron():
                    net_id = (
                        self.client_plugin('neutron').find_neutron_resource(
                            nic, self.NET, 'network'))
                else:
                    net_id = (
                        self.client_plugin('nova').get_nova_network_id(net))
                nic_dict['net-id'] = net_id
            port = nic.get(self.PORT)
            if port:
                neutron = self.client_plugin('neutron')
                # NOTE(review): the net branch above resolves from 'nic',
                # but this resolves the port from self.properties -- looks
                # inconsistent; confirm the intended lookup source.
                nic_dict['port-id'] = neutron.find_neutron_resource(
                    self.properties, self.PORT, 'port')
            ip = nic.get(self.V4_FIXED_IP)
            if ip:
                nic_dict['v4-fixed-ip'] = ip
            nics.append(nic_dict)

        # create db instance
        instance = self.trove().instances.create(
            self._dbinstance_name(),
            self.flavor,
            volume=self.volume,
            databases=self.databases,
            users=self.users,
            restorePoint=restore_point,
            availability_zone=zone,
            datastore=self.datastore_type,
            datastore_version=self.datastore_version,
            nics=nics)
        self.resource_id_set(instance.id)

        return instance.id

    def _refresh_instance(self, instance_id):
        """Fetch the trove instance, returning None on an OverLimit reply.

        Any other error from the client is re-raised.
        """
        try:
            return self.trove().instances.get(instance_id)
        except Exception as exc:
            if not self.client_plugin().is_over_limit(exc):
                raise
            # Rate-limited: log and let the caller poll again later.
            LOG.warn(_LW("Stack %(name)s (%(id)s) received an "
                         "OverLimit response during instance.get():"
                         " %(exception)s"),
                     {'name': self.stack.name,
                      'id': self.stack.id,
                      'exception': exc})
            return None

    def check_create_complete(self, instance_id):
        """Return True once the cloud DB instance has become ACTIVE."""
        instance = self._refresh_instance(instance_id)  # refresh attributes
        if instance is None:
            # Transient over-limit response; poll again next time.
            return False

        status = instance.status
        if status in self.BAD_STATUSES:
            reason = self.TROVE_STATUS_REASON.get(status, _("Unknown"))
            raise resource.ResourceInError(resource_status=status,
                                           status_reason=reason)

        if status != self.ACTIVE:
            return False

        LOG.info(
            _LI("Database instance %(database)s created (flavor:%("
                "flavor)s,volume:%(volume)s, datastore:%("
                "datastore_type)s, datastore_version:%("
                "datastore_version)s)"), {
                    'database': self._dbinstance_name(),
                    'flavor': self.flavor,
                    'volume': self.volume,
                    'datastore_type': self.datastore_type,
                    'datastore_version': self.datastore_version
                })
        return True

    def handle_check(self):
        """Verify that the database instance is in the ACTIVE state."""
        current_status = self.trove().instances.get(self.resource_id).status
        self._verify_check_conditions([
            {
                'attr': 'status',
                'expected': self.ACTIVE,
                'current': current_status
            },
        ])

    def handle_delete(self):
        """Delete the cloud database instance.

        Returns the instance id for check_delete_complete to poll on,
        or None when there is nothing to delete.
        """
        if not self.resource_id:
            return None

        try:
            db_instance = self.trove().instances.get(self.resource_id)
        except Exception as ex:
            # Already gone; anything other than not-found is re-raised.
            self.client_plugin().ignore_not_found(ex)
            return None
        db_instance.delete()
        return db_instance.id

    def check_delete_complete(self, instance_id):
        """Return True once the cloud DB instance can no longer be fetched."""
        if instance_id:
            try:
                # The Trove instance may linger for a while after the
                # delete call has been issued.
                self._refresh_instance(instance_id)
            except Exception as ex:
                self.client_plugin().ignore_not_found(ex)
            else:
                return False
        return True

    def validate(self):
        """Validate the properties supplied for the DB instance."""
        res = super(OSDBInstance, self).validate()
        if res:
            return res

        self.client_plugin().validate_datastore(
            self.properties.get(self.DATASTORE_TYPE),
            self.properties.get(self.DATASTORE_VERSION),
            self.DATASTORE_TYPE,
            self.DATASTORE_VERSION)

        # Users may only reference databases that are also declared.
        users = self.properties.get(self.USERS)
        if users:
            databases = self.properties.get(self.DATABASES)
            if not databases:
                msg = _('Databases property is required if users property '
                        'is provided for resource %s.') % self.name
                raise exception.StackValidationFailed(message=msg)

            declared_dbs = {db[self.DATABASE_NAME] for db in databases}
            for user in users:
                missing_db = [
                    db_name for db_name in user[self.USER_DATABASES]
                    if db_name not in declared_dbs
                ]
                if missing_db:
                    msg = (_('Database %(dbs)s specified for user does '
                             'not exist in databases for resource %(name)s.') %
                           {
                               'dbs': missing_db,
                               'name': self.name
                           })
                    raise exception.StackValidationFailed(message=msg)

        # Each NIC must specify exactly one of net/port, and ports are
        # only available when Neutron is in use.
        is_neutron = self.is_using_neutron()
        for nic in self.properties.get(self.NICS):
            if nic.get(self.PORT) and not is_neutron:
                msg = _("Can not use %s property on Nova-network.") % self.PORT
                raise exception.StackValidationFailed(message=msg)

            if bool(nic.get(self.NET)) == bool(nic.get(self.PORT)):
                msg = _("Either %(net)s or %(port)s must be provided.") % {
                    'net': self.NET,
                    'port': self.PORT
                }
                raise exception.StackValidationFailed(message=msg)

    def href(self):
        """Return the 'self' link of the DB instance, cached on first use."""
        if not self._href and self.dbinstance:
            links = self.dbinstance.links
            if not links:
                self._href = None
            else:
                for candidate in links:
                    if candidate['rel'] == 'self':
                        self._href = candidate[self.HREF]
                        break

        return self._href

    def _resolve_attribute(self, name):
        """Resolve the hostname/href attributes of the DB instance."""
        if name == self.HOSTNAME:
            return self.dbinstance.hostname
        if name == self.HREF:
            return self.href()
# Example #26 (0)
# File: keypair.py  Project: odmanV2/heat
class KeyPair(resource.Resource):
    """A resource for creating Nova key pairs.

    A keypair is a ssh key that can be injected into a server on launch.

    **Note** that if a new key is generated setting `save_private_key` to
    `True` results in the system saving the private key which can then be
    retrieved via the `private_key` attribute of this resource.

    Setting the `public_key` property means that the `private_key` attribute
    of this resource will always return an empty string regardless of the
    `save_private_key` setting since there will be no private key data to
    save.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        NAME,
        SAVE_PRIVATE_KEY,
        PUBLIC_KEY,
        KEY_TYPE,
        USER,
    ) = (
        'name',
        'save_private_key',
        'public_key',
        'type',
        'user',
    )

    ATTRIBUTES = (
        PUBLIC_KEY_ATTR,
        PRIVATE_KEY_ATTR,
    ) = (
        'public_key',
        'private_key',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('The name of the key pair.'),
                          required=True,
                          constraints=[constraints.Length(min=1, max=255)]),
        SAVE_PRIVATE_KEY:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('True if the system should remember a generated private key; '
              'False otherwise.'),
            default=False),
        PUBLIC_KEY:
        properties.Schema(
            properties.Schema.STRING,
            _('The optional public key. This allows users to supply the '
              'public key from a pre-existing key pair. If not supplied, a '
              'new key pair will be generated.')),
        KEY_TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('Keypair type. Supported since Nova api version 2.2.'),
            constraints=[constraints.AllowedValues(['ssh', 'x509'])],
            support_status=support.SupportStatus(version='8.0.0')),
        USER:
        properties.Schema(
            properties.Schema.STRING,
            _('ID or name of user to whom to add key-pair. The usage of this '
              'property is limited to being used by administrators only. '
              'Supported since Nova api version 2.10.'),
            constraints=[constraints.CustomConstraint('keystone.user')],
            support_status=support.SupportStatus(version='9.0.0')),
    }

    attributes_schema = {
        PUBLIC_KEY_ATTR:
        attributes.Schema(_('The public key.'), type=attributes.Schema.STRING),
        PRIVATE_KEY_ATTR:
        attributes.Schema(_('The private key if it has been saved.'),
                          cache_mode=attributes.Schema.CACHE_NONE,
                          type=attributes.Schema.STRING),
    }

    default_client_name = 'nova'

    entity = 'keypairs'

    def __init__(self, name, json_snippet, stack):
        super(KeyPair, self).__init__(name, json_snippet, stack)
        # Cache for the public key so it is looked up from Nova at most once.
        self._public_key = None

    def translation_rules(self, props):
        """Resolve the user property to a Keystone user id."""
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE, [self.USER],
                client_plugin=self.client_plugin('keystone'),
                finder='get_user_id')
        ]

    @property
    def private_key(self):
        """Return the private SSH key for the resource."""
        if self.properties[self.SAVE_PRIVATE_KEY]:
            return self.data().get('private_key', '')
        else:
            return ''

    @property
    def public_key(self):
        """Return the public SSH key for the resource."""
        if not self._public_key:
            if self.properties[self.PUBLIC_KEY]:
                self._public_key = self.properties[self.PUBLIC_KEY]
            elif self.resource_id:
                # Fall back to asking Nova for the key we created.
                nova_key = self.client_plugin().get_keypair(self.resource_id)
                self._public_key = nova_key.public_key
        return self._public_key

    def validate(self):
        """Reject properties that need an unsupported Nova microversion."""
        super(KeyPair, self).validate()

        # Check if key_type is allowed to use
        key_type = self.properties[self.KEY_TYPE]
        user = self.properties[self.USER]

        validate_props = []
        c_plugin = self.client_plugin()
        if key_type and not c_plugin.is_version_supported(
                MICROVERSION_KEY_TYPE):
            validate_props.append(self.KEY_TYPE)
        if user and not c_plugin.is_version_supported(MICROVERSION_USER):
            validate_props.append(self.USER)
        if validate_props:
            msg = (_('Cannot use "%s" properties - nova does not '
                     'support required api microversion.') % validate_props)
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the key pair in Nova.

        Optional arguments are only passed when set, for compatibility
        with older Nova microversions. If a private key was generated and
        save_private_key is set, it is stored (redacted) in resource data.
        """
        pub_key = self.properties[self.PUBLIC_KEY] or None
        user_id = self.properties[self.USER]
        key_type = self.properties[self.KEY_TYPE]

        create_kwargs = {
            'name': self.properties[self.NAME],
            'public_key': pub_key
        }

        if key_type:
            create_kwargs[self.KEY_TYPE] = key_type
        if user_id:
            create_kwargs['user_id'] = user_id

        new_keypair = self.client().keypairs.create(**create_kwargs)

        if (self.properties[self.SAVE_PRIVATE_KEY]
                and hasattr(new_keypair, 'private_key')):
            self.data_set('private_key', new_keypair.private_key, True)
        self.resource_id_set(new_keypair.id)

    def handle_check(self):
        """Verify the key pair still exists in Nova."""
        self.client().keypairs.get(self.resource_id)

    def _resolve_attribute(self, key):
        # Dispatch lazily via callables: the original dict of property
        # accesses evaluated BOTH properties eagerly, so resolving
        # private_key could also trigger a Nova API call for public_key.
        # An unknown key still raises KeyError, as before.
        attr_fn = {
            self.PRIVATE_KEY_ATTR: lambda: self.private_key,
            self.PUBLIC_KEY_ATTR: lambda: self.public_key
        }
        return six.text_type(attr_fn[key]())

    def get_reference_id(self):
        """The key pair is referenced by its resource (key) id."""
        return self.resource_id

    def prepare_for_replace(self):
        """Delete the existing key pair so its name is free for the new one."""
        if self.resource_id is None:
            return

        with self.client_plugin().ignore_not_found:
            self.client().keypairs.delete(self.resource_id)
# Example #27 (0)
class CloudNetwork(resource.Resource):
    """A resource for creating Rackspace Cloud Networks.

    See http://www.rackspace.com/cloud/networks/ for service
    documentation.
    """

    support_status = support.SupportStatus(
        status=support.DEPRECATED,
        message=_('Use OS::Neutron::Net instead.'),
        version='2015.1')

    PROPERTIES = (LABEL, CIDR) = ("label", "cidr")

    ATTRIBUTES = (
        CIDR_ATTR,
        LABEL_ATTR,
    ) = (
        'cidr',
        'label',
    )

    properties_schema = {
        LABEL:
        properties.Schema(properties.Schema.STRING,
                          _("The name of the network."),
                          required=True,
                          constraints=[constraints.Length(min=3, max=64)]),
        CIDR:
        properties.Schema(
            properties.Schema.STRING,
            _("The IP block from which to allocate the network. For example, "
              "172.16.0.0/24 or 2001:DB8::/64."),
            required=True,
            constraints=[constraints.CustomConstraint('net_cidr')])
    }

    attributes_schema = {
        CIDR_ATTR:
        attributes.Schema(_("The CIDR for an isolated private network.")),
        LABEL_ATTR:
        attributes.Schema(_("The name of the network.")),
    }

    def __init__(self, name, json_snippet, stack):
        # Use super() for consistency with the other resource classes
        # instead of calling the base __init__ directly.
        super(CloudNetwork, self).__init__(name, json_snippet, stack)
        # Cached network object, fetched lazily by network().
        self._network = None

    def network(self):
        """Return the backing cloud network object, caching the lookup."""
        if self.resource_id and not self._network:
            try:
                self._network = self.cloud_networks().get(self.resource_id)
            except NotFound:
                # LOG.warn() is a deprecated alias; use LOG.warning().
                LOG.warning(
                    _LW("Could not find network %s but resource id is"
                        " set."), self.resource_id)
        return self._network

    def cloud_networks(self):
        """Return the Rackspace cloud networks client."""
        return self.client('cloud_networks')

    def handle_create(self):
        """Create the cloud network and record its id."""
        cnw = self.cloud_networks().create(label=self.properties[self.LABEL],
                                           cidr=self.properties[self.CIDR])
        self.resource_id_set(cnw.id)

    def handle_check(self):
        """Verify the cloud network still exists."""
        self.cloud_networks().get(self.resource_id)

    def handle_delete(self):
        """Delete cloud network.

        Cloud Network doesn't have a status attribute, and there is a non-zero
        window between the deletion of a server and the acknowledgement from
        the cloud network that it's no longer in use, so it needs some way to
        keep track of when the delete call was successfully issued.
        """
        network_info = {
            'delete_issued': False,
            'network': self.network(),
        }
        return network_info

    def check_delete_complete(self, network_info):
        """Poll until the network delete has been issued and taken effect."""
        network = network_info['network']

        if not network:
            return True

        if not network_info['delete_issued']:
            try:
                network.delete()
            except NetworkInUse:
                # Pass the id as a lazy logging argument instead of
                # pre-formatting the message with %.
                LOG.warning("Network '%s' still in use.", network.id)
            else:
                network_info['delete_issued'] = True
            return False

        try:
            network.get()
        except NotFound:
            return True

        return False

    def validate(self):
        """No extra validation beyond the base resource checks."""
        super(CloudNetwork, self).validate()

    def _resolve_attribute(self, name):
        """Resolve cidr/label attributes from the live network object."""
        net = self.network()
        if net:
            return six.text_type(getattr(net, name))
        return ""
# Example #28 (0)
class ResourceGroup(stack_resource.StackResource):
    """Creates one or more identically configured nested resources.

    In addition to the `refs` attribute, this resource implements synthetic
    attributes that mirror those of the resources in the group. When
    getting an attribute from this resource, however, a list of attribute
    values for each resource in the group is returned. To get attribute values
    for a single resource in the group, synthetic attributes of the form
    `resource.{resource index}.{attribute name}` can be used. The resource ID
    of a particular resource in the group can be obtained via the synthetic
    attribute `resource.{resource index}`. Note, that if you get attribute
    without `{resource index}`, e.g. `[resource, {attribute_name}]`, you'll get
    a list of this attribute's value for all resources in group.

    While each resource in the group will be identically configured, this
    resource does allow for some index-based customization of the properties
    of the resources in the group. For example::

      resources:
        my_indexed_group:
          type: OS::Heat::ResourceGroup
          properties:
            count: 3
            resource_def:
              type: OS::Nova::Server
              properties:
                # create a unique name for each server
                # using its index in the group
                name: my_server_%index%
                image: CentOS 6.5
                flavor: 4GB Performance

    would result in a group of three servers having the same image and flavor,
    but names of `my_server_0`, `my_server_1`, and `my_server_2`. The variable
    used for substitution can be customized by using the `index_var` property.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        COUNT,
        INDEX_VAR,
        RESOURCE_DEF,
        REMOVAL_POLICIES,
        REMOVAL_POLICIES_MODE,
    ) = ('count', 'index_var', 'resource_def', 'removal_policies',
         'removal_policies_mode')

    _RESOURCE_DEF_KEYS = (
        RESOURCE_DEF_TYPE,
        RESOURCE_DEF_PROPERTIES,
        RESOURCE_DEF_METADATA,
    ) = (
        'type',
        'properties',
        'metadata',
    )

    _REMOVAL_POLICIES_KEYS = (REMOVAL_RSRC_LIST, ) = ('resource_list', )

    _REMOVAL_POLICY_MODES = (REMOVAL_POLICY_APPEND,
                             REMOVAL_POLICY_UPDATE) = ('append', 'update')

    _ROLLING_UPDATES_SCHEMA_KEYS = (
        MIN_IN_SERVICE,
        MAX_BATCH_SIZE,
        PAUSE_TIME,
    ) = (
        'min_in_service',
        'max_batch_size',
        'pause_time',
    )

    _BATCH_CREATE_SCHEMA_KEYS = (
        MAX_BATCH_SIZE,
        PAUSE_TIME,
    ) = (
        'max_batch_size',
        'pause_time',
    )

    _UPDATE_POLICY_SCHEMA_KEYS = (
        ROLLING_UPDATE,
        BATCH_CREATE,
    ) = (
        'rolling_update',
        'batch_create',
    )

    ATTRIBUTES = (REFS, REFS_MAP, ATTR_ATTRIBUTES,
                  REMOVED_RSRC_LIST) = ('refs', 'refs_map', 'attributes',
                                        'removed_rsrc_list')

    properties_schema = {
        COUNT:
        properties.Schema(properties.Schema.INTEGER,
                          _('The number of resources to create.'),
                          default=1,
                          constraints=[
                              constraints.Range(min=0),
                          ],
                          update_allowed=True),
        INDEX_VAR:
        properties.Schema(
            properties.Schema.STRING,
            _('A variable that this resource will use to replace with the '
              'current index of a given resource in the group. Can be used, '
              'for example, to customize the name property of grouped '
              'servers in order to differentiate them when listed with '
              'nova client.'),
            default="%index%",
            constraints=[constraints.Length(min=3)],
            support_status=support.SupportStatus(version='2014.2')),
        RESOURCE_DEF:
        properties.Schema(
            properties.Schema.MAP,
            _('Resource definition for the resources in the group. The value '
              'of this property is the definition of a resource just as if '
              'it had been declared in the template itself.'),
            schema={
                RESOURCE_DEF_TYPE:
                properties.Schema(properties.Schema.STRING,
                                  _('The type of the resources in the group.'),
                                  required=True),
                RESOURCE_DEF_PROPERTIES:
                properties.Schema(
                    properties.Schema.MAP,
                    _('Property values for the resources in the group.')),
                RESOURCE_DEF_METADATA:
                properties.Schema(
                    properties.Schema.MAP,
                    _('Supplied metadata for the resources in the group.'),
                    support_status=support.SupportStatus(version='5.0.0')),
            },
            required=True,
            update_allowed=True),
        REMOVAL_POLICIES:
        properties.Schema(
            properties.Schema.LIST,
            _('Policies for removal of resources on update.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                _('Policy to be processed when doing an update which '
                  'requires removal of specific resources.'),
                schema={
                    REMOVAL_RSRC_LIST:
                    properties.Schema(
                        properties.Schema.LIST,
                        _("List of resources to be removed "
                          "when doing an update which requires removal of "
                          "specific resources. "
                          "The resource may be specified several ways: "
                          "(1) The resource name, as in the nested stack, "
                          "(2) The resource reference returned from "
                          "get_resource in a template, as available via "
                          "the 'refs' attribute. "
                          "Note this is destructive on update when specified; "
                          "even if the count is not being reduced, and once "
                          "a resource name is removed, its name is never "
                          "reused in subsequent updates."),
                        default=[]),
                },
            ),
            update_allowed=True,
            default=[],
            support_status=support.SupportStatus(version='2015.1')),
        REMOVAL_POLICIES_MODE:
        properties.Schema(
            properties.Schema.STRING,
            _('How to handle changes to removal_policies on update. '
              'The default "append" mode appends to the internal list, '
              '"update" replaces it on update.'),
            default=REMOVAL_POLICY_APPEND,
            constraints=[constraints.AllowedValues(_REMOVAL_POLICY_MODES)],
            update_allowed=True,
            support_status=support.SupportStatus(version='10.0.0')),
    }

    attributes_schema = {
        REFS:
        attributes.Schema(
            _("A list of resource IDs for the resources in the group."),
            type=attributes.Schema.LIST),
        REFS_MAP:
        attributes.Schema(
            _("A map of resource names to IDs for the resources in "
              "the group."),
            type=attributes.Schema.MAP,
            support_status=support.SupportStatus(version='7.0.0'),
        ),
        ATTR_ATTRIBUTES:
        attributes.Schema(
            _("A map of resource names to the specified attribute of each "
              "individual resource. "
              "Requires heat_template_version: 2014-10-16."),
            support_status=support.SupportStatus(version='2014.2'),
            type=attributes.Schema.MAP),
        REMOVED_RSRC_LIST:
        attributes.Schema(
            _("A list of removed resource names."),
            support_status=support.SupportStatus(version='7.0.0'),
            type=attributes.Schema.LIST),
    }

    rolling_update_schema = {
        MIN_IN_SERVICE:
        properties.Schema(properties.Schema.INTEGER,
                          _('The minimum number of resources in service while '
                            'rolling updates are being executed.'),
                          constraints=[constraints.Range(min=0)],
                          default=0),
        MAX_BATCH_SIZE:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The maximum number of resources to replace at once.'),
            constraints=[constraints.Range(min=1)],
            default=1),
        PAUSE_TIME:
        properties.Schema(properties.Schema.NUMBER,
                          _('The number of seconds to wait between batches of '
                            'updates.'),
                          constraints=[constraints.Range(min=0)],
                          default=0),
    }

    batch_create_schema = {
        MAX_BATCH_SIZE:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The maximum number of resources to create at once.'),
            constraints=[constraints.Range(min=1)],
            default=1),
        PAUSE_TIME:
        properties.Schema(properties.Schema.NUMBER,
                          _('The number of seconds to wait between batches.'),
                          constraints=[constraints.Range(min=0)],
                          default=0),
    }

    update_policy_schema = {
        ROLLING_UPDATE:
        properties.Schema(
            properties.Schema.MAP,
            schema=rolling_update_schema,
            support_status=support.SupportStatus(version='5.0.0')),
        BATCH_CREATE:
        properties.Schema(
            properties.Schema.MAP,
            schema=batch_create_schema,
            support_status=support.SupportStatus(version='5.0.0'))
    }

    def get_size(self):
        """Return the desired number of member resources in the group."""
        count = self.properties.get(self.COUNT)
        return count

    def validate_nested_stack(self):
        """Validate the member resource definition via a one-member stack.

        Builds a throw-away nested template containing a single member
        and validates it, so definition errors are caught before any
        members are created.
        """
        # Only validate the resource definition (which may be a
        # nested template) if count is non-zero, to enable folks
        # to disable features via a zero count if they wish
        if not self.get_size():
            return

        first_name = next(self._resource_names())
        test_tmpl = self._assemble_nested([first_name], include_all=True)
        res_def = next(six.itervalues(test_tmpl.resource_definitions(None)))
        # make sure we can resolve the nested resource type
        self.stack.env.get_class_to_instantiate(res_def.resource_type)

        try:
            name = "%s-%s" % (self.stack.name, self.name)
            nested_stack = self._parse_nested_stack(name, test_tmpl,
                                                    self.child_params())
            # Only the structure is being checked here, so skip strict
            # parameter validation on the throw-away stack.
            nested_stack.strict_validate = False
            nested_stack.validate()
        except Exception as ex:
            # NOTE(review): self.template_url is presumably supplied by a
            # parent/mixin class -- confirm it is always set at this point.
            path = "%s<%s>" % (self.name, self.template_url)
            raise exception.StackValidationFailed(
                ex, path=[self.stack.t.RESOURCES, path])

    def _current_blacklist(self):
        """Return the member-name blacklist stored in resource data."""
        stored = self.data().get('name_blacklist')
        # The blacklist is persisted as a comma-separated string.
        return stored.split(',') if stored else []

    def _get_new_blacklist_entries(self, properties, current_blacklist):
        """Yield member names newly selected for removal by removal policies.

        Entries in a removal policy's resource_list may be member names
        or resource reference ids; reference ids are translated back to
        member names via the refs_map output, falling back to
        resource_by_refid() when that output is unavailable.
        """
        insp = grouputils.GroupInspector.from_parent_resource(self)

        # Now we iterate over the removal policies, and update the blacklist
        # with any additional names
        for r in properties.get(self.REMOVAL_POLICIES, []):
            if self.REMOVAL_RSRC_LIST in r:
                # Tolerate string or int list values
                for n in r[self.REMOVAL_RSRC_LIST]:
                    str_n = six.text_type(n)
                    # Already blacklisted, no nested stack yet, or a direct
                    # match on a member name: use the entry as-is.
                    if (str_n in current_blacklist or self.resource_id is None
                            or str_n
                            in insp.member_names(include_failed=True)):
                        yield str_n
                    elif isinstance(n, six.string_types):
                        # Otherwise treat the entry as a reference id and
                        # map it back to the member's name.
                        try:
                            refids = self.get_output(self.REFS_MAP)
                        except (exception.NotFound,
                                exception.TemplateOutputError) as op_err:
                            LOG.debug(
                                'Falling back to resource_by_refid() '
                                ' due to %s', op_err)
                            rsrc = self.nested().resource_by_refid(n)
                            if rsrc is not None:
                                yield rsrc.name
                        else:
                            if refids is not None:
                                for name, refid in refids.items():
                                    if refid == n:
                                        yield name
                                        break

        # Clear output cache from prior to stack update, so we don't get
        # outdated values after stack update.
        self._outputs = None

    def _update_name_blacklist(self, properties):
        """Resolve the remove_policies to names for removal."""
        # A comma-separated blacklist is persisted in resource data so
        # that removed names are never reused. With
        # removal_policies_mode: update the stored list is replaced
        # instead of appended to.
        current = set(self._current_blacklist())
        mode = properties.get(self.REMOVAL_POLICIES_MODE,
                              self.REMOVAL_POLICY_APPEND)
        base = set() if mode == self.REMOVAL_POLICY_UPDATE else current
        updated = base | set(
            self._get_new_blacklist_entries(properties, current))

        # Only touch the stored data when the blacklist actually changed.
        if updated != current:
            self.data_set('name_blacklist', ','.join(sorted(updated)))

    def _name_blacklist(self):
        """Get the set of resource names to blacklist."""
        blacklist = set(self._current_blacklist())
        if self.resource_id is None:
            # Before create, removal policies have not been persisted yet.
            new_entries = self._get_new_blacklist_entries(self.properties,
                                                          blacklist)
            blacklist.update(new_entries)
        return blacklist

    def _resource_names(self, size=None):
        """Iterate the first `size` non-blacklisted member names."""
        blacklisted = self._name_blacklist()
        if size is None:
            size = self.get_size()

        # Candidate names are simply "0", "1", "2", ...; skip those that
        # removal policies have blacklisted.
        candidates = (six.text_type(i) for i in itertools.count())
        allowed = (name for name in candidates if name not in blacklisted)
        return itertools.islice(allowed, size)

    def _count_black_listed(self, existing_members):
        """Return the number of current resource names that are blacklisted."""
        blacklisted = self._name_blacklist()
        return len(blacklisted.intersection(existing_members))

    def handle_create(self):
        """Create the nested stack of members, in batches if configured."""
        self._update_name_blacklist(self.properties)
        batch_create = self.update_policy.get(self.BATCH_CREATE)
        if batch_create and self.get_size():
            # Build members in batches, pausing between them.
            checkers = self._replace(0, batch_create[self.MAX_BATCH_SIZE],
                                     batch_create[self.PAUSE_TIME])
            if checkers:
                checkers[0].start()
            return checkers

        # No batching: create the whole group in one nested stack update.
        names = self._resource_names()
        self.create_with_template(self._assemble_nested(names),
                                  self.child_params())

    def check_create_complete(self, checkers=None):
        """Step the batch checkers; True when all batches have finished."""
        if checkers is None:
            # Non-batched create: defer to the nested stack status.
            return super(ResourceGroup, self).check_create_complete()
        for task in checkers:
            if not task.started():
                task.start()
            if not task.step():
                return False
        return True

    def _run_to_completion(self, template, timeout):
        """Yield until the nested-stack update to `template` completes."""
        update_task = self.update_with_template(template, {}, timeout)

        while not super(ResourceGroup,
                        self).check_update_complete(update_task):
            yield

    def _run_update(self, total_capacity, max_updates, timeout):
        """Run one rolling-update batch as a _run_to_completion task."""
        batch_tmpl = self._assemble_for_rolling_update(total_capacity,
                                                       max_updates)
        return self._run_to_completion(batch_tmpl, timeout)

    def check_update_complete(self, checkers):
        """Advance every update checker; True once all are done."""
        for runner in checkers:
            if not runner.started():
                runner.start()
            if not runner.step():
                return False
        return True

    def res_def_changed(self, prop_diff):
        """Return True if the resource definition is part of the diff."""
        changed = self.RESOURCE_DEF in prop_diff
        return changed

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply a template/property update to the group.

        Re-reads the update policy and properties from the new template,
        refreshes the name blacklist, then either performs a rolling
        update (when the resource definition changed and rolling update
        applies) or a single whole-group nested-stack update.
        """
        if tmpl_diff:
            # parse update policy
            if tmpl_diff.update_policy_changed():
                up = json_snippet.update_policy(self.update_policy_schema,
                                                self.context)
                self.update_policy = up

        checkers = []
        # Re-resolve properties against the new template before deciding
        # how to update.
        self.properties = json_snippet.properties(self.properties_schema,
                                                  self.context)
        self._update_name_blacklist(self.properties)
        if prop_diff and self.res_def_changed(prop_diff):
            updaters = self._try_rolling_update()
            if updaters:
                checkers.extend(updaters)

        # No rolling update (or none was needed): fall back to a single
        # update task covering the whole group.
        if not checkers:
            resizer = scheduler.TaskRunner(
                self._run_to_completion,
                self._assemble_nested(self._resource_names()),
                self.stack.timeout_mins)
            checkers.append(resizer)

        checkers[0].start()
        return checkers

    def _attribute_output_name(self, *attr_path):
        if attr_path[0] == self.REFS:
            return self.REFS
        return ', '.join(six.text_type(a) for a in attr_path)

    def get_attribute(self, key, *path):
        """Resolve a group attribute, preferring nested-stack outputs.

        The removed-member list is tracked locally; everything else is
        looked up in the nested stack's outputs first and, when the output
        is missing (e.g. stacks created before the outputs existed), falls
        back to direct inspection via grouputils.
        """
        if key == self.REMOVED_RSRC_LIST:
            return self._current_blacklist()
        if key == self.ATTR_ATTRIBUTES and not path:
            # 'attributes' is only meaningful with an inner attribute name.
            raise exception.InvalidTemplateAttribute(resource=self.name,
                                                     key=key)

        # "resource.<name>" with no further path resolves to a member's
        # reference ID, served from the single refs_map output.
        is_resource_ref = (key.startswith("resource.") and not path
                           and (len(key.split('.', 2)) == 2))
        if is_resource_ref:
            output_name = self.REFS_MAP
        else:
            output_name = self._attribute_output_name(key, *path)

        if self.resource_id is not None:
            try:
                output = self.get_output(output_name)
            except (exception.NotFound,
                    exception.TemplateOutputError) as op_err:
                LOG.debug('Falling back to grouputils due to %s', op_err)
            else:
                if is_resource_ref:
                    try:
                        target = key.split('.', 2)[1]
                        return output[target]
                    except KeyError:
                        raise exception.NotFound(
                            _("Member '%(mem)s' not "
                              "found in group resource "
                              "'%(grp)s'.") % {
                                  'mem': target,
                                  'grp': self.name
                              })
                if key == self.REFS:
                    return attributes.select_from_attribute(output, path)
                return output

        # Fallback: inspect the nested stack's resources directly.
        if key.startswith("resource."):
            return grouputils.get_nested_attrs(self, key, False, *path)

        names = self._resource_names()
        if key == self.REFS:
            vals = [grouputils.get_rsrc_id(self, key, False, n) for n in names]
            return attributes.select_from_attribute(vals, path)
        if key == self.REFS_MAP:
            refs_map = {
                n: grouputils.get_rsrc_id(self, key, False, n)
                for n in names
            }
            return refs_map
        if key == self.ATTR_ATTRIBUTES:
            return dict(
                (n, grouputils.get_rsrc_attr(self, key, False, n, *path))
                for n in names)

        # Any other key: collect that attribute from every member.
        path = [key] + list(path)
        return [
            grouputils.get_rsrc_attr(self, key, False, n, *path) for n in names
        ]

    def _nested_output_defns(self, resource_names, get_attr_fn, get_res_fn):
        """Yield the output definitions the nested-stack template needs.

        One output is generated per referenced attribute (so that
        get_attribute can be served from stack outputs) plus a refs_map
        output mapping member name to reference ID.
        """
        for attr in self.referenced_attrs():
            if isinstance(attr, six.string_types):
                key, path = attr, []
            else:
                key, path = attr[0], list(attr[1:])
            output_name = self._attribute_output_name(key, *path)
            value = None

            if key.startswith("resource."):
                keycomponents = key.split('.', 2)
                res_name = keycomponents[1]
                attr_path = keycomponents[2:] + path
                if attr_path:
                    if res_name in resource_names:
                        value = get_attr_fn([res_name] + attr_path)
                else:
                    # A bare member reference is served from refs_map.
                    output_name = key = self.REFS_MAP
            elif key == self.ATTR_ATTRIBUTES and path:
                value = {r: get_attr_fn([r] + path) for r in resource_names}
            elif key not in self.ATTRIBUTES:
                value = [get_attr_fn([r, key] + path) for r in resource_names]

            if key == self.REFS:
                value = [get_res_fn(r) for r in resource_names]

            if value is not None:
                yield output.OutputDefinition(output_name, value)

        # Always emit the refs_map output; get_attribute relies on it.
        value = {r: get_res_fn(r) for r in resource_names}
        yield output.OutputDefinition(self.REFS_MAP, value)

    def build_resource_definition(self, res_name, res_defn):
        """Create a ResourceDefinition for one member.

        Occurrences of the index variable in the properties are replaced
        with *res_name*; the input snippet is deep-copied first so the
        shared definition is never mutated.
        """
        res_def = copy.deepcopy(res_defn)

        props = res_def.get(self.RESOURCE_DEF_PROPERTIES)
        if props:
            props = self._handle_repl_val(res_name, props)

        return rsrc_defn.ResourceDefinition(
            res_name,
            res_def[self.RESOURCE_DEF_TYPE],
            props,
            res_def[self.RESOURCE_DEF_METADATA])

    def get_resource_def(self, include_all=False):
        """Returns the resource definition portion of the group.

        :param include_all: if False, only properties for the resource
               definition that are not empty will be included
        :type include_all: bool
        :return: resource definition for the group
        :rtype: dict
        """

        # At this stage, we don't mind if all of the parameters have values
        # assigned. Pass in a custom resolver to the properties to not
        # error when a parameter does not have a user entered value.
        def ignore_param_resolve(snippet):
            # Resolve intrinsic functions as usual, but map a missing user
            # parameter to None instead of raising.
            if isinstance(snippet, function.Function):
                try:
                    return snippet.result()
                except exception.UserParameterMissing:
                    return None

            if isinstance(snippet, collections.Mapping):
                return dict(
                    (k, ignore_param_resolve(v)) for k, v in snippet.items())
            elif (not isinstance(snippet, six.string_types)
                  and isinstance(snippet, collections.Iterable)):
                return [ignore_param_resolve(v) for v in snippet]

            return snippet

        # NOTE(review): this replaces the resolver on self.properties for
        # good, not just for this call — confirm callers expect that.
        self.properties.resolve = ignore_param_resolve

        res_def = self.properties[self.RESOURCE_DEF]
        if not include_all:
            return self._clean_props(res_def)
        return res_def

    def _clean_props(self, res_defn):
        res_def = copy.deepcopy(res_defn)
        props = res_def.get(self.RESOURCE_DEF_PROPERTIES)
        if props:
            clean = dict((k, v) for k, v in props.items() if v is not None)
            props = clean
            res_def[self.RESOURCE_DEF_PROPERTIES] = props
        return res_def

    def _handle_repl_val(self, res_name, val):
        """Recursively substitute the index variable with *res_name*.

        Strings get a plain replace; maps and sequences are walked.  The
        string check must come before the Sequence check because strings
        are themselves sequences.
        """
        repl_var = self.properties[self.INDEX_VAR]

        def recurse(x):
            return self._handle_repl_val(res_name, x)

        if isinstance(val, six.string_types):
            return val.replace(repl_var, res_name)
        elif isinstance(val, collections.Mapping):
            return {k: recurse(v) for k, v in val.items()}
        elif isinstance(val, collections.Sequence):
            return [recurse(v) for v in val]
        return val

    def _add_output_defns_to_template(self, tmpl, resource_names):
        att_func = 'get_attr'
        get_attr = functools.partial(tmpl.functions[att_func], None, att_func)
        res_func = 'get_resource'
        get_res = functools.partial(tmpl.functions[res_func], None, res_func)
        for odefn in self._nested_output_defns(resource_names, get_attr,
                                               get_res):
            tmpl.add_output(odefn)

    def _assemble_nested(self,
                         names,
                         include_all=False,
                         template_version=('heat_template_version',
                                           '2015-04-30')):
        """Build the nested-stack template with one member per name."""
        member_def = self.get_resource_def(include_all)
        definitions = [(name, self.build_resource_definition(name, member_def))
                       for name in names]
        tmpl = scl_template.make_template(definitions,
                                          version=template_version)
        self._add_output_defns_to_template(
            tmpl, [name for name, _defn in definitions])
        return tmpl

    def child_template_files(self, child_env):
        """Return the template files backing the nested stack."""
        rolling = (self.action == self.UPDATE
                   and self.update_policy[self.ROLLING_UPDATE])
        return grouputils.get_child_template_files(self.context, self.stack,
                                                   rolling,
                                                   self.old_template_id)

    def _assemble_for_rolling_update(self,
                                     total_capacity,
                                     max_updates,
                                     include_all=False,
                                     template_version=('heat_template_version',
                                                       '2015-04-30')):
        """Build a nested template for one batch of a rolling update.

        Existing members are ordered so removals happen first and
        lower-indexed members are updated last; at most *max_updates*
        members receive the new definition in this batch.
        """
        names = list(self._resource_names(total_capacity))
        name_blacklist = self._name_blacklist()

        # Current member definitions, minus blacklisted members.
        valid_resources = [(n, d)
                           for n, d in grouputils.get_member_definitions(self)
                           if n not in name_blacklist]

        targ_cap = self.get_size()

        def replace_priority(res_item):
            # Sort key: lower values are replaced earlier.
            name, defn = res_item
            try:
                index = names.index(name)
            except ValueError:
                # High priority - delete immediately
                return 0
            else:
                if index < targ_cap:
                    # Update higher indices first
                    return targ_cap - index
                else:
                    # Low priority - don't update
                    return total_capacity

        old_resources = sorted(valid_resources, key=replace_priority)
        existing_names = set(n for n, d in valid_resources)
        # Names to use for members newly created during this batch.
        new_names = six.moves.filterfalse(lambda n: n in existing_names, names)
        res_def = self.get_resource_def(include_all)
        definitions = scl_template.member_definitions(
            old_resources, res_def, total_capacity, max_updates,
            lambda: next(new_names), self.build_resource_definition)
        tmpl = scl_template.make_template(definitions,
                                          version=template_version)
        self._add_output_defns_to_template(tmpl, names)
        return tmpl

    def _try_rolling_update(self):
        if self.update_policy[self.ROLLING_UPDATE]:
            policy = self.update_policy[self.ROLLING_UPDATE]
            return self._replace(policy[self.MIN_IN_SERVICE],
                                 policy[self.MAX_BATCH_SIZE],
                                 policy[self.PAUSE_TIME])

    def _resolve_attribute(self, name):
        if name == self.REMOVED_RSRC_LIST:
            return self._current_blacklist()

    def _update_timeout(self, batch_cnt, pause_sec):
        total_pause_time = pause_sec * max(batch_cnt - 1, 0)
        if total_pause_time >= self.stack.timeout_secs():
            msg = _('The current update policy will result in stack update '
                    'timeout.')
            raise ValueError(msg)
        return self.stack.timeout_secs() - total_pause_time

    @staticmethod
    def _get_batches(targ_cap, curr_cap, batch_size, min_in_service):
        """Yield (new_capacity, members_changed) for each rolling batch.

        Delegates the batch arithmetic to the rolling_update module and
        loops until no member still needs updating.
        """
        updated = 0

        while rolling_update.needs_update(targ_cap, curr_cap, updated):
            new_cap, total_new = rolling_update.next_batch(
                targ_cap, curr_cap, updated, batch_size, min_in_service)

            yield new_cap, total_new

            # Members created beyond the larger of current/target capacity
            # do not count as updates of existing members.
            updated += total_new - max(new_cap - max(curr_cap, targ_cap), 0)
            curr_cap = new_cap

    def _replace(self, min_in_service, batch_size, pause_sec):
        """Return task runners performing a batched (rolling) replacement.

        :param min_in_service: minimum number of members kept in service
        :param batch_size: maximum members replaced per batch
        :param pause_sec: pause in seconds between consecutive batches
        """
        def pause_between_batch(pause_sec):
            # Cooperative wait: yield until the pause duration expires.
            duration = timeutils.Duration(pause_sec)
            while not duration.expired():
                yield

        # current capacity not including existing blacklisted
        inspector = grouputils.GroupInspector.from_parent_resource(self)
        num_blacklist = self._count_black_listed(
            inspector.member_names(include_failed=False))
        num_resources = inspector.size(include_failed=True)
        curr_cap = num_resources - num_blacklist

        batches = list(
            self._get_batches(self.get_size(), curr_cap, batch_size,
                              min_in_service))
        # Raises ValueError early if the pauses alone would exceed the
        # stack timeout.
        update_timeout = self._update_timeout(len(batches), pause_sec)

        def tasks():
            for index, (curr_cap, max_upd) in enumerate(batches):
                yield scheduler.TaskRunner(self._run_update, curr_cap, max_upd,
                                           update_timeout)

                # No pause is scheduled after the final batch.
                if index < (len(batches) - 1) and pause_sec > 0:
                    yield scheduler.TaskRunner(pause_between_batch, pause_sec)

        return list(tasks())

    def preview(self):
        """Preview the group's members as nested-stack resources."""
        # NOTE(pas-ha) just need to use include_all in _assemble_nested,
        # so this method is a simplified copy of preview() from StackResource,
        # and next two lines are basically a modified copy of child_template()
        names = self._resource_names()
        child_template = self._assemble_nested(names, include_all=True)
        params = self.child_params()
        name = "%s-%s" % (self.stack.name, self.name)
        self._nested = self._parse_nested_stack(name, child_template, params)

        return self.nested().preview_resources()

    def child_template(self):
        """Return the nested-stack template for the current member names."""
        return self._assemble_nested(self._resource_names())

    def child_params(self):
        """The nested stack takes no parameters of its own."""
        return {}

    def handle_adopt(self, resource_data):
        """Adopt pre-existing resources into a new nested stack."""
        names = self._resource_names()
        if not names:
            return None
        return self.create_with_template(self._assemble_nested(names), {},
                                         adopt_data=resource_data)

    def get_nested_parameters_stack(self):
        """Return a nested group of size 1 for validation."""
        names = self._resource_names(1)
        stack_name = "%s-%s" % (self.stack.name, self.name)
        return self._parse_nested_stack(stack_name,
                                        self._assemble_nested(names),
                                        self.child_params())
# Example #29
class Workflow(signal_responder.SignalResponder, resource.Resource):
    """A resource that implements Mistral workflow.

    Workflow represents a process that can be described in a various number of
    ways and that can do some job interesting to the end user. Each workflow
    consists of tasks (at least one) describing what exact steps should be made
    during workflow execution.

    For detailed description how to use Workflow, read Mistral documentation.
    """

    support_status = support.SupportStatus(version='2015.1')

    # Heat client plugin used for all Mistral API calls.
    default_client_name = 'mistral'

    entity = 'workflows'

    # Top-level resource property names.
    PROPERTIES = (NAME, TYPE, DESCRIPTION, INPUT, OUTPUT, TASKS, PARAMS,
                  TASK_DEFAULTS,
                  USE_REQUEST_BODY_AS_INPUT) = ('name', 'type', 'description',
                                                'input', 'output', 'tasks',
                                                'params', 'task_defaults',
                                                'use_request_body_as_input')

    # Keys accepted inside each entry of the 'tasks' property.
    _TASKS_KEYS = (TASK_NAME, TASK_DESCRIPTION, ON_ERROR, ON_COMPLETE,
                   ON_SUCCESS, POLICIES, ACTION, WORKFLOW, PUBLISH, TASK_INPUT,
                   REQUIRES, RETRY, WAIT_BEFORE, WAIT_AFTER, PAUSE_BEFORE,
                   TIMEOUT, WITH_ITEMS, KEEP_RESULT, TARGET, JOIN,
                   CONCURRENCY) = ('name', 'description', 'on_error',
                                   'on_complete', 'on_success', 'policies',
                                   'action', 'workflow', 'publish', 'input',
                                   'requires', 'retry', 'wait_before',
                                   'wait_after', 'pause_before', 'timeout',
                                   'with_items', 'keep_result', 'target',
                                   'join', 'concurrency')

    # Task keys that may also be given defaults at workflow level.
    _TASKS_TASK_DEFAULTS = [
        ON_ERROR, ON_COMPLETE, ON_SUCCESS, REQUIRES, RETRY, WAIT_BEFORE,
        WAIT_AFTER, PAUSE_BEFORE, TIMEOUT, CONCURRENCY
    ]

    # Keys expected in a signal body (when not used as raw input).
    _SIGNAL_DATA_KEYS = (SIGNAL_DATA_INPUT, SIGNAL_DATA_PARAMS) = ('input',
                                                                   'params')

    # Attribute names exposed by this resource.
    ATTRIBUTES = (WORKFLOW_DATA, ALARM_URL, EXECUTIONS) = ('data', 'alarm_url',
                                                           'executions')

    # Property schemas: these mirror the Mistral Workflow DSL (v2).
    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING, _('Workflow name.')),
        TYPE:
        properties.Schema(
            properties.Schema.STRING,
            _('Workflow type.'),
            constraints=[constraints.AllowedValues(['direct', 'reverse'])],
            required=True,
            update_allowed=True),
        USE_REQUEST_BODY_AS_INPUT:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _('Defines the method in which the request body for signaling a '
              'workflow would be parsed. In case this property is set to '
              'True, the body would be parsed as a simple json where each '
              'key is a workflow input, in other cases body would be parsed '
              'expecting a specific json format with two keys: "input" and '
              '"params".'),
            update_allowed=True,
            support_status=support.SupportStatus(version='6.0.0')),
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Workflow description.'),
                          update_allowed=True),
        INPUT:
        properties.Schema(properties.Schema.MAP,
                          _('Dictionary which contains input for workflow.'),
                          update_allowed=True),
        OUTPUT:
        properties.Schema(properties.Schema.MAP,
                          _('Any data structure arbitrarily containing YAQL '
                            'expressions that defines workflow output. May be '
                            'nested.'),
                          update_allowed=True),
        PARAMS:
        properties.Schema(
            properties.Schema.MAP,
            _("Workflow additional parameters. If Workflow is reverse typed, "
              "params requires 'task_name', which defines initial task."),
            update_allowed=True),
        # Workflow-level defaults applied to the keys in _TASKS_TASK_DEFAULTS.
        TASK_DEFAULTS:
        properties.Schema(
            properties.Schema.MAP,
            _("Default settings for some of task "
              "attributes defined "
              "at workflow level."),
            support_status=support.SupportStatus(version='5.0.0'),
            schema={
                ON_SUCCESS:
                properties.Schema(
                    properties.Schema.LIST,
                    _('List of tasks which will run after '
                      'the task has completed successfully.')),
                ON_ERROR:
                properties.Schema(
                    properties.Schema.LIST,
                    _('List of tasks which will run after '
                      'the task has completed with an error.')),
                ON_COMPLETE:
                properties.Schema(
                    properties.Schema.LIST,
                    _('List of tasks which will run after '
                      'the task has completed regardless of whether '
                      'it is successful or not.')),
                REQUIRES:
                properties.Schema(
                    properties.Schema.LIST,
                    _('List of tasks which should be executed before '
                      'this task. Used only in reverse workflows.')),
                RETRY:
                properties.Schema(
                    properties.Schema.MAP,
                    _('Defines a pattern how task should be repeated in '
                      'case of an error.')),
                WAIT_BEFORE:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('Defines a delay in seconds that Mistral Engine '
                      'should wait before starting a task.')),
                WAIT_AFTER:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('Defines a delay in seconds that Mistral Engine '
                      'should wait after a task has completed before '
                      'starting next tasks defined in '
                      'on-success, on-error or on-complete.')),
                PAUSE_BEFORE:
                properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('Defines whether Mistral Engine should put the '
                      'workflow on hold or not before starting a task.')),
                TIMEOUT:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('Defines a period of time in seconds after which '
                      'a task will be failed automatically '
                      'by engine if hasn\'t completed.')),
                CONCURRENCY:
                properties.Schema(
                    properties.Schema.INTEGER,
                    _('Defines a max number of actions running simultaneously '
                      'in a task. Applicable only for tasks that have '
                      'with-items.'),
                    support_status=support.SupportStatus(version='8.0.0'))
            },
            update_allowed=True),
        # Each TASKS entry accepts the keys declared in _TASKS_KEYS.
        TASKS:
        properties.Schema(
            properties.Schema.LIST,
            _('Dictionary containing workflow tasks.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    TASK_NAME:
                    properties.Schema(properties.Schema.STRING,
                                      _('Task name.'),
                                      required=True),
                    TASK_DESCRIPTION:
                    properties.Schema(properties.Schema.STRING,
                                      _('Task description.')),
                    TASK_INPUT:
                    properties.Schema(
                        properties.Schema.MAP,
                        _('Actual input parameter values of the task.')),
                    ACTION:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name of the action associated with the task. '
                          'Either action or workflow may be defined in the '
                          'task.')),
                    WORKFLOW:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name of the workflow associated with the task. '
                          'Can be defined by intrinsic function get_resource '
                          'or by name of the referenced workflow, i.e. '
                          '{ workflow: wf_name } or '
                          '{ workflow: { get_resource: wf_name }}. Either '
                          'action or workflow may be defined in the task.'),
                        constraints=[
                            constraints.CustomConstraint('mistral.workflow')
                        ]),
                    PUBLISH:
                    properties.Schema(
                        properties.Schema.MAP,
                        _('Dictionary of variables to publish to '
                          'the workflow context.')),
                    ON_SUCCESS:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('List of tasks which will run after '
                          'the task has completed successfully.')),
                    ON_ERROR:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('List of tasks which will run after '
                          'the task has completed with an error.')),
                    ON_COMPLETE:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('List of tasks which will run after '
                          'the task has completed regardless of whether '
                          'it is successful or not.')),
                    POLICIES:
                    properties.Schema(
                        properties.Schema.MAP,
                        _('Dictionary-like section defining task policies '
                          'that influence how Mistral Engine runs tasks. Must '
                          'satisfy Mistral DSL v2.'),
                        support_status=support.SupportStatus(
                            status=support.HIDDEN,
                            version='8.0.0',
                            message=_('Add needed policies directly to '
                                      'the task, Policy keyword is not '
                                      'needed'),
                            previous_status=support.SupportStatus(
                                status=support.DEPRECATED,
                                version='5.0.0',
                                previous_status=support.SupportStatus(
                                    version='2015.1')))),
                    REQUIRES:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('List of tasks which should be executed before '
                          'this task. Used only in reverse workflows.')),
                    RETRY:
                    properties.Schema(
                        properties.Schema.MAP,
                        _('Defines a pattern how task should be repeated in '
                          'case of an error.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    WAIT_BEFORE:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('Defines a delay in seconds that Mistral Engine '
                          'should wait before starting a task.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    WAIT_AFTER:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('Defines a delay in seconds that Mistral '
                          'Engine should wait after '
                          'a task has completed before starting next tasks '
                          'defined in on-success, on-error or on-complete.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    PAUSE_BEFORE:
                    properties.Schema(
                        properties.Schema.BOOLEAN,
                        _('Defines whether Mistral Engine should '
                          'put the workflow on hold '
                          'or not before starting a task.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    TIMEOUT:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('Defines a period of time in seconds after which a '
                          'task will be failed automatically by engine '
                          'if hasn\'t completed.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    WITH_ITEMS:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('If configured, it allows to run action or workflow '
                          'associated with a task multiple times '
                          'on a provided list of items.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    KEEP_RESULT:
                    properties.Schema(
                        properties.Schema.BOOLEAN,
                        _('Allowing not to store action results '
                          'after task completion.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    CONCURRENCY:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('Defines a max number of actions running '
                          'simultaneously in a task. Applicable only for '
                          'tasks that have with-items.'),
                        support_status=support.SupportStatus(version='8.0.0')),
                    TARGET:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('It defines an executor to which task action '
                          'should be sent to.'),
                        support_status=support.SupportStatus(version='5.0.0')),
                    JOIN:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Allows to synchronize multiple parallel workflow '
                          'branches and aggregate their data. '
                          'Valid inputs: all - the task will run only if '
                          'all upstream tasks are completed. '
                          'Any numeric value - then the task will run once '
                          'at least this number of upstream tasks are '
                          'completed and corresponding conditions have '
                          'triggered.'),
                        support_status=support.SupportStatus(version='6.0.0')),
                },
            ),
            required=True,
            update_allowed=True,
            constraints=[constraints.Length(min=1)])
    }

    # Attribute schemas: runtime information exposed about the workflow.
    attributes_schema = {
        WORKFLOW_DATA:
        attributes.Schema(
            _('A dictionary which contains name and input of the workflow.'),
            type=attributes.Schema.MAP),
        ALARM_URL:
        attributes.Schema(_(
            "A signed url to create executions for workflows specified in "
            "Workflow resource."),
                          type=attributes.Schema.STRING),
        EXECUTIONS:
        attributes.Schema(_(
            "List of workflows' executions, each of them is a dictionary "
            "with information about execution. Each dictionary returns "
            "values for next keys: id, workflow_name, created_at, "
            "updated_at, state for current execution state, input, output."),
                          type=attributes.Schema.LIST)
    }

    def translation_rules(self, properties):
        """Translate legacy per-task ``policies`` entries to task keys.

        Each policy key found under a task's ``policies`` map is moved up
        to the task itself; the then-empty ``policies`` map is deleted.
        """
        policies_keys = [
            self.PAUSE_BEFORE, self.WAIT_AFTER, self.WAIT_BEFORE,
            self.TIMEOUT, self.CONCURRENCY, self.RETRY
        ]
        rules = [
            translation.TranslationRule(
                properties,
                translation.TranslationRule.REPLACE, [self.TASKS, key],
                value_name=self.POLICIES,
                custom_value_path=[key])
            for key in policies_keys
        ]
        # after executing rules above properties data contains policies key
        # with empty dict value, so need to remove policies from properties.
        rules.append(
            translation.TranslationRule(properties,
                                        translation.TranslationRule.DELETE,
                                        [self.TASKS, self.POLICIES]))
        return rules

    def get_reference_id(self):
        """Use the workflow's name as the reference id for this resource."""
        workflow_name = self._workflow_name()
        return workflow_name

    def _get_inputs_and_params(self, data):
        """Extract workflow (inputs, params) from the signal payload *data*.

        When USE_REQUEST_BODY_AS_INPUT is set, the whole payload becomes
        the input and params stay None; otherwise inputs/params are read
        from their dedicated keys in the payload.
        """
        if self.properties.get(self.USE_REQUEST_BODY_AS_INPUT):
            return data, None
        if data is None:
            return None, None
        return (data.get(self.SIGNAL_DATA_INPUT),
                data.get(self.SIGNAL_DATA_PARAMS))

    def _validate_signal_data(self, inputs, params):
        """Reject signal payloads whose inputs/params are malformed.

        Raises StackValidationFailed when inputs/params are not mappings or
        when an input key is not declared in the template.
        """
        error_label = _('Signal data error')
        if inputs is not None:
            if not isinstance(inputs, dict):
                raise exception.StackValidationFailed(
                    error=error_label,
                    message=_('Input in signal data must be a map, '
                              'find a %s') % type(inputs))
            declared_inputs = self.properties.get(self.INPUT)
            for key in inputs:
                # Every signalled input must be declared in the template.
                if declared_inputs is None or key not in declared_inputs:
                    raise exception.StackValidationFailed(
                        error=error_label,
                        message=_('Unknown input %s') % key)
        if params is not None and not isinstance(params, dict):
            raise exception.StackValidationFailed(
                error=error_label,
                message=_('Params must be a map, find a %s') % type(params))

    def validate(self):
        """Validate workflow properties beyond basic schema checks.

        Checks that reverse workflows carry a 'task_name' param, that each
        task names exactly one of workflow/action, that 'requires' is only
        used with reverse workflows, that deprecated 'policies' entries do
        not duplicate flat task keys, and that 'concurrency' is only used
        together with 'with_items'.
        """
        super(Workflow, self).validate()
        if self.properties.get(self.TYPE) == 'reverse':
            params = self.properties.get(self.PARAMS)
            if params is None or not params.get('task_name'):
                raise exception.StackValidationFailed(
                    error=_('Mistral resource validation error'),
                    path=[
                        self.name,
                        ('properties' if self.stack.t.VERSION
                         == 'heat_template_version' else 'Properties'),
                        self.PARAMS
                    ],
                    message=_("'task_name' is not assigned in 'params' "
                              "in case of reverse type workflow."))
        for task in self.properties.get(self.TASKS):
            wf_value = task.get(self.WORKFLOW)
            action_value = task.get(self.ACTION)
            # Each task must reference exactly one of workflow or action.
            if wf_value and action_value:
                raise exception.ResourcePropertyConflict(
                    self.WORKFLOW, self.ACTION)
            if not wf_value and not action_value:
                raise exception.PropertyUnspecifiedError(
                    self.WORKFLOW, self.ACTION)
            # BUG FIX: the closing parenthesis was misplaced so the whole
            # "(requires is not None and type)" expression was compared to
            # 'direct'. That happened to evaluate equivalently, but the
            # intended condition is: 'requires' is set AND type is direct.
            if (task.get(self.REQUIRES) is not None
                    and self.properties.get(self.TYPE) == 'direct'):
                msg = _("task %(task)s contains property 'requires' "
                        "in case of direct workflow. Only reverse workflows "
                        "can contain property 'requires'.") % {
                            'task': task.get(self.TASK_NAME)
                        }
                raise exception.StackValidationFailed(
                    error=_('Mistral resource validation error'),
                    path=[
                        self.name,
                        ('properties' if self.stack.t.VERSION
                         == 'heat_template_version' else 'Properties'),
                        self.TASKS,
                        task.get(self.TASK_NAME), self.REQUIRES
                    ],
                    message=msg)

            # Deprecated 'policies' entries may not repeat flat task keys.
            if task.get(self.POLICIES) is not None:
                for task_item in task.get(self.POLICIES):
                    if task.get(task_item) is not None:
                        msg = _('Property %(policies)s and %(item)s cannot be '
                                'used both at one time.') % {
                                    'policies': self.POLICIES,
                                    'item': task_item
                                }
                        raise exception.StackValidationFailed(message=msg)

            # 'concurrency' only makes sense together with 'with_items'.
            if (task.get(self.WITH_ITEMS) is None
                    and task.get(self.CONCURRENCY) is not None):
                raise exception.ResourcePropertyDependency(
                    prop1=self.CONCURRENCY, prop2=self.WITH_ITEMS)

    def _workflow_name(self):
        """Return the user-supplied name, or the generated physical name."""
        explicit_name = self.properties.get(self.NAME)
        if explicit_name:
            return explicit_name
        return self.physical_resource_name()

    def build_tasks(self, props):
        """Yield one {task_name: task_body} mapping per task in *props*.

        Property names are converted to their Mistral DSL spelling by
        replacing underscores with hyphens.
        """
        excluded = (self.WORKFLOW, self.TASK_NAME, self.POLICIES)
        for task in props[self.TASKS]:
            body = {}
            workflow_ref = task.get(self.WORKFLOW)
            if workflow_ref is not None:
                body[self.WORKFLOW] = workflow_ref

            # Backward compatibility with Kilo: flatten the deprecated
            # 'policies' sub-dict into the task itself before processing.
            policies = task.get(self.POLICIES)
            if policies is not None:
                task.update(policies)

            for prop in self._TASKS_KEYS:
                if prop in excluded:
                    continue
                value = task.get(prop)
                if value is not None:
                    body[prop.replace('_', '-')] = value

            yield {task[self.TASK_NAME]: body}

    def prepare_properties(self, props):
        """Prepare correct YAML-formatted definition for Mistral.

        Builds a Mistral workflow DSL v2.0 document and returns it
        serialized as YAML text.
        """
        defn_name = self._workflow_name()
        body = {}
        # Only include the optional top-level keys that were actually set,
        # instead of inserting None values and deleting them afterwards.
        for key in (self.TYPE, self.DESCRIPTION, self.OUTPUT):
            value = props.get(key)
            if value is not None:
                body[key] = value
        if props.get(self.INPUT) is not None:
            body[self.INPUT] = list(props.get(self.INPUT).keys())
        body[self.TASKS] = {}
        for task in self.build_tasks(props):
            body[self.TASKS].update(task)

        task_defaults = props.get(self.TASK_DEFAULTS)
        if task_defaults is not None:
            body[self.TASK_DEFAULTS.replace('_', '-')] = {
                k.replace('_', '-'): v
                for k, v in six.iteritems(task_defaults) if v
            }

        definition = {'version': '2.0', defn_name: body}
        # Prefer the C-accelerated dumper when PyYAML was built with libyaml.
        dumper = getattr(yaml, 'CSafeDumper', yaml.SafeDumper)
        return yaml.dump(definition, Dumper=dumper)

    def handle_create(self):
        """Create the workflow in Mistral and record its unique name."""
        super(Workflow, self).handle_create()
        definition_yaml = self.prepare_properties(self.properties)
        try:
            created = self.client().workflows.create(definition_yaml)
        except Exception as ex:
            raise exception.ResourceFailure(ex, self)
        # NOTE(prazumovsky): Mistral uses unique names for resource
        # identification.
        self.resource_id_set(created[0].name)

    def handle_signal(self, details=None):
        """Start a new execution of the workflow from a signal payload."""
        inputs, params = self._get_inputs_and_params(details)
        self._validate_signal_data(inputs, params)

        merged_inputs = copy.deepcopy(self.properties[self.INPUT])
        merged_params = copy.deepcopy(self.properties[self.PARAMS]) or {}
        # Signal data may carry inputs/params of its own; those override the
        # template-defined values, everything else keeps its template value.
        if inputs:
            merged_inputs.update(inputs)
        if params:
            merged_params.update(params)

        try:
            execution = self.client().executions.create(
                self._workflow_name(), jsonutils.dumps(merged_inputs),
                **merged_params)
        except Exception as ex:
            raise exception.ResourceFailure(ex, self)
        # Prepend the new execution id to the comma-separated history kept
        # in resource data.
        all_executions = [execution.id]
        if self.EXECUTIONS in self.data():
            all_executions.extend(self.data().get(self.EXECUTIONS).split(','))
        self.data_set(self.EXECUTIONS, ','.join(all_executions))

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-create the Mistral definition when properties change."""
        if not prop_diff:
            return
        props = json_snippet.properties(self.properties_schema, self.context)
        updated_defn = self.prepare_properties(props)
        try:
            workflow = self.client().workflows.update(updated_defn)
        except Exception as ex:
            raise exception.ResourceFailure(ex, self)
        # Mistral identifies workflows by name, so track the (possibly new)
        # name in both resource data and the resource id.
        self.data_set(self.NAME, workflow[0].name)
        self.resource_id_set(workflow[0].name)

    def _delete_executions(self):
        """Delete every execution recorded for this workflow.

        Already-deleted executions are ignored so deletion is idempotent;
        the stored execution list is cleared afterwards.
        """
        stored_ids = self.data().get(self.EXECUTIONS)
        if stored_ids:
            # Renamed loop variable: 'id' shadowed the builtin.
            for execution_id in stored_ids.split(','):
                with self.client_plugin().ignore_not_found:
                    self.client().executions.delete(execution_id)

            # Use the EXECUTIONS constant instead of the previous hard-coded
            # 'executions' literal, consistent with data_set() elsewhere.
            self.data_delete(self.EXECUTIONS)

    def handle_delete(self):
        """Delete tracked Mistral executions, then the workflow itself."""
        self._delete_executions()
        return super(Workflow, self).handle_delete()

    def _resolve_attribute(self, name):
        """Resolve the 'executions', 'workflow_data' and 'alarm_url' attrs."""
        if name == self.EXECUTIONS:
            if self.EXECUTIONS not in self.data():
                return []

            def to_dict(execution):
                # Flatten the Mistral execution object into a plain dict;
                # input/output arrive as JSON-encoded strings.
                return {
                    'id': execution.id,
                    'workflow_name': execution.workflow_name,
                    'created_at': execution.created_at,
                    'updated_at': execution.updated_at,
                    'state': execution.state,
                    'input': jsonutils.loads(six.text_type(execution.input)),
                    'output': jsonutils.loads(six.text_type(execution.output))
                }

            stored_ids = self.data().get(self.EXECUTIONS).split(',')
            client = self.client()
            return [to_dict(client.executions.get(i)) for i in stored_ids]

        if name == self.WORKFLOW_DATA:
            return {
                self.NAME: self.resource_id,
                self.INPUT: self.properties.get(self.INPUT)
            }

        if name == self.ALARM_URL and self.resource_id is not None:
            return six.text_type(self._get_ec2_signed_url())
Example #30
0
File: domain.py  Project: yuzhouallin/heat
class DesignateDomain(resource.Resource):
    """Heat Template Resource for Designate Domain.

    Designate provides DNS-as-a-Service services for OpenStack. So, domain
    is a realm with an identification string, unique in DNS.
    """

    support_status = support.SupportStatus(version='5.0.0')

    default_client_name = 'designate'

    # Entity name used by the generic show/resolve machinery.
    # NOTE: this attribute was previously assigned twice with the same
    # value; a single assignment suffices.
    entity = 'domains'

    PROPERTIES = (NAME, TTL, DESCRIPTION, EMAIL) = ('name', 'ttl',
                                                    'description', 'email')

    ATTRIBUTES = (SERIAL, ) = ('serial', )

    properties_schema = {
        # Based on RFC 1035, length of name is set to max of 255
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Domain name.'),
                          required=True,
                          constraints=[constraints.Length(max=255)]),
        # Based on RFC 1035, range for ttl is set to 1 to signed 32 bit number
        TTL:
        properties.Schema(
            properties.Schema.INTEGER,
            _('Time To Live (Seconds).'),
            update_allowed=True,
            constraints=[constraints.Range(min=1, max=2147483647)]),
        # designate mandates to the max length of 160 for description
        DESCRIPTION:
        properties.Schema(properties.Schema.STRING,
                          _('Description of domain.'),
                          update_allowed=True,
                          constraints=[constraints.Length(max=160)]),
        EMAIL:
        properties.Schema(properties.Schema.STRING,
                          _('Domain email.'),
                          update_allowed=True,
                          required=True)
    }

    attributes_schema = {
        SERIAL:
        attributes.Schema(_("DNS domain serial."),
                          type=attributes.Schema.STRING),
    }

    def handle_create(self):
        """Create the domain, forwarding only the properties that were set."""
        args = dict((k, v) for k, v in six.iteritems(self.properties) if v)
        domain = self.client_plugin().domain_create(**args)

        self.resource_id_set(domain.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed email/ttl/description values to Designate.

        The EMAIL/TTL/DESCRIPTION constants double as the Designate
        argument names ('email', 'ttl', 'description'), so the changed
        values can be collected in one comprehension.
        """
        args = {key: prop_diff[key]
                for key in (self.EMAIL, self.TTL, self.DESCRIPTION)
                if prop_diff.get(key)}

        if args:
            args['id'] = self.resource_id
            self.client_plugin().domain_update(**args)

    def _resolve_attribute(self, name):
        """Resolve the 'serial' attribute from the live domain record."""
        if self.resource_id is None:
            return
        if name == self.SERIAL:
            return self.client().domains.get(self.resource_id).serial

    # FIXME(kanagaraj-manickam) Remove this method once designate defect
    # 1485552 is fixed.
    def _show_resource(self):
        return dict(self.client().domains.get(self.resource_id).items())

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Map live Designate data onto this resource's property names."""
        return {key: resource_data.get(key) for key in self.PROPERTIES}