Example 1 (score: 0)
 def test_allowed_pattern_invalid_type(self):
     """Validating an Integer schema with AllowedPattern must fail."""
     pattern = constraints.AllowedPattern('[0-9]*')
     schema = constraints.Schema('Integer', constraints=[pattern])
     err = self.assertRaises(constraints.InvalidSchemaError,
                             schema.validate)
     self.assertIn('AllowedPattern constraint invalid for Integer', str(err))
Example 2 (score: 0)
 def _constraint_from_def(cls, constraint):
     """Build a single Constraint object from its dict definition.

     The first matching constraint key wins, checked in a fixed order:
     range, length, allowed values, allowed pattern, custom constraint.
     Raises InvalidSchemaError when none of the known keys is present.
     """
     desc = constraint.get(DESCRIPTION)

     def min_max(cdef, what):
         # Validate the bounds dict and pull the numeric (min, max) pair.
         cls._check_dict(cdef, RANGE_KEYS, what)
         return (parameters.Schema.get_num(MIN, cdef),
                 parameters.Schema.get_num(MAX, cdef))

     if RANGE in constraint:
         lo, hi = min_max(constraint[RANGE], 'range constraint')
         return constr.Range(lo, hi, desc)
     if LENGTH in constraint:
         lo, hi = min_max(constraint[LENGTH], 'length constraint')
         return constr.Length(lo, hi, desc)
     if ALLOWED_VALUES in constraint:
         return constr.AllowedValues(constraint[ALLOWED_VALUES], desc)
     if ALLOWED_PATTERN in constraint:
         return constr.AllowedPattern(constraint[ALLOWED_PATTERN], desc)
     if CUSTOM_CONSTRAINT in constraint:
         return constr.CustomConstraint(constraint[CUSTOM_CONSTRAINT], desc)

     raise exception.InvalidSchemaError(
         message=_("No constraint expressed"))
Example 3 (score: 0)
        def constraints():
            """Yield Constraint objects from the schema's constraint list.

            Yields nothing when the CONSTRAINTS key is absent. A single
            entry may define several constraint kinds at once; every kind
            found in it is yielded.
            """
            defs = schema_dict.get(CONSTRAINTS)
            if defs is None:
                return

            def bounds(cdef):
                # Numeric (min, max) pair shared by Range and Length.
                return (parameters.Schema.get_num(MIN, cdef),
                        parameters.Schema.get_num(MAX, cdef))

            for cdict in defs:
                desc = cdict.get(DESCRIPTION)
                if RANGE in cdict:
                    lo, hi = bounds(cdict[RANGE])
                    yield constr.Range(lo, hi, desc)
                if LENGTH in cdict:
                    lo, hi = bounds(cdict[LENGTH])
                    yield constr.Length(lo, hi, desc)
                if ALLOWED_VALUES in cdict:
                    yield constr.AllowedValues(cdict[ALLOWED_VALUES], desc)
                if ALLOWED_PATTERN in cdict:
                    yield constr.AllowedPattern(cdict[ALLOWED_PATTERN], desc)
                if CUSTOM_CONSTRAINT in cdict:
                    yield constr.CustomConstraint(cdict[CUSTOM_CONSTRAINT],
                                                  desc)
Example 4 (score: 0)
class VolumeAttachment(resource.Resource):
    """Attach an existing volume to an instance at a given device path.

    Creation runs a VolumeAttachTask through the task scheduler and
    records the attachment id as this resource's id; deletion runs a
    VolumeDetachTask to completion synchronously.
    """

    # Template-facing property names (AWS-style keys).
    PROPERTIES = (
        INSTANCE_ID,
        VOLUME_ID,
        DEVICE,
    ) = (
        'InstanceId',
        'VolumeId',
        'Device',
    )

    properties_schema = {
        INSTANCE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID of the instance to which the volume attaches.'),
            immutable=True,
            required=True),
        VOLUME_ID:
        properties.Schema(properties.Schema.STRING,
                          _('The ID of the volume to be attached.'),
                          immutable=True,
                          required=True),
        DEVICE:
        properties.Schema(
            properties.Schema.STRING,
            _('The device where the volume is exposed on the instance. This '
              'assignment may not be honored and it is advised that the path '
              '/dev/disk/by-id/virtio-<VolumeId> be used instead.'),
            immutable=True,
            required=True,
            constraints=[
                constraints.AllowedPattern('/dev/vd[b-z]'),
            ]),
    }

    def handle_create(self):
        """Start the attach task and return its runner for polling."""
        server_id = self.properties[self.INSTANCE_ID]
        volume_id = self.properties[self.VOLUME_ID]
        dev = self.properties[self.DEVICE]

        attach_task = VolumeAttachTask(self.stack, server_id, volume_id, dev)
        attach_runner = scheduler.TaskRunner(attach_task)

        attach_runner.start()

        # NOTE(review): the attachment id is recorded right after start(),
        # before the task completes — presumably VolumeAttachTask sets
        # attachment_id on its first step; confirm in the task definition.
        self.resource_id_set(attach_task.attachment_id)

        return attach_runner

    def check_create_complete(self, attach_runner):
        """Advance the attach task one step; True when it is finished."""
        return attach_runner.step()

    def handle_delete(self):
        """Detach the volume, running the detach task to completion."""
        server_id = self.properties[self.INSTANCE_ID]
        detach_task = VolumeDetachTask(self.stack, server_id, self.resource_id)
        scheduler.TaskRunner(detach_task)()
Example 5 (score: 0)
        def constraints():
            """Yield Constraint objects built from inline schema keys."""
            desc = schema_dict.get(CONSTRAINT_DESCRIPTION)

            if MIN_VALUE in schema_dict or MAX_VALUE in schema_dict:
                lo = Schema.get_num(MIN_VALUE, schema_dict)
                hi = Schema.get_num(MAX_VALUE, schema_dict)
                yield constr.Range(lo, hi, desc)
            if MIN_LENGTH in schema_dict or MAX_LENGTH in schema_dict:
                lo = Schema.get_num(MIN_LENGTH, schema_dict)
                hi = Schema.get_num(MAX_LENGTH, schema_dict)
                yield constr.Length(lo, hi, desc)
            if ALLOWED_VALUES in schema_dict:
                yield constr.AllowedValues(schema_dict[ALLOWED_VALUES], desc)
            if ALLOWED_PATTERN in schema_dict:
                yield constr.AllowedPattern(schema_dict[ALLOWED_PATTERN], desc)
Example 6 (score: 0)
        def constraints():
            """Yield constraints from inline schema keys (no description)."""
            def as_num(key):
                # Missing keys stay None; present values are coerced.
                raw = schema_dict.get(key)
                return raw if raw is None else Schema.str_to_num(raw)

            if MIN_VALUE in schema_dict or MAX_VALUE in schema_dict:
                yield constr.Range(as_num(MIN_VALUE), as_num(MAX_VALUE))
            if MIN_LENGTH in schema_dict or MAX_LENGTH in schema_dict:
                yield constr.Length(as_num(MIN_LENGTH), as_num(MAX_LENGTH))
            if ALLOWED_VALUES in schema_dict:
                yield constr.AllowedValues(schema_dict[ALLOWED_VALUES])
            if ALLOWED_PATTERN in schema_dict:
                yield constr.AllowedPattern(schema_dict[ALLOWED_PATTERN])
Example 7 (score: 0)
        def constraints():
            """Yield Constraint objects from AWS-style parameter keys.

            NOTE(review): the constraint description is attached only to
            AllowedValues and AllowedPattern; Range and Length are yielded
            without it — confirm this asymmetry is intentional.
            """
            desc = param.get(parameters.CONSTRAINT_DESCRIPTION)

            if parameters.MIN_VALUE in param or parameters.MAX_VALUE in param:
                lo = get_num(parameters.MIN_VALUE)
                hi = get_num(parameters.MAX_VALUE)
                yield constr.Range(lo, hi)
            if (parameters.MIN_LENGTH in param or
                    parameters.MAX_LENGTH in param):
                lo = get_num(parameters.MIN_LENGTH)
                hi = get_num(parameters.MAX_LENGTH)
                yield constr.Length(lo, hi)
            if parameters.ALLOWED_VALUES in param:
                values = param[parameters.ALLOWED_VALUES]
                yield constr.AllowedValues(values, desc)
            if parameters.ALLOWED_PATTERN in param:
                pattern = param[parameters.ALLOWED_PATTERN]
                yield constr.AllowedPattern(pattern, desc)
Example 8 (score: 0)
        def constraints():
            """Yield validated Constraint objects from the CONSTRAINTS list.

            Raises InvalidSchemaError for a non-list value, for entries
            with unknown keys, or for an entry expressing no known
            constraint. Because this is a generator, those errors are
            raised lazily, on iteration.
            """
            cdefs = schema_dict.get(CONSTRAINTS)
            if cdefs is None:
                return

            if not isinstance(cdefs, list):
                raise constr.InvalidSchemaError(
                    _("Invalid parameter constraints for parameter %s, "
                      "expected a list") % param_name)

            valid_keys = (DESCRIPTION, LENGTH, RANGE, ALLOWED_VALUES,
                          ALLOWED_PATTERN, CUSTOM_CONSTRAINT)

            def min_max(cdef, what):
                # Shared (min, max) extraction for Range and Length.
                cls._check_dict(cdef, (MIN, MAX), what)
                return (parameters.Schema.get_num(MIN, cdef),
                        parameters.Schema.get_num(MAX, cdef))

            for cdict in cdefs:
                cls._check_dict(cdict, valid_keys, 'parameter constraints')
                desc = cdict.get(DESCRIPTION)
                if RANGE in cdict:
                    lo, hi = min_max(cdict[RANGE], 'range constraint')
                    yield constr.Range(lo, hi, desc)
                elif LENGTH in cdict:
                    lo, hi = min_max(cdict[LENGTH], 'length constraint')
                    yield constr.Length(lo, hi, desc)
                elif ALLOWED_VALUES in cdict:
                    yield constr.AllowedValues(cdict[ALLOWED_VALUES], desc)
                elif ALLOWED_PATTERN in cdict:
                    yield constr.AllowedPattern(cdict[ALLOWED_PATTERN], desc)
                elif CUSTOM_CONSTRAINT in cdict:
                    yield constr.CustomConstraint(cdict[CUSTOM_CONSTRAINT],
                                                  desc)
                else:
                    raise constr.InvalidSchemaError(
                        _("No constraint expressed"))
Example 9 (score: 0)
        def constraints_hot():
            """Yield Constraint objects parsed from HOT constraint entries."""
            cdefs = param.get(hot.CONSTRAINTS)
            if cdefs is None:
                return

            for cdict in cdefs:
                desc = cdict.get(hot.DESCRIPTION)
                if hot.RANGE in cdict:
                    rng = cdict[hot.RANGE]
                    yield constr.Range(get_num(hot.MIN, rng),
                                       get_num(hot.MAX, rng), desc)
                if hot.LENGTH in cdict:
                    lng = cdict[hot.LENGTH]
                    yield constr.Length(get_num(hot.MIN, lng),
                                        get_num(hot.MAX, lng), desc)
                if hot.ALLOWED_VALUES in cdict:
                    yield constr.AllowedValues(cdict[hot.ALLOWED_VALUES],
                                               desc)
                if hot.ALLOWED_PATTERN in cdict:
                    yield constr.AllowedPattern(cdict[hot.ALLOWED_PATTERN],
                                                desc)
Example 10 (score: 0)
class VolumeAttachment(vb.BaseVolumeAttachment):
    """Attach an existing Cinder volume to a Nova server instance.

    Purely declarative: only the property schema is defined here; the
    attach/detach behaviour comes from vb.BaseVolumeAttachment (not
    visible in this file).
    """

    # Template-facing property names (AWS-style keys).
    PROPERTIES = (
        INSTANCE_ID, VOLUME_ID, DEVICE,
    ) = (
        'InstanceId', 'VolumeId', 'Device',
    )

    properties_schema = {
        INSTANCE_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the instance to which the volume attaches.'),
            immutable=True,
            required=True,
            constraints=[
                constraints.CustomConstraint('nova.server')
            ]
        ),
        VOLUME_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the volume to be attached.'),
            immutable=True,
            required=True,
            constraints=[
                constraints.CustomConstraint('cinder.volume')
            ]
        ),
        DEVICE: properties.Schema(
            properties.Schema.STRING,
            _('The device where the volume is exposed on the instance. This '
              'assignment may not be honored and it is advised that the path '
              '/dev/disk/by-id/virtio-<VolumeId> be used instead.'),
            immutable=True,
            required=True,
            constraints=[
                constraints.AllowedPattern('/dev/vd[b-z]'),
            ]
        ),
    }
Example 11 (score: 0)
class OSDBInstance(resource.Resource):
    '''
    OpenStack cloud database instance resource.

    Provisions a Trove database instance with optional databases and
    users, and exposes the instance hostname and API href as attributes.
    '''

    # Template-facing property names.
    PROPERTIES = (
        NAME, FLAVOR, SIZE, DATABASES, USERS, AVAILABILITY_ZONE,
        RESTORE_POINT,
    ) = (
        'name', 'flavor', 'size', 'databases', 'users', 'availability_zone',
        'restore_point',
    )

    # Keys of each entry in the DATABASES list property.
    _DATABASE_KEYS = (
        DATABASE_CHARACTER_SET, DATABASE_COLLATE, DATABASE_NAME,
    ) = (
        'character_set', 'collate', 'name',
    )

    # Keys of each entry in the USERS list property.
    _USER_KEYS = (
        USER_NAME, USER_PASSWORD, USER_HOST, USER_DATABASES,
    ) = (
        'name', 'password', 'host', 'databases',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the DB instance to create.'),
            required=True,
            constraints=[
                constraints.Length(max=255),
            ]
        ),
        FLAVOR: properties.Schema(
            properties.Schema.STRING,
            _('Reference to a flavor for creating DB instance.'),
            required=True
        ),
        SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Database volume size in GB.'),
            required=True,
            constraints=[
                constraints.Range(1, 150),
            ]
        ),
        DATABASES: properties.Schema(
            properties.Schema.LIST,
            _('List of databases to be created on DB instance creation.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    DATABASE_CHARACTER_SET: properties.Schema(
                        properties.Schema.STRING,
                        _('Set of symbols and encodings.'),
                        default='utf8'
                    ),
                    DATABASE_COLLATE: properties.Schema(
                        properties.Schema.STRING,
                        _('Set of rules for comparing characters in a '
                          'character set.'),
                        default='utf8_general_ci'
                    ),
                    DATABASE_NAME: properties.Schema(
                        properties.Schema.STRING,
                        _('Specifies database names for creating '
                          'databases on instance creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=64),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]
                    ),
                },
            )
        ),
        USERS: properties.Schema(
            properties.Schema.LIST,
            _('List of users to be created on DB instance creation.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    USER_NAME: properties.Schema(
                        properties.Schema.STRING,
                        _('User name to create a user on instance '
                          'creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=16),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]
                    ),
                    USER_PASSWORD: properties.Schema(
                        properties.Schema.STRING,
                        _('Password for those users on instance '
                          'creation.'),
                        required=True,
                        constraints=[
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]
                    ),
                    USER_HOST: properties.Schema(
                        properties.Schema.STRING,
                        _('The host from which a user is allowed to '
                          'connect to the database.'),
                        default='%'
                    ),
                    USER_DATABASES: properties.Schema(
                        properties.Schema.LIST,
                        _('Names of databases that those users can '
                          'access on instance creation.'),
                        schema=properties.Schema(
                            properties.Schema.STRING,
                        ),
                        required=True
                    ),
                },
            )
        ),
        AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _('Name of the availability zone for DB instance.')
        ),
        RESTORE_POINT: properties.Schema(
            properties.Schema.STRING,
            _('DB instance restore point.')
        ),
    }

    attributes_schema = {
        "hostname": _("Hostname of the instance"),
        "href": _("Api endpoint reference of the instance")
    }

    def __init__(self, name, json_snippet, stack):
        super(OSDBInstance, self).__init__(name, json_snippet, stack)
        # Caches lazily populated by href() and the dbinstance property.
        self._href = None
        self._dbinstance = None

    @property
    def dbinstance(self):
        """Get the trove dbinstance."""
        # Fetched once from the Trove API and cached on the resource.
        if not self._dbinstance and self.resource_id:
            self._dbinstance = self.trove().instances.get(self.resource_id)

        return self._dbinstance

    def physical_resource_name(self):
        """Return the NAME property if set, else the generated name."""
        name = self.properties.get(self.NAME)
        if name:
            return name

        return super(OSDBInstance, self).physical_resource_name()

    def handle_create(self):
        '''
        Create cloud database instance.
        '''
        self.dbinstancename = self.physical_resource_name()
        self.flavor = nova_utils.get_flavor_id(self.trove(),
                                               self.properties[self.FLAVOR])
        self.volume = {'size': self.properties[self.SIZE]}
        self.databases = self.properties.get(self.DATABASES)
        self.users = self.properties.get(self.USERS)
        restore_point = self.properties.get(self.RESTORE_POINT)
        zone = self.properties.get(self.AVAILABILITY_ZONE)

        # convert user databases to format required for troveclient.
        # that is, list of database dictionaries
        for user in self.users:
            dbs = [{'name': db} for db in user.get(self.USER_DATABASES, [])]
            user[self.USER_DATABASES] = dbs

        # create db instance
        instance = self.trove().instances.create(
            self.dbinstancename,
            self.flavor,
            volume=self.volume,
            databases=self.databases,
            users=self.users,
            restorePoint=restore_point,
            availability_zone=zone)
        self.resource_id_set(instance.id)

        return instance

    def _refresh_instance(self, instance):
        """Refresh the instance's state in-place via the Trove API.

        An OverLimit (rate-limit) response is logged and tolerated so a
        single throttled poll does not fail the whole operation.
        """
        try:
            instance.get()
        except troveclient.exceptions.RequestEntityTooLarge as exc:
            msg = _("Stack %(name)s (%(id)s) received an OverLimit "
                    "response during instance.get(): %(exception)s")
            logger.warning(msg % {'name': self.stack.name,
                                  'id': self.stack.id,
                                  'exception': exc})

    def check_create_complete(self, instance):
        '''
        Check if cloud DB instance creation is complete.
        '''
        self._refresh_instance(instance)

        if instance.status == 'ERROR':
            raise exception.Error(_("Database instance creation failed."))

        # Still building; poll again later.
        if instance.status != 'ACTIVE':
            return False

        msg = _("Database instance %(database)s created (flavor:%(flavor)s, "
                "volume:%(volume)s)")
        logger.info(msg % ({'database': self.dbinstancename,
                            'flavor': self.flavor,
                            'volume': self.volume}))
        return True

    def handle_delete(self):
        '''
        Delete a cloud database instance.
        '''
        if not self.resource_id:
            return

        instance = None
        try:
            instance = self.trove().instances.get(self.resource_id)
        except troveclient.exceptions.NotFound:
            # Already gone; clear the id so the resource reads as deleted.
            logger.debug("Database instance %s not found." %
                         self.resource_id)
            self.resource_id_set(None)
        else:
            instance.delete()
            return instance

    def check_delete_complete(self, instance):
        '''
        Check for completion of cloud DB instance deletion
        '''
        if not instance:
            return True

        try:
            # NotFound raised by instance.get() propagates through
            # _refresh_instance and means deletion has finished.
            self._refresh_instance(instance)
        except troveclient.exceptions.NotFound:
            self.resource_id_set(None)
            return True

        return False

    def validate(self):
        '''
        Validate any of the provided params
        '''
        res = super(OSDBInstance, self).validate()
        if res:
            return res

        # check validity of user and databases
        users = self.properties.get(self.USERS)
        if not users:
            return

        databases = self.properties.get(self.DATABASES)
        if not databases:
            msg = _('Databases property is required if users property'
                    ' is provided')
            raise exception.StackValidationFailed(message=msg)

        # Every user must reference only databases declared above.
        db_names = set([db[self.DATABASE_NAME] for db in databases])
        for user in users:
            if not user.get(self.USER_DATABASES, []):
                msg = _('Must provide access to at least one database for '
                        'user %s') % user[self.USER_NAME]
                raise exception.StackValidationFailed(message=msg)

            missing_db = [db_name for db_name in user[self.USER_DATABASES]
                          if db_name not in db_names]

            if missing_db:
                msg = _('Database %s specified for user does not exist in '
                        'databases.') % missing_db
                raise exception.StackValidationFailed(message=msg)

    def href(self):
        """Return the instance's 'self' API link, cached after first use."""
        if not self._href and self.dbinstance:
            if not self.dbinstance.links:
                self._href = None
            else:
                for link in self.dbinstance.links:
                    if link['rel'] == 'self':
                        self._href = link['href']
                        break

        return self._href

    def _resolve_attribute(self, name):
        """Resolve the 'hostname' and 'href' resource attributes."""
        if name == 'hostname':
            return self.dbinstance.hostname
        elif name == 'href':
            return self.href()
Example 12 (score: 0)
class SaharaNodeGroupTemplate(resource.Resource):
    """A resource for managing Sahara node group templates.

    A Node Group Template describes a group of nodes within cluster. It
    contains a list of hadoop processes that will be launched on each instance
    in a group. Also a Node Group Template may provide node scoped
    configurations for those processes.
    """

    support_status = support.SupportStatus(version='2014.2')

    # Template-facing property names.
    PROPERTIES = (
        NAME, PLUGIN_NAME, HADOOP_VERSION, FLAVOR, DESCRIPTION,
        VOLUMES_PER_NODE, VOLUMES_SIZE, VOLUME_TYPE,
        SECURITY_GROUPS, AUTO_SECURITY_GROUP,
        AVAILABILITY_ZONE, VOLUMES_AVAILABILITY_ZONE,
        NODE_PROCESSES, FLOATING_IP_POOL, NODE_CONFIGS, IMAGE_ID,
        IS_PROXY_GATEWAY, VOLUME_LOCAL_TO_INSTANCE, USE_AUTOCONFIG,
        SHARES

    ) = (
        'name', 'plugin_name', 'hadoop_version', 'flavor', 'description',
        'volumes_per_node', 'volumes_size', 'volume_type',
        'security_groups', 'auto_security_group',
        'availability_zone', 'volumes_availability_zone',
        'node_processes', 'floating_ip_pool', 'node_configs', 'image_id',
        'is_proxy_gateway', 'volume_local_to_instance', 'use_autoconfig',
        'shares'
    )

    # Keys of each entry in the SHARES list property.
    _SHARE_KEYS = (
        SHARE_ID, PATH, ACCESS_LEVEL
    ) = (
        'id', 'path', 'access_level'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _("Name for the Sahara Node Group Template."),
            constraints=[
                constraints.Length(min=1, max=50),
                constraints.AllowedPattern(SAHARA_NAME_REGEX),
            ],
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the Node Group Template.'),
            default="",
            update_allowed=True
        ),
        PLUGIN_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('sahara.plugin')
            ],
            update_allowed=True
        ),
        HADOOP_VERSION: properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
            update_allowed=True
        ),
        FLAVOR: properties.Schema(
            properties.Schema.STRING,
            _('Name or ID Nova flavor for the nodes.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('nova.flavor')
            ],
            update_allowed=True
        ),
        VOLUMES_PER_NODE: properties.Schema(
            properties.Schema.INTEGER,
            _("Volumes per node."),
            constraints=[
                constraints.Range(min=0),
            ],
            default=0,
            update_allowed=True
        ),
        VOLUMES_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _("Size of the volumes, in GB."),
            constraints=[
                constraints.Range(min=1),
            ],
            update_allowed=True
        ),
        VOLUME_TYPE: properties.Schema(
            properties.Schema.STRING,
            _("Type of the volume to create on Cinder backend."),
            constraints=[
                constraints.CustomConstraint('cinder.vtype')
            ],
            update_allowed=True
        ),
        SECURITY_GROUPS: properties.Schema(
            properties.Schema.LIST,
            _("List of security group names or IDs to assign to this "
              "Node Group template."),
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
            update_allowed=True
        ),
        AUTO_SECURITY_GROUP: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Defines whether auto-assign security group to this "
              "Node Group template."),
            update_allowed=True
        ),
        AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _("Availability zone to create servers in."),
            update_allowed=True
        ),
        VOLUMES_AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _("Availability zone to create volumes in."),
            update_allowed=True
        ),
        NODE_PROCESSES: properties.Schema(
            properties.Schema.LIST,
            _("List of processes to run on every node."),
            required=True,
            constraints=[
                constraints.Length(min=1),
            ],
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
            update_allowed=True
        ),
        FLOATING_IP_POOL: properties.Schema(
            properties.Schema.STRING,
            _("Name or UUID of the Neutron floating IP network or "
              "name of the Nova floating ip pool to use. "
              "Should not be provided when used with Nova-network "
              "that auto-assign floating IPs."),
            update_allowed=True
        ),
        NODE_CONFIGS: properties.Schema(
            properties.Schema.MAP,
            _("Dictionary of node configurations."),
            update_allowed=True
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _("ID of the image to use for the template."),
            constraints=[
                constraints.CustomConstraint('sahara.image'),
            ],
            update_allowed=True
        ),
        IS_PROXY_GATEWAY: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Provide access to nodes using other nodes of the cluster "
              "as proxy gateways."),
            support_status=support.SupportStatus(version='5.0.0'),
            update_allowed=True
        ),
        VOLUME_LOCAL_TO_INSTANCE: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Create volumes on the same physical port as an instance."),
            support_status=support.SupportStatus(version='5.0.0'),
            update_allowed=True
        ),
        USE_AUTOCONFIG: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Configure most important configs automatically."),
            support_status=support.SupportStatus(version='5.0.0'),
            update_allowed=True
        ),
        SHARES: properties.Schema(
            properties.Schema.LIST,
            _("List of manila shares to be mounted."),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    SHARE_ID: properties.Schema(
                        properties.Schema.STRING,
                        _("Id of the manila share."),
                        required=True
                    ),
                    PATH: properties.Schema(
                        properties.Schema.STRING,
                        _("Local path on each cluster node on which to mount "
                          "the share. Defaults to '/mnt/{share_id}'.")
                    ),
                    ACCESS_LEVEL: properties.Schema(
                        properties.Schema.STRING,
                        _("Governs permissions set in manila for the cluster "
                          "ips."),
                        constraints=[
                            constraints.AllowedValues(['rw', 'ro']),
                        ],
                        default='rw'
                    )
                }
            ),
            support_status=support.SupportStatus(version='6.0.0'),
            update_allowed=True
        )
    }

    default_client_name = 'sahara'

    physical_resource_name_limit = 50

    entity = 'node_group_templates'

    def translation_rules(self, props):
        """Resolve flavor and floating IP pool names to their IDs."""
        neutron_client_plugin = self.client_plugin('neutron')
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.FLAVOR],
                client_plugin=self.client_plugin('nova'),
                finder='find_flavor_by_name_or_id'),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.FLOATING_IP_POOL],
                client_plugin=neutron_client_plugin,
                finder='find_resourceid_by_name_or_id',
                entity=neutron_client_plugin.RES_TYPE_NETWORK)
            ]

    def _ngt_name(self, name):
        """Return the given name, or a sanitized physical resource name."""
        if name:
            return name
        # Sahara names allow only alphanumerics and hyphens.
        return re.sub('[^a-zA-Z0-9-]', '', self.physical_resource_name())

    def _prepare_properties(self, props):
        """Prepares the property values."""
        if self.NAME in props:
            props['name'] = self._ngt_name(props[self.NAME])
        if self.FLAVOR in props:
            # The Sahara API expects the key 'flavor_id', not 'flavor'.
            props['flavor_id'] = props.pop(self.FLAVOR)
        return props

    def handle_create(self):
        """Create the node group template and record its id."""
        props = dict((k, v) for k, v in six.iteritems(self.properties))
        args = self._prepare_properties(props)
        node_group_template = self.client().node_group_templates.create(**args)
        LOG.info("Node Group Template '%s' has been created",
                 node_group_template.name)
        self.resource_id_set(node_group_template.id)
        return self.resource_id

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed properties to the Sahara API."""
        if prop_diff:
            args = self._prepare_properties(prop_diff)
            self.client().node_group_templates.update(self.resource_id, **args)

    def validate(self):
        """Validate floating IP pool, plugin version and node processes."""
        res = super(SaharaNodeGroupTemplate, self).validate()
        if res:
            return res
        pool = self.properties[self.FLOATING_IP_POOL]
        if pool:
            # The pool is a Neutron network when Neutron is in use,
            # otherwise a Nova-network floating IP pool name.
            if self.is_using_neutron():
                neutron_client_plugin = self.client_plugin('neutron')
                try:
                    neutron_client_plugin.find_resourceid_by_name_or_id(
                        neutron_client_plugin.RES_TYPE_NETWORK,
                        pool)
                except Exception as ex:
                    if (neutron_client_plugin.is_not_found(ex)
                            or neutron_client_plugin.is_no_unique(ex)):
                        err_msg = encodeutils.exception_to_unicode(ex)
                        raise exception.StackValidationFailed(message=err_msg)
                    raise

            else:
                try:
                    self.client('nova').floating_ip_pools.find(name=pool)
                except Exception as ex:
                    if self.client_plugin('nova').is_not_found(ex):
                        err_msg = encodeutils.exception_to_unicode(ex)
                        raise exception.StackValidationFailed(message=err_msg)
                    raise

        self.client_plugin().validate_hadoop_version(
            self.properties[self.PLUGIN_NAME],
            self.properties[self.HADOOP_VERSION]
        )

        # validate node processes
        plugin = self.client().plugins.get_version_details(
            self.properties[self.PLUGIN_NAME],
            self.properties[self.HADOOP_VERSION])
        allowed_processes = [item for sublist in
                             list(six.itervalues(plugin.node_processes))
                             for item in sublist]
        unsupported_processes = []
        for process in self.properties[self.NODE_PROCESSES]:
            if process not in allowed_processes:
                unsupported_processes.append(process)
        if unsupported_processes:
            msg = (_("Plugin %(plugin)s doesn't support the following "
                     "node processes: %(unsupported)s. Allowed processes are: "
                     "%(allowed)s") %
                   {'plugin': self.properties[self.PLUGIN_NAME],
                    'unsupported': ', '.join(unsupported_processes),
                    'allowed': ', '.join(allowed_processes)})
            raise exception.StackValidationFailed(
                path=[self.stack.t.RESOURCES,
                      self.name,
                      self.stack.t.get_section_name(rsrc_defn.PROPERTIES)],
                message=msg)

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Map live API data back onto the resource's property names."""
        result = super(SaharaNodeGroupTemplate, self).parse_live_resource_data(
            resource_properties, resource_data)

        # Drop share keys the schema does not know about.
        for group in result[self.SHARES] or []:
            remove_keys = set(group.keys()) - set(self._SHARE_KEYS)
            for key in remove_keys:
                del group[key]
        result[self.FLAVOR] = resource_data.get('flavor_id')
        return result
Example 13 (score: 0)
class SaharaClusterTemplate(resource.Resource):
    """A resource for managing Sahara cluster templates.

    A Cluster Template is designed to bring Node Group Templates together to
    form a Cluster. A Cluster Template defines what Node Groups will be
    included and how many instances will be created in each. Some data
    processing framework configurations can not be applied to a single node,
    but to a whole Cluster. A user can specify these kinds of configurations in
    a Cluster Template. Sahara enables users to specify which processes should
    be added to an anti-affinity group within a Cluster Template. If a process
    is included into an anti-affinity group, it means that VMs where this
    process is going to be launched should be scheduled to different hardware
    hosts.
    """

    support_status = support.SupportStatus(version='2014.2')

    # Top-level template property names (keys of properties_schema below).
    PROPERTIES = (
        NAME, PLUGIN_NAME, HADOOP_VERSION, DESCRIPTION,
        ANTI_AFFINITY, MANAGEMENT_NETWORK,
        CLUSTER_CONFIGS, NODE_GROUPS, IMAGE_ID, USE_AUTOCONFIG,
        SHARES
    ) = (
        'name', 'plugin_name', 'hadoop_version', 'description',
        'anti_affinity', 'neutron_management_network',
        'cluster_configs', 'node_groups', 'default_image_id', 'use_autoconfig',
        'shares'
    )

    # Keys of each entry in the NODE_GROUPS list property.
    _NODE_GROUP_KEYS = (
        NG_NAME, COUNT, NG_TEMPLATE_ID,
    ) = (
        'name', 'count', 'node_group_template_id',
    )

    # Keys of each entry in the SHARES list property.
    _SHARE_KEYS = (
        SHARE_ID, PATH, ACCESS_LEVEL
    ) = (
        'id', 'path', 'access_level'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _("Name for the Sahara Cluster Template."),
            constraints=[
                constraints.Length(min=1, max=50),
                constraints.AllowedPattern(SAHARA_NAME_REGEX),
            ],
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            # NOTE(review): says "Group Template" on a Cluster Template
            # resource -- presumably a copy/paste slip; confirm before
            # changing, since this is a translated, user-visible string.
            _('Description of the Sahara Group Template.'),
            default="",
            update_allowed=True
        ),
        PLUGIN_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('sahara.plugin')
            ],
            update_allowed=True
        ),
        HADOOP_VERSION: properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
            update_allowed=True
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _("ID of the default image to use for the template."),
            constraints=[
                constraints.CustomConstraint('sahara.image'),
            ],
            update_allowed=True
        ),
        MANAGEMENT_NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('Name or UUID of network.'),
            constraints=[
                constraints.CustomConstraint('neutron.network')
            ],
            update_allowed=True
        ),
        ANTI_AFFINITY: properties.Schema(
            properties.Schema.LIST,
            _("List of processes to enable anti-affinity for."),
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
            update_allowed=True
        ),
        CLUSTER_CONFIGS: properties.Schema(
            properties.Schema.MAP,
            _('Cluster configs dictionary.'),
            update_allowed=True
        ),
        NODE_GROUPS: properties.Schema(
            properties.Schema.LIST,
            _('Node groups.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NG_NAME: properties.Schema(
                        properties.Schema.STRING,
                        _('Name of the Node group.'),
                        required=True
                    ),
                    COUNT: properties.Schema(
                        properties.Schema.INTEGER,
                        _("Number of instances in the Node group."),
                        required=True,
                        constraints=[
                            constraints.Range(min=1)
                        ]
                    ),
                    NG_TEMPLATE_ID: properties.Schema(
                        properties.Schema.STRING,
                        _("ID of the Node Group Template."),
                        required=True
                    ),
                }
            ),
            update_allowed=True
        ),
        USE_AUTOCONFIG: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Configure most important configs automatically."),
            support_status=support.SupportStatus(version='5.0.0')
        ),
        SHARES: properties.Schema(
            properties.Schema.LIST,
            _("List of manila shares to be mounted."),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    SHARE_ID: properties.Schema(
                        properties.Schema.STRING,
                        _("Id of the manila share."),
                        required=True
                    ),
                    PATH: properties.Schema(
                        properties.Schema.STRING,
                        _("Local path on each cluster node on which to mount "
                          "the share. Defaults to '/mnt/{share_id}'.")
                    ),
                    ACCESS_LEVEL: properties.Schema(
                        properties.Schema.STRING,
                        _("Governs permissions set in manila for the cluster "
                          "ips."),
                        constraints=[
                            constraints.AllowedValues(['rw', 'ro']),
                        ],
                        default='rw'
                    )
                }
            ),
            support_status=support.SupportStatus(version='6.0.0'),
            update_allowed=True
        )
    }

    default_client_name = 'sahara'

    # Matches the 50-character max enforced by the NAME constraint above.
    physical_resource_name_limit = 50

    entity = 'cluster_templates'

    def translation_rules(self, props):
        """Resolve MANAGEMENT_NETWORK from a name/UUID to a network ID."""
        neutron_client_plugin = self.client_plugin('neutron')
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.MANAGEMENT_NETWORK],
                client_plugin=neutron_client_plugin,
                finder='find_resourceid_by_name_or_id',
                entity=neutron_client_plugin.RES_TYPE_NETWORK)
        ]

    def _cluster_template_name(self, name):
        """Return *name*, or a sanitized physical resource name if unset."""
        if name:
            return name
        # Strip characters sahara does not allow in template names.
        return re.sub('[^a-zA-Z0-9-]', '', self.physical_resource_name())

    def _prepare_properties(self, props):
        """Prepares the property values."""
        if self.NAME in props:
            props['name'] = self._cluster_template_name(props[self.NAME])
        if self.MANAGEMENT_NETWORK in props:
            # The sahara client expects the network under the 'net_id' key.
            props['net_id'] = props.pop(self.MANAGEMENT_NETWORK)
        return props

    def handle_create(self):
        """Create the cluster template and record its ID as the resource id."""
        props = dict((k, v) for k, v in six.iteritems(self.properties))
        args = self._prepare_properties(props)
        cluster_template = self.client().cluster_templates.create(**args)
        LOG.info("Cluster Template '%s' has been created",
                 cluster_template.name)
        self.resource_id_set(cluster_template.id)
        return self.resource_id

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed properties to sahara."""
        if prop_diff:
            args = self._prepare_properties(prop_diff)
            self.client().cluster_templates.update(self.resource_id, **args)

    def validate(self):
        """Validate network presence and the plugin/hadoop version pair."""
        res = super(SaharaClusterTemplate, self).validate()
        if res:
            return res
        # check if running on neutron and MANAGEMENT_NETWORK missing
        if (self.is_using_neutron() and
                not self.properties[self.MANAGEMENT_NETWORK]):
            msg = _("%s must be provided"
                    ) % self.MANAGEMENT_NETWORK
            raise exception.StackValidationFailed(message=msg)

        self.client_plugin().validate_hadoop_version(
            self.properties[self.PLUGIN_NAME],
            self.properties[self.HADOOP_VERSION]
        )

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Filter live data down to the keys this schema understands."""
        result = super(SaharaClusterTemplate, self).parse_live_resource_data(
            resource_properties, resource_data)

        for group in result[self.NODE_GROUPS] or []:
            remove_keys = set(group.keys()) - set(self._NODE_GROUP_KEYS)
            for key in remove_keys:
                del group[key]
        for group in result[self.SHARES] or []:
            remove_keys = set(group.keys()) - set(self._SHARE_KEYS)
            for key in remove_keys:
                del group[key]
        return result
Esempio n. 14
0
 def test_allowed_pattern_schema(self):
     """An AllowedPattern constraint serializes to its schema dict."""
     constraint = constraints.AllowedPattern('[A-Za-z0-9]',
                                             description='alphanumeric')
     expected = {'allowed_pattern': '[A-Za-z0-9]',
                 'description': 'alphanumeric'}
     self.assertEqual(expected, dict(constraint))
Esempio n. 15
0
class Instance(resource.Resource):
    """OpenStack cloud database instance resource.

    Trove is Database as a Service for OpenStack. It's designed to run entirely
    on OpenStack, with the goal of allowing users to quickly and easily utilize
    the features of a relational or non-relational database without the burden
    of handling complex administrative tasks.
    """

    support_status = support.SupportStatus(version='2014.1')

    # Trove-side instance states this resource inspects while polling.
    TROVE_STATUS = (
        ERROR,
        FAILED,
        ACTIVE,
    ) = (
        'ERROR',
        'FAILED',
        'ACTIVE',
    )

    # User-facing explanations reported when an instance enters a bad state.
    TROVE_STATUS_REASON = {
        FAILED:
        _('The database instance was created, but heat failed to set '
          'up the datastore. If a database instance is in the FAILED '
          'state, it should be deleted and a new one should be '
          'created.'),
        ERROR:
        _('The last operation for the database instance failed due to '
          'an error.'),
    }

    # States treated as terminal failures by the create/update checkers.
    BAD_STATUSES = (ERROR, FAILED)
    # Top-level template property names (keys of properties_schema below).
    PROPERTIES = (
        NAME,
        FLAVOR,
        SIZE,
        DATABASES,
        USERS,
        AVAILABILITY_ZONE,
        RESTORE_POINT,
        DATASTORE_TYPE,
        DATASTORE_VERSION,
        NICS,
        REPLICA_OF,
        REPLICA_COUNT,
    ) = ('name', 'flavor', 'size', 'databases', 'users', 'availability_zone',
         'restore_point', 'datastore_type', 'datastore_version', 'networks',
         'replica_of', 'replica_count')

    # Keys of each entry in the DATABASES list property.
    _DATABASE_KEYS = (
        DATABASE_CHARACTER_SET,
        DATABASE_COLLATE,
        DATABASE_NAME,
    ) = (
        'character_set',
        'collate',
        'name',
    )

    # Keys of each entry in the USERS list property.
    _USER_KEYS = (
        USER_NAME,
        USER_PASSWORD,
        USER_HOST,
        USER_DATABASES,
    ) = (
        'name',
        'password',
        'host',
        'databases',
    )

    # Keys of each entry in the NICS list property.
    _NICS_KEYS = (NET, PORT, V4_FIXED_IP) = ('network', 'port', 'fixed_ip')

    # Attribute names resolvable from the live trove instance.
    ATTRIBUTES = (
        HOSTNAME,
        HREF,
    ) = (
        'hostname',
        'href',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the DB instance to create.'),
                          update_allowed=True,
                          constraints=[
                              constraints.Length(max=255),
                          ]),
        FLAVOR:
        properties.Schema(
            properties.Schema.STRING,
            _('Reference to a flavor for creating DB instance.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.CustomConstraint('trove.flavor')]),
        DATASTORE_TYPE:
        properties.Schema(properties.Schema.STRING,
                          _("Name of registered datastore type."),
                          constraints=[constraints.Length(max=255)]),
        DATASTORE_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _("Name of the registered datastore version. "
              "It must exist for provided datastore type. "
              "Defaults to using single active version. "
              "If several active versions exist for provided datastore type, "
              "explicit value for this parameter must be specified."),
            constraints=[constraints.Length(max=255)]),
        SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Database volume size in GB.'),
                          required=True,
                          update_allowed=True,
                          constraints=[
                              constraints.Range(1, 150),
                          ]),
        NICS:
        properties.Schema(
            properties.Schema.LIST,
            _("List of network interfaces to create on instance."),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NET:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of the network to attach this NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT,
                              'net': NET
                          },
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ]),
                    PORT:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of Neutron port to attach this '
                          'NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT,
                              'net': NET
                          },
                        constraints=[
                            constraints.CustomConstraint('neutron.port')
                        ],
                    ),
                    V4_FIXED_IP:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Fixed IPv4 address for this NIC.'),
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                },
            ),
        ),
        DATABASES:
        properties.Schema(
            properties.Schema.LIST,
            _('List of databases to be created on DB instance creation.'),
            default=[],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    DATABASE_CHARACTER_SET:
                    properties.Schema(properties.Schema.STRING,
                                      _('Set of symbols and encodings.'),
                                      default='utf8'),
                    DATABASE_COLLATE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Set of rules for comparing characters in a '
                          'character set.'),
                        default='utf8_general_ci'),
                    DATABASE_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Specifies database names for creating '
                          'databases on instance creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=64),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_\-]+'
                                                       r'[a-zA-Z0-9_@?#\s\-]*'
                                                       r'[a-zA-Z0-9_\-]+'),
                        ]),
                },
            )),
        USERS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of users to be created on DB instance creation.'),
            default=[],
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    USER_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('User name to create a user on instance '
                          'creation.'),
                        required=True,
                        update_allowed=True,
                        constraints=[
                            constraints.Length(max=16),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                    USER_PASSWORD:
                    properties.Schema(properties.Schema.STRING,
                                      _('Password for those users on instance '
                                        'creation.'),
                                      required=True,
                                      update_allowed=True,
                                      constraints=[
                                          constraints.AllowedPattern(
                                              r'[a-zA-Z0-9_]+'
                                              r'[a-zA-Z0-9_@?#\s]*'
                                              r'[a-zA-Z0-9_]+'),
                                      ]),
                    USER_HOST:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The host from which a user is allowed to '
                          'connect to the database.'),
                        default='%',
                        update_allowed=True),
                    USER_DATABASES:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('Names of databases that those users can '
                          'access on instance creation.'),
                        schema=properties.Schema(properties.Schema.STRING, ),
                        required=True,
                        update_allowed=True,
                        constraints=[
                            constraints.Length(min=1),
                        ]),
                },
            )),
        AVAILABILITY_ZONE:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the availability zone for DB instance.')),
        RESTORE_POINT:
        properties.Schema(properties.Schema.STRING,
                          _('DB instance restore point.')),
        REPLICA_OF:
        properties.Schema(
            properties.Schema.STRING,
            _('Identifier of the source instance to replicate.'),
            support_status=support.SupportStatus(version='5.0.0')),
        REPLICA_COUNT:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The number of replicas to be created.'),
            support_status=support.SupportStatus(version='5.0.0')),
    }

    attributes_schema = {
        HOSTNAME:
        attributes.Schema(_("Hostname of the instance."),
                          type=attributes.Schema.STRING),
        HREF:
        attributes.Schema(_("Api endpoint reference of the instance."),
                          type=attributes.Schema.STRING),
    }

    default_client_name = 'trove'

    entity = 'instances'

    def translation_rules(self, properties):
        """Resolve the flavor property to an ID via the trove client."""
        resolve_flavor = translation.TranslationRule(
            properties,
            translation.TranslationRule.RESOLVE,
            [self.FLAVOR],
            client_plugin=self.client_plugin(),
            finder='find_flavor_by_name_or_id')
        return [resolve_flavor]

    def __init__(self, name, json_snippet, stack):
        """Initialize the resource with empty, lazily-filled trove handles."""
        super(Instance, self).__init__(name, json_snippet, stack)
        # Caches populated on demand (see the dbinstance property).
        self._dbinstance = None
        self._href = None

    @property
    def dbinstance(self):
        """Fetch and cache the trove dbinstance for this resource."""
        if self._dbinstance or not self.resource_id:
            return self._dbinstance
        self._dbinstance = self.client().instances.get(self.resource_id)
        return self._dbinstance

    def _dbinstance_name(self):
        """Return the user-supplied name, or a generated physical name."""
        return self.properties[self.NAME] or self.physical_resource_name()

    def handle_create(self):
        """Create cloud database instance.

        Builds the troveclient ``instances.create`` arguments from this
        resource's properties and records the new instance id.
        """
        self.flavor = self.properties[self.FLAVOR]
        self.volume = {'size': self.properties[self.SIZE]}
        self.databases = self.properties[self.DATABASES]
        self.users = self.properties[self.USERS]
        restore_point = self.properties[self.RESTORE_POINT]
        if restore_point:
            # troveclient expects a backup reference dict, not a bare id.
            restore_point = {"backupRef": restore_point}
        zone = self.properties[self.AVAILABILITY_ZONE]
        self.datastore_type = self.properties[self.DATASTORE_TYPE]
        self.datastore_version = self.properties[self.DATASTORE_VERSION]
        replica_of = self.properties[self.REPLICA_OF]
        replica_count = self.properties[self.REPLICA_COUNT]

        # convert user databases to format required for troveclient.
        # that is, list of database dictionaries
        # NOTE(review): rewrites the property-derived user dicts in place --
        # presumably safe because the list is re-read per operation; confirm.
        for user in self.users:
            dbs = [{'name': db} for db in user.get(self.USER_DATABASES, [])]
            user[self.USER_DATABASES] = dbs

        # convert networks to format required by troveclient
        nics = []
        for nic in self.properties[self.NICS]:
            nic_dict = {}
            net = nic.get(self.NET)
            port = nic.get(self.PORT)
            if net or port:
                # Only reach for the neutron client when a name/UUID must
                # actually be resolved.
                neutron = self.client_plugin('neutron')
            if net:
                net_id = neutron.find_resourceid_by_name_or_id(
                    neutron.RES_TYPE_NETWORK, net)
                nic_dict['net-id'] = net_id
            if port:
                port_id = neutron.find_resourceid_by_name_or_id(
                    neutron.RES_TYPE_PORT, port)
                nic_dict['port-id'] = port_id
            ip = nic.get(self.V4_FIXED_IP)
            if ip:
                nic_dict['v4-fixed-ip'] = ip
            nics.append(nic_dict)

        # create db instance
        instance = self.client().instances.create(
            self._dbinstance_name(),
            self.flavor,
            volume=self.volume,
            databases=self.databases,
            users=self.users,
            restorePoint=restore_point,
            availability_zone=zone,
            datastore=self.datastore_type,
            datastore_version=self.datastore_version,
            nics=nics,
            replica_of=replica_of,
            replica_count=replica_count)
        self.resource_id_set(instance.id)

        return instance.id

    def _refresh_instance(self, instance_id):
        """Fetch the trove instance, treating API rate limiting as "not yet".

        Returns None when trove answers with an OverLimit error so callers
        can simply poll again; any other failure propagates.
        """
        try:
            return self.client().instances.get(instance_id)
        except Exception as exc:
            if not self.client_plugin().is_over_limit(exc):
                raise
            LOG.warning(
                "Stack %(name)s (%(id)s) received an "
                "OverLimit response during instance.get():"
                " %(exception)s", {
                    'name': self.stack.name,
                    'id': self.stack.id,
                    'exception': exc
                })
            return None

    def check_create_complete(self, instance_id):
        """Return True once the DB instance reaches ACTIVE; raise on failure."""
        instance = self._refresh_instance(instance_id)  # refresh attributes
        if instance is None:
            # Rate-limited; poll again later.
            return False
        status = instance.status
        if status in self.BAD_STATUSES:
            reason = self.TROVE_STATUS_REASON.get(status, _("Unknown"))
            raise exception.ResourceInError(resource_status=status,
                                            status_reason=reason)
        if status != self.ACTIVE:
            return False
        LOG.info(
            "Database instance %(database)s created "
            "(flavor:%(flavor)s, volume:%(volume)s, "
            "datastore:%(datastore_type)s, "
            "datastore_version:%(datastore_version)s)", {
                'database': self._dbinstance_name(),
                'flavor': self.flavor,
                'volume': self.volume,
                'datastore_type': self.datastore_type,
                'datastore_version': self.datastore_version
            })
        return True

    def handle_check(self):
        """Verify the live instance is in the ACTIVE state."""
        current = self.client().instances.get(self.resource_id).status
        self._verify_check_conditions([{
            'attr': 'status',
            'expected': self.ACTIVE,
            'current': current,
        }])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Compute the set of updates to apply to the trove instance.

        Database and user entries are tagged in place with an 'ACTION' key
        (CREATE for entries new to the template, DELETE for entries that
        disappeared from it); check_update_complete() consumes these tags.
        """
        updates = {}
        if prop_diff:
            instance = self.client().instances.get(self.resource_id)
            if self.NAME in prop_diff:
                updates.update({self.NAME: prop_diff[self.NAME]})
            if self.FLAVOR in prop_diff:
                flv = prop_diff[self.FLAVOR]
                updates.update({self.FLAVOR: flv})
            if self.SIZE in prop_diff:
                updates.update({self.SIZE: prop_diff[self.SIZE]})
            if self.DATABASES in prop_diff:
                current = [
                    d.name for d in self.client().databases.list(instance)
                ]
                desired = [
                    d[self.DATABASE_NAME] for d in prop_diff[self.DATABASES]
                ]
                # Tag databases not yet on the instance for creation.
                for db in prop_diff[self.DATABASES]:
                    dbname = db[self.DATABASE_NAME]
                    if dbname not in current:
                        db['ACTION'] = self.CREATE
                # Append DELETE markers for databases no longer desired.
                for dbname in current:
                    if dbname not in desired:
                        deleted = {
                            self.DATABASE_NAME: dbname,
                            'ACTION': self.DELETE
                        }
                        prop_diff[self.DATABASES].append(deleted)
                updates.update({self.DATABASES: prop_diff[self.DATABASES]})
            if self.USERS in prop_diff:
                current = [u.name for u in self.client().users.list(instance)]
                desired = [u[self.USER_NAME] for u in prop_diff[self.USERS]]
                # Same CREATE/DELETE tagging scheme as for databases above.
                for usr in prop_diff[self.USERS]:
                    if usr[self.USER_NAME] not in current:
                        usr['ACTION'] = self.CREATE
                for usr in current:
                    if usr not in desired:
                        prop_diff[self.USERS].append({
                            self.USER_NAME: usr,
                            'ACTION': self.DELETE
                        })
                updates.update({self.USERS: prop_diff[self.USERS]})
        return updates

    def check_update_complete(self, updates):
        """Poll until every pending update from handle_update() is applied.

        Each _update_* helper returns False when it has just issued an API
        call (so the caller must poll again) and True when its aspect is
        already in the desired state; the and-chain below therefore stops at
        the first still-pending change.
        """
        instance = self.client().instances.get(self.resource_id)
        if instance.status in self.BAD_STATUSES:
            raise exception.ResourceInError(
                resource_status=instance.status,
                status_reason=self.TROVE_STATUS_REASON.get(
                    instance.status, _("Unknown")))
        if updates:
            if instance.status != self.ACTIVE:
                # Updates can only be applied to an ACTIVE instance.
                dmsg = ("Instance is in status %(now)s. Waiting on status"
                        " %(stat)s")
                LOG.debug(dmsg % {"now": instance.status, "stat": self.ACTIVE})
                return False
            try:
                return (
                    self._update_name(instance, updates.get(self.NAME))
                    and self._update_flavor(instance, updates.get(self.FLAVOR))
                    and self._update_size(instance, updates.get(self.SIZE))
                    and self._update_databases(instance,
                                               updates.get(self.DATABASES))
                    and self._update_users(instance, updates.get(self.USERS)))
            except Exception as exc:
                if self.client_plugin().is_client_exception(exc):
                    # the instance could have updated between the time
                    # we retrieve it and try to update it so check again
                    if self.client_plugin().is_over_limit(exc):
                        LOG.debug("API rate limit: %(ex)s. Retrying.",
                                  {'ex': six.text_type(exc)})
                        return False
                    if "No change was requested" in six.text_type(exc):
                        LOG.warning("Unexpected instance state change "
                                    "during update. Retrying.")
                        return False
                raise
        return True

    def _update_name(self, instance, name):
        """Rename the instance if needed; False means "just requested"."""
        if not name or instance.name == name:
            return True
        self.client().instances.edit(instance, name=name)
        return False

    def _update_flavor(self, instance, new_flavor):
        """Request a flavor resize when the desired flavor differs.

        Returns True when nothing (more) needs doing, False when a resize
        has just been requested and the caller should poll again.
        """
        if not new_flavor:
            return True
        current_flav = six.text_type(instance.flavor['id'])
        new_flav = six.text_type(new_flavor)
        if new_flav == current_flav:
            return True
        LOG.debug("Resizing instance flavor from %(old)s to %(new)s"
                  % {"old": current_flav, "new": new_flav})
        self.client().instances.resize_instance(instance, new_flavor)
        return False

    def _update_size(self, instance, new_size):
        """Request a volume resize when the desired size differs."""
        if not new_size or instance.volume['size'] == new_size:
            return True
        LOG.debug("Resizing instance storage from %(old)s to %(new)s"
                  % {"old": instance.volume['size'], "new": new_size})
        self.client().instances.resize_volume(instance, new_size)
        return False

    def _update_databases(self, instance, databases):
        """Apply tagged database changes (see handle_update's ACTION keys)."""
        for db in databases or []:
            action = db.get("ACTION")
            if action == self.CREATE:
                # The tag is bookkeeping only; strip it before the API call.
                db.pop("ACTION", None)
                LOG.debug("Adding new database %(db)s to instance"
                          % {"db": db})
                self.client().databases.create(instance, [db])
            elif action == self.DELETE:
                LOG.debug("Deleting existing database %(db)s from instance"
                          % {"db": db['name']})
                self.client().databases.delete(instance, db['name'])
        return True

    def _update_users(self, instance, users):
        """Apply tagged user changes and reconcile database access.

        CREATE/DELETE-tagged users are created/removed; untagged (existing)
        users get host/password attribute updates plus grant/revoke calls to
        match the desired database list. Always returns True (applied in a
        single pass).
        """
        if users:
            for usr in users:
                # troveclient expects databases as a list of {'name': ...}.
                dbs = [{'name': db} for db in usr.get(self.USER_DATABASES, [])]
                usr[self.USER_DATABASES] = dbs
                if usr.get("ACTION") == self.CREATE:
                    usr.pop("ACTION", None)
                    dmsg = "Adding new user %(u)s to instance"
                    LOG.debug(dmsg % {"u": usr})
                    self.client().users.create(instance, [usr])
                elif usr.get("ACTION") == self.DELETE:
                    dmsg = ("Deleting existing user %(u)s from " "instance")
                    LOG.debug(dmsg % {"u": usr['name']})
                    self.client().users.delete(instance, usr['name'])
                else:
                    # Existing user: push attribute changes first, then
                    # reconcile which databases the user can access.
                    newattrs = {}
                    if usr.get(self.USER_HOST):
                        newattrs[self.USER_HOST] = usr[self.USER_HOST]
                    if usr.get(self.USER_PASSWORD):
                        newattrs[self.USER_PASSWORD] = usr[self.USER_PASSWORD]
                    if newattrs:
                        self.client().users.update_attributes(
                            instance,
                            usr['name'],
                            newuserattr=newattrs,
                            hostname=instance.hostname)
                    current = self.client().users.get(instance,
                                                      usr[self.USER_NAME])
                    dbs = [db['name'] for db in current.databases]
                    desired = [
                        db['name'] for db in usr.get(self.USER_DATABASES, [])
                    ]
                    grants = [db for db in desired if db not in dbs]
                    revokes = [db for db in dbs if db not in desired]
                    if grants:
                        self.client().users.grant(instance,
                                                  usr[self.USER_NAME], grants)
                    if revokes:
                        self.client().users.revoke(instance,
                                                   usr[self.USER_NAME],
                                                   revokes)
        return True

    def parse_live_resource_data(self, resource_properties, resource_data):
        """A method to parse live resource data to update current resource.

        NOTE: cannot update users from live resource data in case of
        impossibility to get required user password.
        """
        live_dbs = [d.name
                    for d in self.client().databases.list(self.resource_id)]
        reconciled = []
        for resource_db in resource_properties[self.DATABASES]:
            db_name = resource_db[self.DATABASE_NAME]
            if db_name in live_dbs:
                reconciled.append(resource_db)
                live_dbs.remove(db_name)
        # cannot get any property for databases except for name, so update
        # resource with name
        reconciled.extend({self.DATABASE_NAME: db} for db in live_dbs)
        result = {
            self.NAME: resource_data.get('name'),
            self.DATABASES: reconciled
        }
        flavor = resource_data.get('flavor')
        if flavor is not None:
            result[self.FLAVOR] = flavor.get('id')
        volume = resource_data.get('volume')
        if volume is not None:
            result[self.SIZE] = volume['size']
        return result

    def handle_delete(self):
        """Delete a cloud database instance."""
        if not self.resource_id:
            return
        try:
            instance = self.client().instances.get(self.resource_id)
        except Exception as ex:
            # Instance already gone; swallow only not-found errors.
            self.client_plugin().ignore_not_found(ex)
            return
        instance.delete()
        return instance.id

    def check_delete_complete(self, instance_id):
        """Check for completion of cloud DB instance deletion."""
        if not instance_id:
            return True
        try:
            # For some time trove instance may continue to live
            self._refresh_instance(instance_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            # Instance still visible: deletion not finished yet.
            return False
        return True

    def validate(self):
        """Validate any of the provided params."""
        res = super(Instance, self).validate()
        if res:
            return res

        # Datastore type/version must be a registered combination.
        self.client_plugin().validate_datastore(
            self.properties[self.DATASTORE_TYPE],
            self.properties[self.DATASTORE_VERSION],
            self.DATASTORE_TYPE,
            self.DATASTORE_VERSION)

        # check validity of user and databases
        users = self.properties[self.USERS]
        if users:
            databases = self.properties[self.DATABASES]
            if not databases:
                msg = _('Databases property is required if users property '
                        'is provided for resource %s.') % self.name
                raise exception.StackValidationFailed(message=msg)

            db_names = {db[self.DATABASE_NAME] for db in databases}
            for user in users:
                missing_db = [
                    db_name for db_name in user[self.USER_DATABASES]
                    if db_name not in db_names
                ]
                if missing_db:
                    msg = (_('Database %(dbs)s specified for user does '
                             'not exist in databases for resource %(name)s.') %
                           {
                               'dbs': missing_db,
                               'name': self.name
                           })
                    raise exception.StackValidationFailed(message=msg)

        # check validity of NICS
        is_neutron = self.is_using_neutron()
        for nic in self.properties[self.NICS]:
            if nic.get(self.PORT) and not is_neutron:
                msg = _("Can not use %s property on Nova-network.") % self.PORT
                raise exception.StackValidationFailed(message=msg)

            # Exactly one of net/port must be set.
            if bool(nic.get(self.NET)) == bool(nic.get(self.PORT)):
                msg = _("Either %(net)s or %(port)s must be provided.") % {
                    'net': self.NET,
                    'port': self.PORT
                }
                raise exception.StackValidationFailed(message=msg)

    def href(self):
        """Return the instance's 'self' link, caching the lookup."""
        if self._href or not self.dbinstance:
            return self._href
        links = self.dbinstance.links
        if not links:
            self._href = None
            return self._href
        for link in links:
            if link['rel'] == 'self':
                self._href = link[self.HREF]
                break
        return self._href

    def _resolve_attribute(self, name):
        """Resolve the hostname/href attributes of a live instance."""
        if self.resource_id is None:
            return
        if name == self.HOSTNAME:
            return self.dbinstance.hostname
        if name == self.HREF:
            return self.href()
Esempio n. 16
0
class SaharaCluster(resource.Resource):
    """A resource for managing Sahara clusters.

    The Cluster entity represents a collection of VM instances that all have
    the same data processing framework installed. It is mainly characterized by
    a VM image with a pre-installed framework which will be used for cluster
    deployment. Users may choose one of the pre-configured Cluster Templates to
    start a Cluster. To get access to VMs after a Cluster has started, the user
    should specify a keypair.
    """

    # Template property names (left tuple) mapped to wire names (right).
    PROPERTIES = (NAME, PLUGIN_NAME, HADOOP_VERSION, CLUSTER_TEMPLATE_ID,
                  KEY_NAME, IMAGE, MANAGEMENT_NETWORK, IMAGE_ID,
                  USE_AUTOCONFIG,
                  SHARES) = ('name', 'plugin_name', 'hadoop_version',
                             'cluster_template_id', 'key_name', 'image',
                             'neutron_management_network', 'default_image_id',
                             'use_autoconfig', 'shares')

    # Keys of each entry in the SHARES list property.
    _SHARE_KEYS = (SHARE_ID, PATH, ACCESS_LEVEL) = ('id', 'path',
                                                    'access_level')

    ATTRIBUTES = (
        STATUS,
        INFO,
    ) = (
        "status",
        "info",
    )

    # Sahara cluster states this resource reacts to in create/delete checks.
    CLUSTER_STATUSES = (CLUSTER_ACTIVE, CLUSTER_ERROR) = ('Active', 'Error')
    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Hadoop cluster name.'),
            constraints=[
                constraints.Length(min=1, max=SAHARA_CLUSTER_NAME_MAX_LENGTH),
                constraints.AllowedPattern(SAHARA_NAME_REGEX),
            ],
        ),
        PLUGIN_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
            constraints=[constraints.CustomConstraint('sahara.plugin')]),
        HADOOP_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
        ),
        CLUSTER_TEMPLATE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('ID of the Cluster Template used for '
              'Node Groups and configurations.'),
            constraints=[
                constraints.CustomConstraint('sahara.cluster_template')
            ],
            required=True),
        KEY_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Keypair added to instances to make them accessible for user.'),
            constraints=[constraints.CustomConstraint('nova.keypair')],
        ),
        # Deprecated in favour of IMAGE_ID; a translation rule below maps
        # IMAGE onto IMAGE_ID for backward compatibility.
        IMAGE:
        properties.Schema(
            properties.Schema.STRING,
            _('Name or UUID of the image used to boot Hadoop nodes.'),
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_('Use property %s.') % IMAGE_ID,
                    version='2015.1',
                    previous_status=support.SupportStatus(version='2014.2'))),
            constraints=[constraints.CustomConstraint('glance.image')],
        ),
        IMAGE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('Default name or UUID of the image used to boot Hadoop nodes.'),
            constraints=[
                constraints.CustomConstraint('sahara.image'),
            ],
            support_status=support.SupportStatus(version='2015.1')),
        MANAGEMENT_NETWORK:
        properties.Schema(
            properties.Schema.STRING,
            _('Name or UUID of network.'),
            required=True,
            constraints=[constraints.CustomConstraint('neutron.network')],
        ),
        USE_AUTOCONFIG:
        properties.Schema(
            properties.Schema.BOOLEAN,
            _("Configure most important configs automatically."),
            support_status=support.SupportStatus(version='5.0.0')),
        SHARES:
        properties.Schema(
            properties.Schema.LIST,
            _("List of manila shares to be mounted."),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    SHARE_ID:
                    properties.Schema(
                        properties.Schema.STRING,
                        _("Id of the manila share."),
                        required=True),
                    PATH:
                    properties.Schema(
                        properties.Schema.STRING,
                        _("Local path on each cluster node on which to mount "
                          "the share. Defaults to '/mnt/{share_id}'.")),
                    ACCESS_LEVEL:
                    properties.Schema(
                        properties.Schema.STRING,
                        _("Governs permissions set in manila for the cluster "
                          "ips."),
                        constraints=[
                            constraints.AllowedValues(['rw', 'ro']),
                        ],
                        default='rw')
                }),
            support_status=support.SupportStatus(version='6.0.0'))
    }

    attributes_schema = {
        STATUS:
        attributes.Schema(_("Cluster status."), type=attributes.Schema.STRING),
        INFO:
        attributes.Schema(_("Cluster information."),
                          type=attributes.Schema.MAP),
    }

    default_client_name = 'sahara'

    entity = 'clusters'

    def translation_rules(self, props):
        """Map the deprecated IMAGE property onto IMAGE_ID and resolve
        image/network names to IDs via the glance/neutron client plugins.
        """
        rules = [
            translation.TranslationRule(props,
                                        translation.TranslationRule.REPLACE,
                                        [self.IMAGE_ID],
                                        value_path=[self.IMAGE]),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE, [self.IMAGE_ID],
                client_plugin=self.client_plugin('glance'),
                finder='find_image_by_name_or_id'),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE, [self.MANAGEMENT_NETWORK],
                client_plugin=self.client_plugin('neutron'),
                finder='find_resourceid_by_name_or_id',
                entity='network')
        ]
        return rules

    def _cluster_name(self):
        """Return the user-supplied name, or derive a Sahara-safe name
        from the physical resource name (alphanumerics and dashes only,
        truncated to the Sahara limit).
        """
        name = self.properties[self.NAME]
        if name:
            return name
        return self.reduce_physical_resource_name(
            re.sub('[^a-zA-Z0-9-]', '', self.physical_resource_name()),
            SAHARA_CLUSTER_NAME_MAX_LENGTH)

    def handle_create(self):
        """Create the Sahara cluster and record its ID.

        Raises StackValidationFailed when neither this resource nor the
        referenced cluster template provides an image.
        """
        plugin_name = self.properties[self.PLUGIN_NAME]
        hadoop_version = self.properties[self.HADOOP_VERSION]
        cluster_template_id = self.properties[self.CLUSTER_TEMPLATE_ID]
        image_id = self.properties[self.IMAGE_ID]
        # check that image is provided in case when
        # cluster template is missing one
        cluster_template = self.client().cluster_templates.get(
            cluster_template_id)
        if cluster_template.default_image_id is None and not image_id:
            msg = _("%(img)s must be provided: Referenced cluster template "
                    "%(tmpl)s has no default_image_id defined.") % {
                        'img': self.IMAGE_ID,
                        'tmpl': cluster_template_id
                    }
            raise exception.StackValidationFailed(message=msg)

        key_name = self.properties[self.KEY_NAME]
        net_id = self.properties[self.MANAGEMENT_NETWORK]
        use_autoconfig = self.properties[self.USE_AUTOCONFIG]
        shares = self.properties[self.SHARES]

        cluster = self.client().clusters.create(
            self._cluster_name(),
            plugin_name,
            hadoop_version,
            cluster_template_id=cluster_template_id,
            user_keypair_id=key_name,
            default_image_id=image_id,
            net_id=net_id,
            use_autoconfig=use_autoconfig,
            shares=shares)
        LOG.info('Cluster "%s" is being started.', cluster.name)
        self.resource_id_set(cluster.id)
        return self.resource_id

    def check_create_complete(self, cluster_id):
        """Poll the cluster until Active; raise on Error status."""
        cluster = self.client().clusters.get(cluster_id)
        if cluster.status == self.CLUSTER_ERROR:
            raise exception.ResourceInError(resource_status=cluster.status)

        if cluster.status != self.CLUSTER_ACTIVE:
            return False

        LOG.info("Cluster '%s' has been created", cluster.name)
        return True

    def check_delete_complete(self, resource_id):
        """Poll until the cluster is gone; raise if it lands in Error."""
        if not resource_id:
            return True

        try:
            cluster = self.client().clusters.get(resource_id)
        except Exception as ex:
            # Not-found means deletion finished; anything else re-raises.
            self.client_plugin().ignore_not_found(ex)
            LOG.info("Cluster '%s' has been deleted", self._cluster_name())
            return True
        else:
            if cluster.status == self.CLUSTER_ERROR:
                raise exception.ResourceInError(resource_status=cluster.status)

        return False

    def _resolve_attribute(self, name):
        """Fetch the live cluster and read the requested attribute."""
        if self.resource_id is None:
            return
        cluster = self.client().clusters.get(self.resource_id)
        return getattr(cluster, name, None)

    def validate(self):
        """Validate properties; checks plugin/hadoop version pairing."""
        res = super(SaharaCluster, self).validate()

        if res:
            return res

        self.client_plugin().validate_hadoop_version(
            self.properties[self.PLUGIN_NAME],
            self.properties[self.HADOOP_VERSION])
Esempio n. 17
0
class SaharaNodeGroupTemplate(resource.Resource):
    """A resource for managing Sahara node group templates.

    A node group template pre-configures a group of cluster nodes
    (processes to run, flavor, volumes, networking) that can later be
    combined into cluster templates and clusters.
    """

    support_status = support.SupportStatus(version='2014.2')

    # Template property names (left tuple) mapped to wire names (right).
    PROPERTIES = (
        NAME, PLUGIN_NAME, HADOOP_VERSION, FLAVOR, DESCRIPTION,
        VOLUMES_PER_NODE, VOLUMES_SIZE, VOLUME_TYPE,
        SECURITY_GROUPS, AUTO_SECURITY_GROUP,
        AVAILABILITY_ZONE, VOLUMES_AVAILABILITY_ZONE,
        NODE_PROCESSES, FLOATING_IP_POOL, NODE_CONFIGS, IMAGE_ID,
        IS_PROXY_GATEWAY, VOLUME_LOCAL_TO_INSTANCE, USE_AUTOCONFIG

    ) = (
        'name', 'plugin_name', 'hadoop_version', 'flavor', 'description',
        'volumes_per_node', 'volumes_size', 'volume_type',
        'security_groups', 'auto_security_group',
        'availability_zone', 'volumes_availability_zone',
        'node_processes', 'floating_ip_pool', 'node_configs', 'image_id',
        'is_proxy_gateway', 'volume_local_to_instance', 'use_autoconfig'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _("Name for the Sahara Node Group Template."),
            constraints=[
                constraints.Length(min=1, max=50),
                constraints.AllowedPattern(SAHARA_NAME_REGEX),
            ],
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the Node Group Template.'),
            default="",
        ),
        PLUGIN_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('sahara.plugin')
            ]
        ),
        HADOOP_VERSION: properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
        ),
        FLAVOR: properties.Schema(
            properties.Schema.STRING,
            _('Name or ID Nova flavor for the nodes.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('nova.flavor')
            ]
        ),
        VOLUMES_PER_NODE: properties.Schema(
            properties.Schema.INTEGER,
            _("Volumes per node."),
            constraints=[
                constraints.Range(min=0),
            ],
        ),
        VOLUMES_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _("Size of the volumes, in GB."),
            constraints=[
                constraints.Range(min=1),
            ],
        ),
        VOLUME_TYPE: properties.Schema(
            properties.Schema.STRING,
            _("Type of the volume to create on Cinder backend."),
            constraints=[
                constraints.CustomConstraint('cinder.vtype')
            ]
        ),
        SECURITY_GROUPS: properties.Schema(
            properties.Schema.LIST,
            _("List of security group names or IDs to assign to this "
              "Node Group template."),
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
        ),
        AUTO_SECURITY_GROUP: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Defines whether auto-assign security group to this "
              "Node Group template."),
        ),
        AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _("Availability zone to create servers in."),
        ),
        VOLUMES_AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _("Availability zone to create volumes in."),
        ),
        NODE_PROCESSES: properties.Schema(
            properties.Schema.LIST,
            _("List of processes to run on every node."),
            required=True,
            constraints=[
                constraints.Length(min=1),
            ],
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
        ),
        FLOATING_IP_POOL: properties.Schema(
            properties.Schema.STRING,
            _("Name or UUID of the Neutron floating IP network or "
              "name of the Nova floating ip pool to use. "
              "Should not be provided when used with Nova-network "
              "that auto-assign floating IPs."),
        ),
        NODE_CONFIGS: properties.Schema(
            properties.Schema.MAP,
            _("Dictionary of node configurations."),
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _("ID of the image to use for the template."),
            constraints=[
                constraints.CustomConstraint('sahara.image'),
            ],
        ),
        IS_PROXY_GATEWAY: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Provide access to nodes using other nodes of the cluster "
              "as proxy gateways."),
            support_status=support.SupportStatus(version='5.0.0')
        ),
        VOLUME_LOCAL_TO_INSTANCE: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Create volumes on the same physical port as an instance."),
            support_status=support.SupportStatus(version='5.0.0')
        ),
        USE_AUTOCONFIG: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Configure most important configs automatically."),
            support_status=support.SupportStatus(version='5.0.0')
        )
    }

    default_client_name = 'sahara'

    # Sahara limits template names to 50 characters.
    physical_resource_name_limit = 50

    entity = 'node_group_templates'

    def _ngt_name(self):
        """Return the user-supplied name, or derive a Sahara-safe one
        from the physical resource name (alphanumerics and dashes only).
        """
        name = self.properties[self.NAME]
        if name:
            return name
        return re.sub('[^a-zA-Z0-9-]', '', self.physical_resource_name())

    def handle_create(self):
        """Create the node group template and record its ID."""
        plugin_name = self.properties[self.PLUGIN_NAME]
        hadoop_version = self.properties[self.HADOOP_VERSION]
        node_processes = self.properties[self.NODE_PROCESSES]
        description = self.properties[self.DESCRIPTION]
        flavor_id = self.client_plugin("nova").get_flavor_id(
            self.properties[self.FLAVOR])
        volumes_per_node = self.properties[self.VOLUMES_PER_NODE]
        volumes_size = self.properties[self.VOLUMES_SIZE]
        volume_type = self.properties[self.VOLUME_TYPE]
        floating_ip_pool = self.properties[self.FLOATING_IP_POOL]
        security_groups = self.properties[self.SECURITY_GROUPS]
        auto_security_group = self.properties[self.AUTO_SECURITY_GROUP]
        availability_zone = self.properties[self.AVAILABILITY_ZONE]
        vol_availability_zone = self.properties[self.VOLUMES_AVAILABILITY_ZONE]
        image_id = self.properties[self.IMAGE_ID]
        # With neutron the pool is a network name/UUID; resolve it to an ID.
        if floating_ip_pool and self.is_using_neutron():
            floating_ip_pool = self.client_plugin(
                'neutron').find_neutron_resource(
                    self.properties, self.FLOATING_IP_POOL, 'network')
        node_configs = self.properties[self.NODE_CONFIGS]
        is_proxy_gateway = self.properties[self.IS_PROXY_GATEWAY]
        volume_local_to_instance = self.properties[
            self.VOLUME_LOCAL_TO_INSTANCE]
        use_autoconfig = self.properties[self.USE_AUTOCONFIG]

        node_group_template = self.client().node_group_templates.create(
            self._ngt_name(),
            plugin_name, hadoop_version, flavor_id,
            description=description,
            volumes_per_node=volumes_per_node,
            volumes_size=volumes_size,
            volume_type=volume_type,
            node_processes=node_processes,
            floating_ip_pool=floating_ip_pool,
            node_configs=node_configs,
            security_groups=security_groups,
            auto_security_group=auto_security_group,
            availability_zone=availability_zone,
            volumes_availability_zone=vol_availability_zone,
            image_id=image_id,
            is_proxy_gateway=is_proxy_gateway,
            volume_local_to_instance=volume_local_to_instance,
            use_autoconfig=use_autoconfig
        )
        LOG.info(_LI("Node Group Template '%s' has been created"),
                 node_group_template.name)
        self.resource_id_set(node_group_template.id)
        return self.resource_id

    def validate(self):
        """Validate floating IP pool, plugin version and node processes.

        Raises StackValidationFailed when the floating IP pool cannot be
        found, the plugin/version pair is invalid, or a requested node
        process is not supported by the plugin.
        """
        res = super(SaharaNodeGroupTemplate, self).validate()
        if res:
            return res
        pool = self.properties[self.FLOATING_IP_POOL]
        if pool:
            if self.is_using_neutron():
                try:
                    self.client_plugin('neutron').find_neutron_resource(
                        self.properties, self.FLOATING_IP_POOL, 'network')
                except Exception as ex:
                    if (self.client_plugin('neutron').is_not_found(ex)
                            or self.client_plugin('neutron').is_no_unique(ex)):
                        # BaseException.message does not exist on Python 3;
                        # stringify the exception instead.
                        raise exception.StackValidationFailed(
                            message=six.text_type(ex))
                    raise
            else:
                try:
                    self.client('nova').floating_ip_pools.find(name=pool)
                except Exception as ex:
                    if self.client_plugin('nova').is_not_found(ex):
                        raise exception.StackValidationFailed(
                            message=six.text_type(ex))
                    raise

        self.client_plugin().validate_hadoop_version(
            self.properties[self.PLUGIN_NAME],
            self.properties[self.HADOOP_VERSION]
        )

        # validate node processes
        plugin = self.client().plugins.get_version_details(
            self.properties[self.PLUGIN_NAME],
            self.properties[self.HADOOP_VERSION])
        allowed_processes = [item for sublist in
                             list(six.itervalues(plugin.node_processes))
                             for item in sublist]
        unsupported_processes = []
        for process in self.properties[self.NODE_PROCESSES]:
            if process not in allowed_processes:
                unsupported_processes.append(process)
        if unsupported_processes:
            msg = (_("Plugin %(plugin)s doesn't support the following "
                     "node processes: %(unsupported)s. Allowed processes are: "
                     "%(allowed)s") %
                   {'plugin': self.properties[self.PLUGIN_NAME],
                    'unsupported': ', '.join(unsupported_processes),
                    'allowed': ', '.join(allowed_processes)})
            raise exception.StackValidationFailed(
                path=[self.stack.t.get_section_name('resources'),
                      self.name,
                      self.stack.t.get_section_name('properties')],
                message=msg)
Esempio n. 18
0
class OSDBInstance(resource.Resource):
    '''
    OpenStack cloud database instance resource.
    '''

    # Trove instance states this resource reacts to; ERROR and FAILED are
    # terminal failure states (see BAD_STATUSES below).
    TROVE_STATUS = (
        ERROR,
        FAILED,
        ACTIVE,
    ) = (
        'ERROR',
        'FAILED',
        'ACTIVE',
    )

    BAD_STATUSES = (ERROR, FAILED)

    # Top-level template property names of this resource.
    PROPERTIES = (
        NAME,
        FLAVOR,
        SIZE,
        DATABASES,
        USERS,
        AVAILABILITY_ZONE,
        RESTORE_POINT,
        DATASTORE_TYPE,
        DATASTORE_VERSION,
    ) = (
        'name',
        'flavor',
        'size',
        'databases',
        'users',
        'availability_zone',
        'restore_point',
        'datastore_type',
        'datastore_version',
    )

    # Sub-keys of each entry in the DATABASES list property.
    _DATABASE_KEYS = (
        DATABASE_CHARACTER_SET,
        DATABASE_COLLATE,
        DATABASE_NAME,
    ) = (
        'character_set',
        'collate',
        'name',
    )

    # Sub-keys of each entry in the USERS list property.
    _USER_KEYS = (
        USER_NAME,
        USER_PASSWORD,
        USER_HOST,
        USER_DATABASES,
    ) = (
        'name',
        'password',
        'host',
        'databases',
    )

    # Attributes resolvable via _resolve_attribute().
    ATTRIBUTES = (
        HOSTNAME,
        HREF,
    ) = (
        'hostname',
        'href',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the DB instance to create.'),
                          constraints=[
                              constraints.Length(max=255),
                          ]),
        FLAVOR:
        properties.Schema(properties.Schema.STRING,
                          _('Reference to a flavor for creating DB instance.'),
                          required=True),
        DATASTORE_TYPE:
        properties.Schema(properties.Schema.STRING,
                          _("Name of registered datastore type."),
                          constraints=[constraints.Length(max=255)]),
        DATASTORE_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _("Name of the registered datastore version. "
              "It must exist for provided datastore type. "
              "Defaults to using single active version. "
              "If several active versions exist for provided datastore type, "
              "explicit value for this parameter must be specified."),
            constraints=[constraints.Length(max=255)]),
        SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Database volume size in GB.'),
                          required=True,
                          constraints=[
                              constraints.Range(1, 150),
                          ]),
        DATABASES:
        properties.Schema(
            properties.Schema.LIST,
            _('List of databases to be created on DB instance creation.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    DATABASE_CHARACTER_SET:
                    properties.Schema(properties.Schema.STRING,
                                      _('Set of symbols and encodings.'),
                                      default='utf8'),
                    DATABASE_COLLATE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Set of rules for comparing characters in a '
                          'character set.'),
                        default='utf8_general_ci'),
                    DATABASE_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Specifies database names for creating '
                          'databases on instance creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=64),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                },
            )),
        USERS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of users to be created on DB instance creation.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    USER_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('User name to create a user on instance '
                          'creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=16),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                    USER_PASSWORD:
                    properties.Schema(properties.Schema.STRING,
                                      _('Password for those users on instance '
                                        'creation.'),
                                      required=True,
                                      constraints=[
                                          constraints.AllowedPattern(
                                              r'[a-zA-Z0-9_]+'
                                              r'[a-zA-Z0-9_@?#\s]*'
                                              r'[a-zA-Z0-9_]+'),
                                      ]),
                    USER_HOST:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The host from which a user is allowed to '
                          'connect to the database.'),
                        default='%'),
                    USER_DATABASES:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('Names of databases that those users can '
                          'access on instance creation.'),
                        schema=properties.Schema(properties.Schema.STRING, ),
                        required=True,
                        constraints=[
                            constraints.Length(min=1),
                        ]),
                },
            )),
        AVAILABILITY_ZONE:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the availability zone for DB instance.')),
        RESTORE_POINT:
        properties.Schema(properties.Schema.STRING,
                          _('DB instance restore point.')),
    }

    attributes_schema = {
        HOSTNAME: attributes.Schema(_("Hostname of the instance")),
        HREF: attributes.Schema(_("Api endpoint reference of the instance")),
    }

    def __init__(self, name, json_snippet, stack):
        """Initialize the resource and its lazily-populated caches."""
        super(OSDBInstance, self).__init__(name, json_snippet, stack)
        # Caches filled on demand by href() and the dbinstance property.
        self._href = None
        self._dbinstance = None

    @property
    def dbinstance(self):
        """Get the trove dbinstance."""
        # Fetch once from Trove and cache; requires a resource_id to exist.
        if not self._dbinstance and self.resource_id:
            self._dbinstance = self.trove().instances.get(self.resource_id)

        return self._dbinstance

    def _dbinstance_name(self):
        """Return the NAME property if set, else the generated physical name."""
        name = self.properties.get(self.NAME)
        if name:
            return name

        return self.physical_resource_name()

    def handle_create(self):
        '''
        Create cloud database instance.
        '''
        # Resolve the flavor reference to a flavor id via nova utils.
        self.flavor = nova_utils.get_flavor_id(self.trove(),
                                               self.properties[self.FLAVOR])
        self.volume = {'size': self.properties[self.SIZE]}
        self.databases = self.properties.get(self.DATABASES)
        self.users = self.properties.get(self.USERS)
        restore_point = self.properties.get(self.RESTORE_POINT)
        zone = self.properties.get(self.AVAILABILITY_ZONE)
        self.datastore_type = self.properties.get(self.DATASTORE_TYPE)
        self.datastore_version = self.properties.get(self.DATASTORE_VERSION)

        # convert user databases to format required for troveclient.
        # that is, list of database dictionaries
        for user in self.users:
            dbs = [{'name': db} for db in user.get(self.USER_DATABASES, [])]
            user[self.USER_DATABASES] = dbs

        # create db instance
        instance = self.trove().instances.create(
            self._dbinstance_name(),
            self.flavor,
            volume=self.volume,
            databases=self.databases,
            users=self.users,
            restorePoint=restore_point,
            availability_zone=zone,
            datastore=self.datastore_type,
            datastore_version=self.datastore_version)
        self.resource_id_set(instance.id)

        return instance

    def _refresh_instance(self, instance):
        """Refresh instance state in place, tolerating Trove rate limiting.

        An OverLimit (RequestEntityTooLarge) response is only logged so the
        caller can retry on the next poll; other exceptions propagate.
        """
        try:
            instance.get()
        except troveclient.exceptions.RequestEntityTooLarge as exc:
            msg = _("Stack %(name)s (%(id)s) received an OverLimit "
                    "response during instance.get(): %(exception)s")
            LOG.warning(msg % {
                'name': self.stack.name,
                'id': self.stack.id,
                'exception': exc
            })

    def check_create_complete(self, instance):
        '''
        Check if cloud DB instance creation is complete.
        '''
        self._refresh_instance(instance)  # get updated attributes
        if instance.status in self.BAD_STATUSES:
            raise exception.Error(_("Database instance creation failed."))

        # Not yet ACTIVE: keep polling.
        if instance.status != self.ACTIVE:
            return False

        msg = _("Database instance %(database)s created (flavor:%(flavor)s, "
                "volume:%(volume)s, datastore:%(datastore_type)s, "
                "datastore_version:%(datastore_version)s)")

        LOG.info(
            msg % {
                'database': self._dbinstance_name(),
                'flavor': self.flavor,
                'volume': self.volume,
                'datastore_type': self.datastore_type,
                'datastore_version': self.datastore_version
            })
        return True

    def handle_delete(self):
        '''
        Delete a cloud database instance.
        '''
        if not self.resource_id:
            return

        instance = None
        try:
            instance = self.trove().instances.get(self.resource_id)
        except troveexc.NotFound:
            # Already gone; clear the id so the resource is considered deleted.
            LOG.debug("Database instance %s not found." % self.resource_id)
            self.resource_id_set(None)
        else:
            instance.delete()
            return instance

    def check_delete_complete(self, instance):
        '''
        Check for completion of cloud DB instance deletion
        '''
        if not instance:
            return True

        try:
            # For some time trove instance may continue to live
            self._refresh_instance(instance)
        except troveexc.NotFound:
            self.resource_id_set(None)
            return True

        return False

    def validate(self):
        '''
        Validate any of the provided params
        '''
        res = super(OSDBInstance, self).validate()
        if res:
            return res

        datastore_type = self.properties.get(self.DATASTORE_TYPE)
        datastore_version = self.properties.get(self.DATASTORE_VERSION)

        if datastore_type:
            # get current active versions
            allowed_versions = self.trove().datastore_versions.list(
                datastore_type)
            allowed_version_names = [v.name for v in allowed_versions]
            if datastore_version:
                if datastore_version not in allowed_version_names:
                    msg = _("Datastore version %(dsversion)s "
                            "for datastore type %(dstype)s is not valid. "
                            "Allowed versions are %(allowed)s.") % {
                                'dstype': datastore_type,
                                'dsversion': datastore_version,
                                'allowed': ', '.join(allowed_version_names)
                            }
                    raise exception.StackValidationFailed(message=msg)
            else:
                # No explicit version: only valid if exactly one is active.
                if len(allowed_versions) > 1:
                    msg = _("Multiple active datastore versions exist for "
                            "datastore type %(dstype)s. "
                            "Explicit datastore version must be provided. "
                            "Allowed versions are %(allowed)s.") % {
                                'dstype': datastore_type,
                                'allowed': ', '.join(allowed_version_names)
                            }
                    raise exception.StackValidationFailed(message=msg)
        else:
            # A version without a type is meaningless.
            if datastore_version:
                msg = _("Not allowed - %(dsver)s without %(dstype)s.") % {
                    'dsver': self.DATASTORE_VERSION,
                    'dstype': self.DATASTORE_TYPE
                }
                raise exception.StackValidationFailed(message=msg)

        # check validity of user and databases
        users = self.properties.get(self.USERS)
        if not users:
            return

        databases = self.properties.get(self.DATABASES)
        if not databases:
            msg = _('Databases property is required if users property '
                    'is provided for resource %s.') % self.name
            raise exception.StackValidationFailed(message=msg)

        # Every database a user references must be declared in DATABASES.
        db_names = set([db[self.DATABASE_NAME] for db in databases])
        for user in users:
            missing_db = [
                db_name for db_name in user[self.USER_DATABASES]
                if db_name not in db_names
            ]

            if missing_db:
                msg = (_('Database %(dbs)s specified for user does '
                         'not exist in databases for resource %(name)s.') % {
                             'dbs': missing_db,
                             'name': self.name
                         })
                raise exception.StackValidationFailed(message=msg)

    def href(self):
        """Return the instance's 'self' link, cached after first lookup."""
        if not self._href and self.dbinstance:
            if not self.dbinstance.links:
                self._href = None
            else:
                for link in self.dbinstance.links:
                    if link['rel'] == 'self':
                        self._href = link[self.HREF]
                        break

        return self._href

    def _resolve_attribute(self, name):
        """Resolve HOSTNAME/HREF attributes (None for any other name)."""
        if name == self.HOSTNAME:
            return self.dbinstance.hostname
        elif name == self.HREF:
            return self.href()
Esempio n. 19
0
class SaharaClusterTemplate(resource.Resource):
    """A Heat resource that manages a Sahara cluster template."""

    support_status = support.SupportStatus(version='2014.2')

    # Top-level template property names of this resource.
    PROPERTIES = (
        NAME, PLUGIN_NAME, HADOOP_VERSION, DESCRIPTION,
        ANTI_AFFINITY, MANAGEMENT_NETWORK,
        CLUSTER_CONFIGS, NODE_GROUPS, IMAGE_ID,
    ) = (
        'name', 'plugin_name', 'hadoop_version', 'description',
        'anti_affinity', 'neutron_management_network',
        'cluster_configs', 'node_groups', 'default_image_id',
    )

    # Sub-keys of each entry in the NODE_GROUPS list property.
    _NODE_GROUP_KEYS = (
        NG_NAME, COUNT, NG_TEMPLATE_ID,
    ) = (
        'name', 'count', 'node_group_template_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _("Name for the Sahara Cluster Template."),
            constraints=[
                constraints.Length(min=1, max=50),
                constraints.AllowedPattern(SAHARA_NAME_REGEX),
            ],
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the Sahara Group Template.'),
            default="",
        ),
        PLUGIN_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
        ),
        HADOOP_VERSION: properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _("ID of the default image to use for the template."),
            constraints=[
                constraints.CustomConstraint('sahara.image'),
            ],
        ),
        MANAGEMENT_NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('Name or UUID of network.'),
            constraints=[
                constraints.CustomConstraint('neutron.network')
            ],
        ),
        ANTI_AFFINITY: properties.Schema(
            properties.Schema.LIST,
            _("List of processes to enable anti-affinity for."),
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
        ),
        CLUSTER_CONFIGS: properties.Schema(
            properties.Schema.MAP,
            _('Cluster configs dictionary.'),
        ),
        NODE_GROUPS: properties.Schema(
            properties.Schema.LIST,
            _('Node groups.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NG_NAME: properties.Schema(
                        properties.Schema.STRING,
                        _('Name of the Node group.'),
                        required=True
                    ),
                    COUNT: properties.Schema(
                        properties.Schema.INTEGER,
                        _("Number of instances in the Node group."),
                        required=True,
                        constraints=[
                            constraints.Range(min=1)
                        ]
                    ),
                    NG_TEMPLATE_ID: properties.Schema(
                        properties.Schema.STRING,
                        _("ID of the Node Group Template."),
                        required=True
                    ),
                }
            ),

        ),
    }

    default_client_name = 'sahara'

    # Sahara limits template names to 50 characters (see NAME constraint).
    physical_resource_name_limit = 50

    def _cluster_template_name(self):
        """Return the NAME property if set, else the generated physical name."""
        name = self.properties[self.NAME]
        if name:
            return name
        return self.physical_resource_name()

    def handle_create(self):
        """Create the cluster template in Sahara and record its id."""
        plugin_name = self.properties[self.PLUGIN_NAME]
        hadoop_version = self.properties[self.HADOOP_VERSION]
        description = self.properties[self.DESCRIPTION]
        image_id = self.properties[self.IMAGE_ID]
        net_id = self.properties[self.MANAGEMENT_NETWORK]
        if net_id:
            # Resolve the network name/UUID to an id for the active backend.
            if self.is_using_neutron():
                net_id = self.client_plugin('neutron').find_neutron_resource(
                    self.properties, self.MANAGEMENT_NETWORK, 'network')
            else:
                net_id = self.client_plugin('nova').get_nova_network_id(
                    net_id)
        anti_affinity = self.properties[self.ANTI_AFFINITY]
        cluster_configs = self.properties[self.CLUSTER_CONFIGS]
        node_groups = self.properties[self.NODE_GROUPS]
        cluster_template = self.client().cluster_templates.create(
            self._cluster_template_name(),
            plugin_name, hadoop_version,
            description=description,
            default_image_id=image_id,
            anti_affinity=anti_affinity,
            net_id=net_id,
            cluster_configs=cluster_configs,
            node_groups=node_groups
        )
        LOG.info(_LI("Cluster Template '%s' has been created"),
                 cluster_template.name)
        self.resource_id_set(cluster_template.id)
        return self.resource_id

    def handle_delete(self):
        """Delete the backing cluster template, ignoring not-found errors."""
        if not self.resource_id:
            return
        try:
            self.client().cluster_templates.delete(
                self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        LOG.info(_LI("Cluster Template '%s' has been deleted."),
                 self._cluster_template_name())

    def validate(self):
        """Validate properties; a management network is mandatory on neutron."""
        res = super(SaharaClusterTemplate, self).validate()
        if res:
            return res
        # check if running on neutron and MANAGEMENT_NETWORK missing
        if (self.is_using_neutron() and
                not self.properties[self.MANAGEMENT_NETWORK]):
            msg = _("%s must be provided"
                    ) % self.MANAGEMENT_NETWORK
            raise exception.StackValidationFailed(message=msg)
Esempio n. 20
0
 def test_allowed_pattern_invalid_type(self):
     """An AllowedPattern constraint on a non-string type must be rejected."""
     # Context-manager form of assertRaises: constructing the schema
     # itself is the operation expected to raise.
     with self.assertRaises(constraints.InvalidSchemaError):
         constraints.Schema(
             'Integer',
             constraints=[constraints.AllowedPattern('[0-9]*')])
Esempio n. 21
0
class Lease(resource.Resource):
    """A resource to manage Blazar leases.

    Lease resource manages the reservations of specific type/amount of
    cloud resources within OpenStack.

    Note:
    Based on an agreement with Blazar team, this resource class does not
    support updating, because current Blazar lease scheme is not suitable for
    Heat, if you want to update a lease, you need to specify reservation's id,
    which is one of attribute of lease.
    """

    support_status = support.SupportStatus(version='12.0.0')

    # Top-level property names plus the sub-keys used inside the
    # RESERVATIONS and EVENTS list properties.
    PROPERTIES = (
        NAME,
        START_DATE,
        END_DATE,
        BEFORE_END_DATE,
        RESERVATIONS,
        RESOURCE_TYPE,
        MIN,
        MAX,
        HYPERVISOR_PROPERTIES,
        RESOURCE_PROPERTIES,
        BEFORE_END,
        AMOUNT,
        VCPUS,
        MEMORY_MB,
        DISK_GB,
        AFFINITY,
        EVENTS,
        EVENT_TYPE,
        TIME,
    ) = (
        'name',
        'start_date',
        'end_date',
        'before_end_date',
        'reservations',
        'resource_type',
        'min',
        'max',
        'hypervisor_properties',
        'resource_properties',
        'before_end',
        'amount',
        'vcpus',
        'memory_mb',
        'disk_gb',
        'affinity',
        'events',
        'event_type',
        'time',
    )

    # Attributes resolvable via _resolve_attribute().
    ATTRIBUTES = (
        NAME_ATTR,
        START_DATE_ATTR,
        END_DATE_ATTR,
        CREATED_AT_ATTR,
        UPDATED_AT_ATTR,
        STATUS_ATTR,
        DEGRADED_ATTR,
        USER_ID_ATTR,
        PROJECT_ID_ATTR,
        TRUST_ID_ATTR,
        RESERVATIONS_ATTR,
        EVENTS_ATTR,
    ) = (
        'name',
        'start_date',
        'end_date',
        'created_at',
        'updated_at',
        'status',
        'degraded',
        'user_id',
        'project_id',
        'trust_id',
        'reservations',
        'events',
    )

    # NOTE: the reservation map mixes physical:host-only and
    # virtual:instance-only keys; _parse_reservation() strips the ones
    # that do not apply to the chosen resource_type.
    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('The name of the lease.'),
            required=True,
        ),
        START_DATE:
        properties.Schema(
            properties.Schema.STRING,
            _('The start date and time of the lease. '
              'The date and time format must be "CCYY-MM-DD hh:mm".'),
            required=True,
            constraints=[
                constraints.AllowedPattern(r'\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}'),
            ],
        ),
        END_DATE:
        properties.Schema(
            properties.Schema.STRING,
            _('The end date and time of the lease '
              'The date and time format must be "CCYY-MM-DD hh:mm".'),
            required=True,
            constraints=[
                constraints.AllowedPattern(r'\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}'),
            ],
        ),
        BEFORE_END_DATE:
        properties.Schema(
            properties.Schema.STRING,
            _('The date and time for the before-end-action of the lease. '
              'The date and time format must be "CCYY-MM-DD hh:mm".'),
            constraints=[
                constraints.AllowedPattern(r'\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}'),
            ],
        ),
        RESERVATIONS:
        properties.Schema(
            properties.Schema.LIST,
            _('The list of reservations.'),
            required=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    RESOURCE_TYPE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The type of the resource to reserve.'),
                        required=True,
                        constraints=[
                            constraints.AllowedValues(
                                ['virtual:instance', 'physical:host'])
                        ]),
                    MIN:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('The minimum number of hosts to reserve.'),
                        constraints=[constraints.Range(min=1)],
                    ),
                    MAX:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('The maximum number of hosts to reserve.'),
                        constraints=[constraints.Range(min=1)],
                    ),
                    HYPERVISOR_PROPERTIES:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Properties of the hypervisor to reserve.'),
                    ),
                    RESOURCE_PROPERTIES:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Properties of the resource to reserve.'),
                    ),
                    BEFORE_END:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The before-end-action of the reservation.'),
                        default="default",
                        constraints=[
                            constraints.AllowedValues(['default', 'snapshot'])
                        ]),
                    AMOUNT:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('The amount of instances to reserve.'),
                        constraints=[constraints.Range(min=0, max=2147483647)],
                    ),
                    VCPUS:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('The number of VCPUs per the instance.'),
                        constraints=[constraints.Range(min=0, max=2147483647)],
                    ),
                    MEMORY_MB:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('Megabytes of memory per the instance.'),
                        constraints=[constraints.Range(min=0, max=2147483647)],
                    ),
                    DISK_GB:
                    properties.Schema(
                        properties.Schema.INTEGER,
                        _('Gigabytes of the local disk per the instance.'),
                        constraints=[constraints.Range(min=0, max=2147483647)],
                    ),
                    AFFINITY:
                    properties.Schema(
                        properties.Schema.BOOLEAN,
                        _('The affinity of instances to reserve.'),
                        default=False,
                    ),
                },
            ),
        ),
        EVENTS:
        properties.Schema(
            properties.Schema.LIST,
            _('A list of event objects.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    EVENT_TYPE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The type of the event (e.g. notification).'),
                        required=True,
                    ),
                    TIME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The date and time of the event. '
                          'The date and time format must be '
                          '"CCYY-MM-DD hh:mm".'),
                        required=True,
                    ),
                },
            ),
        ),
    }

    attributes_schema = {
        NAME_ATTR:
        attributes.Schema(_('The name of the lease.'),
                          type=attributes.Schema.STRING),
        START_DATE_ATTR:
        attributes.Schema(_('The start date and time of the lease. '
                            'The date and time format is "CCYY-MM-DD hh:mm".'),
                          type=attributes.Schema.STRING),
        END_DATE_ATTR:
        attributes.Schema(_('The end date and time of the lease. '
                            'The date and time format is "CCYY-MM-DD hh:mm".'),
                          type=attributes.Schema.STRING),
        CREATED_AT_ATTR:
        attributes.Schema(_('The date and time when the lease was created. '
                            'The date and time format is "CCYY-MM-DD hh:mm".'),
                          type=attributes.Schema.STRING),
        UPDATED_AT_ATTR:
        attributes.Schema(_('The date and time when the lease was updated. '
                            'The date and time format is "CCYY-MM-DD hh:mm".'),
                          type=attributes.Schema.STRING),
        STATUS_ATTR:
        attributes.Schema(_('The status of the lease.'),
                          type=attributes.Schema.STRING),
        DEGRADED_ATTR:
        attributes.Schema(_(
            'The flag which represents condition of reserved resources of '
            'the lease. If it is true, the amount of reserved resources is '
            'less than the request or reserved resources were changed.'),
                          type=attributes.Schema.BOOLEAN),
        USER_ID_ATTR:
        attributes.Schema(_('The UUID of the lease owner.'),
                          type=attributes.Schema.STRING),
        PROJECT_ID_ATTR:
        attributes.Schema(_('The UUID the project which owns the lease.'),
                          type=attributes.Schema.STRING),
        TRUST_ID_ATTR:
        attributes.Schema(_('The UUID of the trust of the lease owner.'),
                          type=attributes.Schema.STRING),
        RESERVATIONS_ATTR:
        attributes.Schema(_('A list of reservation objects.'),
                          type=attributes.Schema.LIST),
        EVENTS_ATTR:
        attributes.Schema(_('Event information of the lease.'),
                          type=attributes.Schema.LIST),
    }

    default_client_name = 'blazar'

    entity = 'lease'

    def validate(self):
        """Refuse to create a lease if Blazar has no host registered."""
        super(Lease, self).validate()
        if not self.client_plugin().has_host():
            msg = ("Couldn't find any host in Blazar. "
                   "You must create a host before creating a lease.")
            raise exception.StackValidationFailed(message=msg)

    def _parse_reservation(self, rsv):
        """Strip reservation keys that do not apply to its resource_type.

        NOTE(review): pop() without a default assumes every schema key is
        present in rsv (unset map sub-properties resolve to None) — confirm.
        """
        if rsv['resource_type'] == "physical:host":
            # Instance-only keys are meaningless for host reservations.
            for key in ['vcpus', 'memory_mb', 'disk_gb', 'affinity', 'amount']:
                rsv.pop(key)
        elif rsv['resource_type'] == "virtual:instance":
            # Host-only keys are meaningless for instance reservations.
            for key in ['hypervisor_properties', 'max', 'min', 'before_end']:
                rsv.pop(key)

        return rsv

    def handle_create(self):
        """Create the lease via the Blazar client and return its id."""
        # Drop unset (None) properties so Blazar receives only real values.
        args = dict(
            (k, v) for k, v in self.properties.items() if v is not None)
        # rename keys
        args['start'] = args.pop('start_date')
        args['end'] = args.pop('end_date')

        # parse reservations
        args['reservations'] = [
            self._parse_reservation(rsv) for rsv in args['reservations']
        ]
        lease = self.client_plugin().create_lease(**args)
        self.resource_id_set(lease['id'])
        return lease['id']

    def _resolve_attribute(self, name):
        """Look up an attribute on the live lease fetched from Blazar."""
        if self.resource_id is None:
            return
        lease = self.client_plugin().get_lease(self.resource_id)
        try:
            return lease[name]
        except KeyError:
            raise exception.InvalidTemplateAttribute(resource=self.name,
                                                     key=name)
Esempio n. 22
0
class CloudDBInstance(resource.Resource):
    '''
    Rackspace cloud database resource.

    Creates a Rackspace Cloud Database instance and, once it is ACTIVE,
    provisions the requested databases and users on it.
    '''

    PROPERTIES = (
        INSTANCE_NAME,
        FLAVOR_REF,
        VOLUME_SIZE,
        DATABASES,
        USERS,
    ) = (
        'InstanceName',
        'FlavorRef',
        'VolumeSize',
        'Databases',
        'Users',
    )

    _DATABASE_KEYS = (
        DATABASE_CHARACTER_SET,
        DATABASE_COLLATE,
        DATABASE_NAME,
    ) = (
        'Character_set',
        'Collate',
        'Name',
    )

    _USER_KEYS = (
        USER_NAME,
        USER_PASSWORD,
        USER_HOST,
        USER_DATABASES,
    ) = (
        'Name',
        'Password',
        'Host',
        'Databases',
    )

    properties_schema = {
        INSTANCE_NAME:
        properties.Schema(properties.Schema.STRING,
                          required=True,
                          constraints=[
                              constraints.Length(max=255),
                          ]),
        FLAVOR_REF:
        properties.Schema(properties.Schema.STRING, required=True),
        VOLUME_SIZE:
        properties.Schema(properties.Schema.NUMBER,
                          required=True,
                          constraints=[
                              constraints.Range(1, 150),
                          ]),
        DATABASES:
        properties.Schema(properties.Schema.LIST,
                          schema=properties.Schema(
                              properties.Schema.MAP,
                              schema={
                                  DATABASE_CHARACTER_SET:
                                  properties.Schema(properties.Schema.STRING,
                                                    default='utf8'),
                                  DATABASE_COLLATE:
                                  properties.Schema(properties.Schema.STRING,
                                                    default='utf8_general_ci'),
                                  DATABASE_NAME:
                                  properties.Schema(
                                      properties.Schema.STRING,
                                      required=True,
                                      constraints=[
                                          constraints.Length(max=64),
                                          constraints.AllowedPattern(
                                              r'[a-zA-Z0-9_]+'
                                              r'[a-zA-Z0-9_@?#\s]*'
                                              r'[a-zA-Z0-9_]+'),
                                      ]),
                              },
                          )),
        USERS:
        properties.Schema(
            properties.Schema.LIST,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    USER_NAME:
                    properties.Schema(properties.Schema.STRING,
                                      required=True,
                                      constraints=[
                                          constraints.Length(max=16),
                                          constraints.AllowedPattern(
                                              r'[a-zA-Z0-9_]+'
                                              r'[a-zA-Z0-9_@?#\s]*'
                                              r'[a-zA-Z0-9_]+'),
                                      ]),
                    USER_PASSWORD:
                    properties.Schema(properties.Schema.STRING,
                                      required=True,
                                      constraints=[
                                          constraints.AllowedPattern(
                                              r'[a-zA-Z0-9_]+'
                                              r'[a-zA-Z0-9_@?#\s]*'
                                              r'[a-zA-Z0-9_]+'),
                                      ]),
                    USER_HOST:
                    properties.Schema(properties.Schema.STRING, default='%'),
                    USER_DATABASES:
                    properties.Schema(properties.Schema.LIST, required=True),
                },
            )),
    }

    attributes_schema = {
        "hostname": "Hostname of the instance",
        "href": "Api endpoint reference of the instance"
    }

    def __init__(self, name, json_snippet, stack):
        super(CloudDBInstance, self).__init__(name, json_snippet, stack)
        # Cached attribute values, lazily resolved in _hostname()/_href().
        self.hostname = None
        self.href = None

    def cloud_db(self):
        """Return the Rackspace Cloud DB client from the stack's clients."""
        return self.stack.clients.cloud_db()

    def handle_create(self):
        '''
        Create Rackspace Cloud DB Instance.
        '''
        logger.debug("Cloud DB instance handle_create called")
        self.sqlinstancename = self.properties[self.INSTANCE_NAME]
        self.flavor = self.properties[self.FLAVOR_REF]
        self.volume = self.properties[self.VOLUME_SIZE]
        self.databases = self.properties.get(self.DATABASES, None)
        self.users = self.properties.get(self.USERS, None)

        # create db instance
        logger.info("Creating Cloud DB instance %s" % self.sqlinstancename)
        instance = self.cloud_db().create(self.sqlinstancename,
                                          flavor=self.flavor,
                                          volume=self.volume)
        if instance is not None:
            self.resource_id_set(instance.id)

        self.hostname = instance.hostname
        self.href = instance.links[0]['href']
        return instance

    def check_create_complete(self, instance):
        '''
        Check if cloud DB instance creation is complete.

        Once the instance goes ACTIVE, create the requested databases and
        users on it. Returns False while the instance is still building.
        '''
        instance.get()  # get updated attributes
        if instance.status == 'ERROR':
            instance.delete()
            raise exception.Error("Cloud DB instance creation failed.")

        if instance.status != 'ACTIVE':
            return False

        logger.info("Cloud DB instance %s created (flavor:%s, volume:%s)" %
                    (self.sqlinstancename, self.flavor, self.volume))
        # create databases; DATABASES is optional, so guard against None
        for database in self.databases or []:
            instance.create_database(
                database[self.DATABASE_NAME],
                character_set=database[self.DATABASE_CHARACTER_SET],
                collate=database[self.DATABASE_COLLATE])
            logger.info("Database %s created on cloud DB instance %s" %
                        (database[self.DATABASE_NAME], self.sqlinstancename))

        # add users; USERS is optional, so guard against None
        for user in self.users or []:
            # Reset per user: previously a user with an empty database list
            # silently inherited the previous user's databases.
            dbs = user[self.USER_DATABASES] or []
            # Index with USER_NAME (not DATABASE_NAME, which only happened
            # to work because both constants map to 'Name').
            instance.create_user(user[self.USER_NAME],
                                 user[self.USER_PASSWORD], dbs)
            logger.info("Cloud database user %s created successfully" %
                        (user[self.USER_NAME]))
        return True

    def handle_delete(self):
        '''
        Delete a Rackspace Cloud DB Instance.
        '''
        logger.debug("CloudDBInstance handle_delete called.")
        if self.resource_id is None:
            return
        try:
            self.cloud_db().delete(self.resource_id)
        except ClientException as cexc:
            # A 404 means the instance is already gone; treat as success.
            if str(cexc.code) != "404":
                raise cexc

    def validate(self):
        '''
        Validate any of the provided params

        Checks cross-property consistency: users require databases, every
        user must reference at least one database, and each referenced
        database must be defined in the Databases property.
        '''
        res = super(CloudDBInstance, self).validate()
        if res:
            return res

        # check validity of user and databases
        users = self.properties.get(self.USERS, None)
        if not users:
            return

        databases = self.properties.get(self.DATABASES, None)
        if not databases:
            return {
                'Error':
                'Databases property is required if Users property'
                ' is provided'
            }

        # Hoisted out of the loop: the set of valid database names is the
        # same for every user.
        db_names = set(db[self.DATABASE_NAME] for db in databases)
        for user in users:
            if not user[self.USER_DATABASES]:
                return {
                    'Error':
                    'Must provide access to at least one database for '
                    'user %s' % user[self.USER_NAME]
                }

            missing_db = [
                db_name for db_name in user[self.USER_DATABASES]
                if db_name not in db_names
            ]
            if missing_db:
                return {
                    'Error':
                    'Database %s specified for user does not exist in '
                    'databases.' % missing_db
                }
        return

    def _hostname(self):
        """Return the instance hostname, fetching it from the API once."""
        if self.hostname is None and self.resource_id is not None:
            dbinstance = self.cloud_db().get(self.resource_id)
            self.hostname = dbinstance.hostname

        return self.hostname

    def _href(self):
        """Return the instance API href, fetching it from the API once."""
        if self.href is None and self.resource_id is not None:
            dbinstance = self.cloud_db().get(self.resource_id)
            self.href = self._gethref(dbinstance)

        return self.href

    def _gethref(self, dbinstance):
        """Extract the 'self' link href from an instance's links list."""
        if dbinstance is None or dbinstance.links is None:
            return None

        for link in dbinstance.links:
            if link['rel'] == 'self':
                return link['href']

    def _resolve_attribute(self, name):
        """Resolve the 'hostname' and 'href' attributes; None otherwise."""
        if name == 'hostname':
            return self._hostname()
        elif name == 'href':
            return self._href()
        else:
            return None
# Esempio n. 23
# 0
class TapFlow(neutron.NeutronResource):
    """A resource for neutron tap-as-a-service tap-flow.

    This plug-in requires neutron-taas. So to enable this
    plug-in, install this library and restart the heat-engine.

    A Tap-Flow represents the port from which the traffic needs
    to be mirrored.
    """

    required_service_extension = 'taas'

    entity = 'tap_flow'

    support_status = support.SupportStatus(version='12.0.0')

    PROPERTIES = (
        NAME, DESCRIPTION, PORT, TAP_SERVICE, DIRECTION,
        VLAN_FILTER
        ) = (
        'name', 'description', 'port', 'tap_service', 'direction',
        'vlan_filter'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the Tap-Flow.'),
            default="",
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for the Tap-Flow.'),
            default="",
            update_allowed=True
        ),
        PORT: properties.Schema(
            properties.Schema.STRING,
            _('ID or name of the tap-flow neutron port.'),
            constraints=[constraints.CustomConstraint('neutron.port')],
            required=True,
        ),
        TAP_SERVICE: properties.Schema(
            properties.Schema.STRING,
            _('ID or name of the neutron tap-service.'),
            constraints=[
                constraints.CustomConstraint('neutron.taas.tap_service')
            ],
            required=True,
        ),
        DIRECTION: properties.Schema(
            properties.Schema.STRING,
            _('The Direction to capture the traffic on.'),
            default='BOTH',
            constraints=[
                constraints.AllowedValues(['IN', 'OUT', 'BOTH']),
            ]
        ),
        VLAN_FILTER: properties.Schema(
            properties.Schema.STRING,
            _('Comma separated list of VLANs, data for which needs to be '
              'captured on probe VM.'),
            constraints=[
                constraints.AllowedPattern(COMMA_SEPARATED_LIST_REGEX),
            ],
        ),
    }

    def translation_rules(self, props):
        """Resolve the port and tap-service names/IDs to neutron UUIDs."""
        return [
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.PORT],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='port'
            ),
            translation.TranslationRule(
                props,
                translation.TranslationRule.RESOLVE,
                [self.TAP_SERVICE],
                client_plugin=self.client_plugin(),
                finder='find_resourceid_by_name_or_id',
                entity='tap_service'
            )
        ]

    def _show_resource(self):
        """Fetch the tap-flow from neutron for attribute resolution."""
        return self.client_plugin().show_ext_resource('tap_flow',
                                                      self.resource_id)

    def handle_create(self):
        """Create the tap-flow, renaming props to the taas API field names."""
        props = self.prepare_properties(self.properties,
                                        self.physical_resource_name())
        # The taas API expects 'source_port'/'tap_service_id' rather than
        # the Heat property names.
        props['source_port'] = props.pop(self.PORT)
        props['tap_service_id'] = props.pop(self.TAP_SERVICE)
        tap_flow = self.client_plugin().create_ext_resource('tap_flow',
                                                            props)
        self.resource_id_set(tap_flow['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push changed (update-allowed) properties to neutron."""
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client_plugin().update_ext_resource('tap_flow', prop_diff,
                                                     self.resource_id)

    def handle_delete(self):
        """Delete the tap-flow; a missing resource is treated as success."""
        if self.resource_id is None:
            return
        with self.client_plugin().ignore_not_found:
            self.client_plugin().delete_ext_resource('tap_flow',
                                                     self.resource_id)

    def check_create_complete(self, data):
        """Poll until the tap-flow reaches an active status."""
        return self.client_plugin().check_ext_resource_status(
            'tap_flow', self.resource_id)

    def check_update_complete(self, prop_diff):
        """Poll until the updated tap-flow reaches an active status."""
        if prop_diff:
            return self.client_plugin().check_ext_resource_status(
                'tap_flow', self.resource_id)
        return True

    def check_delete_complete(self, data):
        """Return True once the tap-flow is gone from neutron."""
        if self.resource_id is None:
            return True

        with self.client_plugin().ignore_not_found:
            try:
                if self.client_plugin().check_ext_resource_status(
                        'tap_flow', self.resource_id):
                    self.client_plugin().delete_ext_resource(
                        'tap_flow', self.resource_id)
            except exception.ResourceInError:
                # Still try to delete tap resource in error state
                self.client_plugin().delete_ext_resource('tap_flow',
                                                         self.resource_id)
            # NOTE: if any call above raised NotFound, ignore_not_found
            # suppresses it and control skips this 'return False', falling
            # through to the final 'return True' below.
            return False

        return True
# Esempio n. 24
# 0
class SaharaNodeGroupTemplate(resource.Resource):
    """A resource for managing a Sahara node group template.

    A node group template describes a group of cluster nodes: the flavor
    they run on, the processes they run, and their volume/network setup.
    """

    PROPERTIES = (
        NAME,
        PLUGIN_NAME,
        HADOOP_VERSION,
        FLAVOR,
        DESCRIPTION,
        VOLUMES_PER_NODE,
        VOLUMES_SIZE,
        NODE_PROCESSES,
        FLOATING_IP_POOL,
        NODE_CONFIGS,
    ) = (
        'name',
        'plugin_name',
        'hadoop_version',
        'flavor',
        'description',
        'volumes_per_node',
        'volumes_size',
        'node_processes',
        'floating_ip_pool',
        'node_configs',
    )

    properties_schema = {
        NAME:
        properties.Schema(
            properties.Schema.STRING,
            _("Name for the Sahara Node Group Template."),
            constraints=[
                constraints.Length(min=1, max=50),
                constraints.AllowedPattern(SAHARA_NAME_REGEX),
            ],
        ),
        DESCRIPTION:
        properties.Schema(
            properties.Schema.STRING,
            _('Description of the Node Group Template.'),
            default="",
        ),
        PLUGIN_NAME:
        properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
        ),
        HADOOP_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
        ),
        FLAVOR:
        properties.Schema(
            properties.Schema.STRING,
            _('Name or ID Nova flavor for the nodes.'),
            required=True,
        ),
        VOLUMES_PER_NODE:
        properties.Schema(
            properties.Schema.INTEGER,
            _("Volumes per node."),
            constraints=[
                constraints.Range(min=0),
            ],
        ),
        VOLUMES_SIZE:
        properties.Schema(
            properties.Schema.INTEGER,
            _("Size of the volumes, in GB."),
            constraints=[
                constraints.Range(min=1),
            ],
        ),
        NODE_PROCESSES:
        properties.Schema(
            properties.Schema.LIST,
            _("List of processes to run on every node."),
            required=True,
            constraints=[
                constraints.Length(min=1),
            ],
            schema=properties.Schema(properties.Schema.STRING, ),
        ),
        FLOATING_IP_POOL:
        properties.Schema(
            properties.Schema.STRING,
            _("Name or UUID of the Neutron floating IP network to use."),
            constraints=[
                constraints.CustomConstraint('neutron.network'),
            ],
        ),
        NODE_CONFIGS:
        properties.Schema(
            properties.Schema.MAP,
            _("Dictionary of node configurations."),
        ),
    }

    default_client_name = 'sahara'

    physical_resource_name_limit = 50

    def _ngt_name(self):
        """Return the template name: the NAME property, or a generated one."""
        name = self.properties.get(self.NAME)
        if name:
            return name
        return self.physical_resource_name()

    def handle_create(self):
        """Create the node group template in Sahara.

        Resolves the flavor name/ID to a flavor ID via nova and the
        floating IP pool name/UUID to a network ID via neutron before
        calling the Sahara API.
        """
        plugin_name = self.properties[self.PLUGIN_NAME]
        hadoop_version = self.properties[self.HADOOP_VERSION]
        node_processes = self.properties[self.NODE_PROCESSES]
        description = self.properties[self.DESCRIPTION]
        flavor_id = self.client_plugin("nova").get_flavor_id(
            self.properties[self.FLAVOR])
        volumes_per_node = self.properties.get(self.VOLUMES_PER_NODE)
        volumes_size = self.properties.get(self.VOLUMES_SIZE)
        floating_ip_pool = self.properties.get(self.FLOATING_IP_POOL)
        if floating_ip_pool:
            floating_ip_pool = self.client_plugin(
                'neutron').find_neutron_resource(self.properties,
                                                 self.FLOATING_IP_POOL,
                                                 'network')
        node_configs = self.properties.get(self.NODE_CONFIGS)

        node_group_template = self.client().node_group_templates.create(
            self._ngt_name(),
            plugin_name,
            hadoop_version,
            flavor_id,
            description=description,
            volumes_per_node=volumes_per_node,
            volumes_size=volumes_size,
            node_processes=node_processes,
            floating_ip_pool=floating_ip_pool,
            node_configs=node_configs)
        # Lazy %-args (not eager '%') so the message is only formatted when
        # the log level is enabled; matches SaharaClusterTemplate's style.
        LOG.info(_("Node Group Template '%s' has been created"),
                 node_group_template.name)
        self.resource_id_set(node_group_template.id)
        return self.resource_id

    def handle_delete(self):
        """Delete the template; a missing template is treated as success."""
        if not self.resource_id:
            return
        try:
            self.client().node_group_templates.delete(self.resource_id)
        except Exception as ex:
            # Re-raises anything other than NotFound.
            self.client_plugin().ignore_not_found(ex)
        LOG.info(_("Node Group Template '%s' has been deleted."),
                 self._ngt_name())

    def validate(self):
        """Validate cross-property requirements of the template."""
        res = super(SaharaNodeGroupTemplate, self).validate()
        if res:
            return res
        # NOTE(pshchelo): floating ip pool must be set for Neutron
        if (self.is_using_neutron()
                and not self.properties.get(self.FLOATING_IP_POOL)):
            msg = _("%s must be provided.") % self.FLOATING_IP_POOL
            raise exception.StackValidationFailed(message=msg)
# Esempio n. 25
# 0
class VolumeAttachment(resource.Resource):
    """Attach an existing volume to a server instance.

    Attachment and detachment are driven by VolumeAttachTask /
    VolumeDetachTask via the scheduler's TaskRunner.
    """

    PROPERTIES = (
        INSTANCE_ID,
        VOLUME_ID,
        DEVICE,
    ) = (
        'InstanceId',
        'VolumeId',
        'Device',
    )

    properties_schema = {
        INSTANCE_ID:
        properties.Schema(
            properties.Schema.STRING,
            _('The ID of the instance to which the volume attaches.'),
            required=True,
            update_allowed=True),
        VOLUME_ID:
        properties.Schema(properties.Schema.STRING,
                          _('The ID of the volume to be attached.'),
                          required=True,
                          update_allowed=True),
        DEVICE:
        properties.Schema(
            properties.Schema.STRING,
            _('The device where the volume is exposed on the instance. This '
              'assignment may not be honored and it is advised that the path '
              '/dev/disk/by-id/virtio-<VolumeId> be used instead.'),
            required=True,
            update_allowed=True,
            constraints=[
                constraints.AllowedPattern('/dev/vd[b-z]'),
            ]),
    }

    def handle_create(self):
        """Start attaching the volume; returns the runner to poll on."""
        server_id = self.properties[self.INSTANCE_ID]
        volume_id = self.properties[self.VOLUME_ID]
        dev = self.properties[self.DEVICE]

        attach_task = VolumeAttachTask(self.stack, server_id, volume_id, dev)
        attach_runner = scheduler.TaskRunner(attach_task)

        attach_runner.start()

        self.resource_id_set(attach_task.attachment_id)

        return attach_runner

    def check_create_complete(self, attach_runner):
        """Step the attach task; True once it has finished."""
        return attach_runner.step()

    def handle_delete(self):
        """Detach the volume synchronously (the runner is invoked to
        completion)."""
        server_id = self.properties[self.INSTANCE_ID]
        detach_task = VolumeDetachTask(self.stack, server_id, self.resource_id)
        scheduler.TaskRunner(detach_task)()

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update by detaching the old attachment, then re-attaching with
        the new property values; returns the list of task runners."""
        checkers = []
        if prop_diff:
            # Even though some combinations of changed properties
            # could be updated in UpdateReplace manner,
            # we still first detach the old resource so that
            # self.resource_id is not replaced prematurely
            volume_id = self.properties.get(self.VOLUME_ID)
            if self.VOLUME_ID in prop_diff:
                volume_id = prop_diff.get(self.VOLUME_ID)

            device = self.properties.get(self.DEVICE)
            if self.DEVICE in prop_diff:
                device = prop_diff.get(self.DEVICE)

            # Detach from the OLD server; the new server id (if changed) is
            # only picked up afterwards, for the attach task below.
            server_id = self.properties.get(self.INSTANCE_ID)
            detach_task = VolumeDetachTask(self.stack, server_id,
                                           self.resource_id)
            checkers.append(scheduler.TaskRunner(detach_task))

            if self.INSTANCE_ID in prop_diff:
                server_id = prop_diff.get(self.INSTANCE_ID)
            attach_task = VolumeAttachTask(self.stack, server_id, volume_id,
                                           device)

            checkers.append(scheduler.TaskRunner(attach_task))

        # Only the first (detach) runner is started here; the attach runner
        # is started by check_update_complete once detach has finished.
        if checkers:
            checkers[0].start()
        return checkers

    def check_update_complete(self, checkers):
        """Step the detach/attach runners in order; True when all done."""
        for checker in checkers:
            if not checker.started():
                checker.start()
            if not checker.step():
                return False
        # The last runner is the attach task; record its new attachment id.
        self.resource_id_set(checkers[-1]._task.attachment_id)
        return True
# Esempio n. 26
# 0
class SaharaClusterTemplate(resource.Resource):
    """A resource for managing a Sahara cluster template.

    A cluster template composes node groups with cluster-wide settings
    (plugin, Hadoop version, network, configs) for launching clusters.
    """

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        NAME, PLUGIN_NAME, HADOOP_VERSION, DESCRIPTION,
        ANTI_AFFINITY, MANAGEMENT_NETWORK,
        CLUSTER_CONFIGS, NODE_GROUPS, IMAGE_ID, USE_AUTOCONFIG
    ) = (
        'name', 'plugin_name', 'hadoop_version', 'description',
        'anti_affinity', 'neutron_management_network',
        'cluster_configs', 'node_groups', 'default_image_id', 'use_autoconfig'
    )

    _NODE_GROUP_KEYS = (
        NG_NAME, COUNT, NG_TEMPLATE_ID,
    ) = (
        'name', 'count', 'node_group_template_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _("Name for the Sahara Cluster Template."),
            constraints=[
                constraints.Length(min=1, max=50),
                constraints.AllowedPattern(SAHARA_NAME_REGEX),
            ],
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the Sahara Group Template.'),
            default="",
            update_allowed=True
        ),
        PLUGIN_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('sahara.plugin')
            ],
            update_allowed=True
        ),
        HADOOP_VERSION: properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
            update_allowed=True
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _("ID of the default image to use for the template."),
            constraints=[
                constraints.CustomConstraint('sahara.image'),
            ],
            update_allowed=True
        ),
        MANAGEMENT_NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('Name or UUID of network.'),
            constraints=[
                constraints.CustomConstraint('neutron.network')
            ],
            update_allowed=True
        ),
        ANTI_AFFINITY: properties.Schema(
            properties.Schema.LIST,
            _("List of processes to enable anti-affinity for."),
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
            update_allowed=True
        ),
        CLUSTER_CONFIGS: properties.Schema(
            properties.Schema.MAP,
            _('Cluster configs dictionary.'),
            update_allowed=True
        ),
        NODE_GROUPS: properties.Schema(
            properties.Schema.LIST,
            _('Node groups.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NG_NAME: properties.Schema(
                        properties.Schema.STRING,
                        _('Name of the Node group.'),
                        required=True
                    ),
                    COUNT: properties.Schema(
                        properties.Schema.INTEGER,
                        _("Number of instances in the Node group."),
                        required=True,
                        constraints=[
                            constraints.Range(min=1)
                        ]
                    ),
                    NG_TEMPLATE_ID: properties.Schema(
                        properties.Schema.STRING,
                        _("ID of the Node Group Template."),
                        required=True
                    ),
                }
            ),
            update_allowed=True
        ),
        USE_AUTOCONFIG: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Configure most important configs automatically."),
            support_status=support.SupportStatus(version='5.0.0')
        )
    }

    default_client_name = 'sahara'

    physical_resource_name_limit = 50

    entity = 'cluster_templates'

    def _cluster_template_name(self):
        """Return the template name: the NAME property, or the physical
        resource name stripped to characters Sahara accepts."""
        name = self.properties[self.NAME]
        if name:
            return name
        return re.sub('[^a-zA-Z0-9-]', '', self.physical_resource_name())

    def _prepare_properties(self):
        """Build the kwargs dict for the Sahara cluster-template API.

        Resolves the management network name/UUID to a network ID via
        neutron (or nova-network, depending on deployment).
        """
        props = {
            'name': self._cluster_template_name(),
            'plugin_name': self.properties[self.PLUGIN_NAME],
            'hadoop_version': self.properties[self.HADOOP_VERSION],
            'description': self.properties[self.DESCRIPTION],
            'cluster_configs': self.properties[self.CLUSTER_CONFIGS],
            'node_groups': self.properties[self.NODE_GROUPS],
            'anti_affinity': self.properties[self.ANTI_AFFINITY],
            'net_id': self.properties[self.MANAGEMENT_NETWORK],
            'default_image_id': self.properties[self.IMAGE_ID],
            'use_autoconfig': self.properties[self.USE_AUTOCONFIG],
        }
        if props['net_id']:
            if self.is_using_neutron():
                props['net_id'] = self.client_plugin(
                    'neutron').find_neutron_resource(
                    self.properties, self.MANAGEMENT_NETWORK, 'network')
            else:
                props['net_id'] = self.client_plugin(
                    'nova').get_nova_network_id(props['net_id'])
        return props

    def handle_create(self):
        """Create the cluster template in Sahara."""
        args = self._prepare_properties()
        cluster_template = self.client().cluster_templates.create(**args)
        LOG.info(_LI("Cluster Template '%s' has been created"),
                 cluster_template.name)
        self.resource_id_set(cluster_template.id)
        return self.resource_id

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Re-resolve all properties and push the full template to Sahara."""
        if prop_diff:
            self.properties = json_snippet.properties(
                self.properties_schema,
                self.context)
            args = self._prepare_properties()
            self.client().cluster_templates.update(self.resource_id, **args)

    def validate(self):
        """Validate network requirements and the plugin/Hadoop version."""
        res = super(SaharaClusterTemplate, self).validate()
        if res:
            return res
        # check if running on neutron and MANAGEMENT_NETWORK missing
        if (self.is_using_neutron() and
                not self.properties[self.MANAGEMENT_NETWORK]):
            msg = _("%s must be provided"
                    ) % self.MANAGEMENT_NETWORK
            raise exception.StackValidationFailed(message=msg)

        self.client_plugin().validate_hadoop_version(
            self.properties[self.PLUGIN_NAME],
            self.properties[self.HADOOP_VERSION]
        )
# Esempio n. 27
# 0
class SaharaNodeGroupTemplate(resource.Resource):
    """A resource for managing Sahara node group templates.

    A node group template describes one group of nodes of a Sahara
    cluster: the data-processing plugin and Hadoop version, the Nova
    flavor, attached volumes, networking and the processes to run on
    every node of the group.
    """

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        NAME, PLUGIN_NAME, HADOOP_VERSION, FLAVOR, DESCRIPTION,
        VOLUMES_PER_NODE, VOLUMES_SIZE, VOLUME_TYPE,
        SECURITY_GROUPS, AUTO_SECURITY_GROUP,
        AVAILABILITY_ZONE, VOLUMES_AVAILABILITY_ZONE,
        NODE_PROCESSES, FLOATING_IP_POOL, NODE_CONFIGS, IMAGE_ID,

    ) = (
        'name', 'plugin_name', 'hadoop_version', 'flavor', 'description',
        'volumes_per_node', 'volumes_size', 'volume_type',
        'security_groups', 'auto_security_group',
        'availability_zone', 'volumes_availability_zone',
        'node_processes', 'floating_ip_pool', 'node_configs', 'image_id',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _("Name for the Sahara Node Group Template."),
            constraints=[
                constraints.Length(min=1, max=50),
                constraints.AllowedPattern(SAHARA_NAME_REGEX),
            ],
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the Node Group Template.'),
            default="",
        ),
        PLUGIN_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Plugin name.'),
            required=True,
        ),
        HADOOP_VERSION: properties.Schema(
            properties.Schema.STRING,
            _('Version of Hadoop running on instances.'),
            required=True,
        ),
        FLAVOR: properties.Schema(
            properties.Schema.STRING,
            _('Name or ID Nova flavor for the nodes.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('nova.flavor')
            ]
        ),
        VOLUMES_PER_NODE: properties.Schema(
            properties.Schema.INTEGER,
            _("Volumes per node."),
            constraints=[
                constraints.Range(min=0),
            ],
        ),
        VOLUMES_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _("Size of the volumes, in GB."),
            constraints=[
                constraints.Range(min=1),
            ],
        ),
        VOLUME_TYPE: properties.Schema(
            properties.Schema.STRING,
            _("Type of the volume to create on Cinder backend."),
            constraints=[
                constraints.CustomConstraint('cinder.vtype')
            ]
        ),
        SECURITY_GROUPS: properties.Schema(
            properties.Schema.LIST,
            _("List of security group names or IDs to assign to this "
              "Node Group template."),
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
        ),
        AUTO_SECURITY_GROUP: properties.Schema(
            properties.Schema.BOOLEAN,
            _("Defines whether auto-assign security group to this "
              "Node Group template."),
        ),
        AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _("Availability zone to create servers in."),
        ),
        VOLUMES_AVAILABILITY_ZONE: properties.Schema(
            properties.Schema.STRING,
            _("Availability zone to create volumes in."),
        ),
        NODE_PROCESSES: properties.Schema(
            properties.Schema.LIST,
            _("List of processes to run on every node."),
            required=True,
            constraints=[
                constraints.Length(min=1),
            ],
            schema=properties.Schema(
                properties.Schema.STRING,
            ),
        ),
        FLOATING_IP_POOL: properties.Schema(
            properties.Schema.STRING,
            _("Name or UUID of the Neutron floating IP network or "
              "name of the Nova floating ip pool to use. "
              "Should not be provided when used with Nova-network "
              "that auto-assign floating IPs."),
        ),
        NODE_CONFIGS: properties.Schema(
            properties.Schema.MAP,
            _("Dictionary of node configurations."),
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _("ID of the image to use for the template."),
            constraints=[
                constraints.CustomConstraint('sahara.image'),
            ],
        ),
    }

    default_client_name = 'sahara'

    # Matches the NAME length constraint above (Sahara limit).
    physical_resource_name_limit = 50

    def _ngt_name(self):
        """Return the configured template name, or a generated one."""
        name = self.properties[self.NAME]
        if name:
            return name
        return self.physical_resource_name()

    def handle_create(self):
        """Create the node group template in Sahara and store its ID."""
        plugin_name = self.properties[self.PLUGIN_NAME]
        hadoop_version = self.properties[self.HADOOP_VERSION]
        node_processes = self.properties[self.NODE_PROCESSES]
        description = self.properties[self.DESCRIPTION]
        flavor_id = self.client_plugin("nova").get_flavor_id(
            self.properties[self.FLAVOR])
        volumes_per_node = self.properties[self.VOLUMES_PER_NODE]
        volumes_size = self.properties[self.VOLUMES_SIZE]
        volume_type = self.properties[self.VOLUME_TYPE]
        floating_ip_pool = self.properties[self.FLOATING_IP_POOL]
        security_groups = self.properties[self.SECURITY_GROUPS]
        auto_security_group = self.properties[self.AUTO_SECURITY_GROUP]
        availability_zone = self.properties[self.AVAILABILITY_ZONE]
        vol_availability_zone = self.properties[self.VOLUMES_AVAILABILITY_ZONE]
        image_id = self.properties[self.IMAGE_ID]
        # On neutron the pool is a network name/UUID that must be
        # resolved to an ID; on nova-network it is passed through.
        if floating_ip_pool and self.is_using_neutron():
            floating_ip_pool = self.client_plugin(
                'neutron').find_neutron_resource(
                    self.properties, self.FLOATING_IP_POOL, 'network')
        node_configs = self.properties[self.NODE_CONFIGS]

        node_group_template = self.client().node_group_templates.create(
            self._ngt_name(),
            plugin_name, hadoop_version, flavor_id,
            description=description,
            volumes_per_node=volumes_per_node,
            volumes_size=volumes_size,
            volume_type=volume_type,
            node_processes=node_processes,
            floating_ip_pool=floating_ip_pool,
            node_configs=node_configs,
            security_groups=security_groups,
            auto_security_group=auto_security_group,
            availability_zone=availability_zone,
            volumes_availability_zone=vol_availability_zone,
            image_id=image_id
        )
        LOG.info(_LI("Node Group Template '%s' has been created"),
                 node_group_template.name)
        self.resource_id_set(node_group_template.id)
        return self.resource_id

    def handle_delete(self):
        """Delete the template in Sahara, ignoring not-found errors."""
        if not self.resource_id:
            return
        try:
            self.client().node_group_templates.delete(
                self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        LOG.info(_LI("Node Group Template '%s' has been deleted."),
                 self._ngt_name())

    def validate(self):
        """Check that the floating IP pool resolves on the backend."""
        res = super(SaharaNodeGroupTemplate, self).validate()
        if res:
            return res
        pool = self.properties[self.FLOATING_IP_POOL]
        if pool:
            if self.is_using_neutron():
                try:
                    self.client_plugin('neutron').find_neutron_resource(
                        self.properties, self.FLOATING_IP_POOL, 'network')
                except Exception as ex:
                    # NOTE(review): ex.message is Python 2 only — confirm
                    # the supported runtime before porting.
                    if (self.client_plugin('neutron').is_not_found(ex)
                            or self.client_plugin('neutron').is_no_unique(ex)):
                        raise exception.StackValidationFailed(
                            message=ex.message)
                    raise
            else:
                try:
                    self.client('nova').floating_ip_pools.find(name=pool)
                except Exception as ex:
                    if self.client_plugin('nova').is_not_found(ex):
                        raise exception.StackValidationFailed(
                            message=ex.message)
                    raise
# Example 28
class GlanceImage(resource.Resource):
    """A resource managing images in Glance.

    A resource provides managing images that are meant to be used with other
    services.
    """

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        NAME, IMAGE_ID, IS_PUBLIC, MIN_DISK, MIN_RAM, PROTECTED,
        DISK_FORMAT, CONTAINER_FORMAT, LOCATION, TAGS, EXTRA_PROPERTIES,
        ARCHITECTURE, KERNEL_ID, OS_DISTRO, OWNER, RAMDISK_ID
    ) = (
        'name', 'id', 'is_public', 'min_disk', 'min_ram', 'protected',
        'disk_format', 'container_format', 'location', 'tags',
        'extra_properties', 'architecture', 'kernel_id', 'os_distro',
        'owner', 'ramdisk_id'
    )

    # UUID pattern used to validate kernel/ramdisk image references.
    glance_id_pattern = ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
                         '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$')

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the image. The name of an image is not '
              'unique to a Image Service node.')
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _('The image ID. Glance will generate a UUID if not specified.')
        ),
        IS_PUBLIC: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Scope of image accessibility. Public or private. '
              'Default value is False means private. Note: The policy '
              'setting of glance allows only users with admin roles to create '
              'public image by default.'),
            default=False,
        ),
        MIN_DISK: properties.Schema(
            properties.Schema.INTEGER,
            _('Amount of disk space (in GB) required to boot image. '
              'Default value is 0 if not specified '
              'and means no limit on the disk size.'),
            constraints=[
                constraints.Range(min=0),
            ],
            default=0
        ),
        MIN_RAM: properties.Schema(
            properties.Schema.INTEGER,
            _('Amount of ram (in MB) required to boot image. Default value '
              'is 0 if not specified and means no limit on the ram size.'),
            constraints=[
                constraints.Range(min=0),
            ],
            default=0
        ),
        PROTECTED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether the image can be deleted. If the value is True, '
              'the image is protected and cannot be deleted.'),
            default=False
        ),
        DISK_FORMAT: properties.Schema(
            properties.Schema.STRING,
            _('Disk format of image.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ami', 'ari', 'aki',
                                           'vhd', 'vmdk', 'raw',
                                           'qcow2', 'vdi', 'iso'])
            ]
        ),
        CONTAINER_FORMAT: properties.Schema(
            properties.Schema.STRING,
            _('Container format of image.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ami', 'ari', 'aki',
                                           'bare', 'ova', 'ovf'])
            ]
        ),
        LOCATION: properties.Schema(
            properties.Schema.STRING,
            _('URL where the data for this image already resides. For '
              'example, if the image data is stored in swift, you could '
              'specify "swift://example.com/container/obj".'),
            required=True,
        ),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            _('List of image tags.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='7.0.0')
        ),
        EXTRA_PROPERTIES: properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary properties to associate with the image.'),
            update_allowed=True,
            default={},
            support_status=support.SupportStatus(version='7.0.0')
        ),
        ARCHITECTURE: properties.Schema(
            properties.Schema.STRING,
            _('Operating system architecture.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='7.0.0')
        ),
        KERNEL_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of image stored in Glance that should be used as '
              'the kernel when booting an AMI-style image.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='7.0.0'),
            constraints=[
                constraints.AllowedPattern(glance_id_pattern)
            ]
        ),
        OS_DISTRO: properties.Schema(
            properties.Schema.STRING,
            _('The common name of the operating system distribution '
              'in lowercase.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='7.0.0')
        ),
        OWNER: properties.Schema(
            properties.Schema.STRING,
            _('Owner of the image.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='7.0.0')
        ),
        RAMDISK_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of image stored in Glance that should be used as '
              'the ramdisk when booting an AMI-style image.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='7.0.0'),
            constraints=[
                constraints.AllowedPattern(glance_id_pattern)
            ]
        )
    }

    default_client_name = 'glance'

    entity = 'images'

    def handle_create(self):
        """Create the image, then set v2-only attributes and tags.

        Attributes not supported by the v1 create call (architecture,
        kernel_id, os_distro, ramdisk_id, tags) are popped out of the
        argument dict and applied via the v2 client afterwards.
        """
        args = dict((k, v) for k, v in self.properties.items()
                    if v is not None)

        tags = args.pop(self.TAGS, [])
        args['properties'] = args.pop(self.EXTRA_PROPERTIES, {})
        architecture = args.pop(self.ARCHITECTURE, None)
        kernel_id = args.pop(self.KERNEL_ID, None)
        os_distro = args.pop(self.OS_DISTRO, None)
        ramdisk_id = args.pop(self.RAMDISK_ID, None)

        image_id = self.client().images.create(**args).id
        self.resource_id_set(image_id)

        v2_images = self.client(version=self.client_plugin().V2).images
        if architecture is not None:
            v2_images.update(image_id, architecture=architecture)
        if kernel_id is not None:
            v2_images.update(image_id, kernel_id=kernel_id)
        if os_distro is not None:
            v2_images.update(image_id, os_distro=os_distro)
        if ramdisk_id is not None:
            v2_images.update(image_id, ramdisk_id=ramdisk_id)

        for tag in tags:
            self.client(
                version=self.client_plugin().V2).image_tags.update(
                image_id,
                tag)

        return image_id

    def check_create_complete(self, image_id):
        """Return True once the image status becomes 'active'."""
        image = self.client().images.get(image_id)
        return image.status == 'active'

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply tag, extra-property and attribute changes via glance v2."""
        if prop_diff and self.TAGS in prop_diff:
            existing_tags = self.properties.get(self.TAGS) or []
            diff_tags = prop_diff.pop(self.TAGS) or []

            new_tags = set(diff_tags) - set(existing_tags)
            for tag in new_tags:
                self.client(
                    version=self.client_plugin().V2).image_tags.update(
                    self.resource_id,
                    tag)

            removed_tags = set(existing_tags) - set(diff_tags)
            for tag in removed_tags:
                with self.client_plugin().ignore_not_found:
                    self.client(
                        version=self.client_plugin().V2).image_tags.delete(
                        self.resource_id,
                        tag)

        v2_images = self.client(version=self.client_plugin().V2).images

        # NOTE(review): from here on prop_diff is assumed to be a dict
        # (the guard above also allows a falsy prop_diff) — confirm
        # callers always pass one.
        if self.EXTRA_PROPERTIES in prop_diff:
            old_properties = self.properties.get(self.EXTRA_PROPERTIES) or {}
            new_properties = prop_diff.pop(self.EXTRA_PROPERTIES)
            prop_diff.update(new_properties)
            remove_props = list(set(old_properties) - set(new_properties))

            # Though remove_props defaults to None within the glanceclient,
            # setting it to a list (possibly []) every time ensures only one
            # calling format to images.update
            v2_images.update(self.resource_id, remove_props, **prop_diff)
        else:
            v2_images.update(self.resource_id, **prop_diff)

    def _show_resource(self):
        """Return the image representation, dict-ified for v2 clients."""
        # NOTE(review): this uses self.glance() while the rest of the
        # class uses self.client() — verify both resolve to the same
        # client plugin.
        if self.glance().version == 1.0:
            return super(GlanceImage, self)._show_resource()
        else:
            image = self.glance().images.get(self.resource_id)
            return dict(image)

    def validate(self):
        """Reject ami/ari/aki container formats with a mismatched disk format."""
        super(GlanceImage, self).validate()
        container_format = self.properties[self.CONTAINER_FORMAT]
        if (container_format in ['ami', 'ari', 'aki']
                and self.properties[self.DISK_FORMAT] != container_format):
            msg = _("Invalid mix of disk and container formats. When "
                    "setting a disk or container format to one of 'aki', "
                    "'ari', or 'ami', the container and disk formats must "
                    "match.")
            raise exception.StackValidationFailed(message=msg)

    def get_live_resource_data(self):
        """Fetch live image data; treat deleted/killed images as gone."""
        image_data = super(GlanceImage, self).get_live_resource_data()
        if image_data.get('status') in ('deleted', 'killed'):
                raise exception.EntityNotFound(entity='Resource',
                                               name=self.name)
        return image_data

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Map live image data back onto this resource's properties."""
        image_reality = {}

        # NOTE(prazumovsky): At first, there's no way to get location from
        # glance; at second, location property is doubtful, because glance
        # client v2 doesn't use location, it uses locations. So, we should
        # get location property from resource properties.
        if self.client().version == 1.0:
            image_reality.update(
                {self.LOCATION: resource_properties[self.LOCATION]})

        for key in self.PROPERTIES:
            if key == self.LOCATION:
                continue
            if key == self.IMAGE_ID:
                if (resource_properties.get(self.IMAGE_ID) is not None or
                        resource_data.get(self.IMAGE_ID) != self.resource_id):
                    image_reality.update({self.IMAGE_ID: resource_data.get(
                        self.IMAGE_ID)})
                else:
                    image_reality.update({self.IMAGE_ID: None})
            else:
                image_reality.update({key: resource_data.get(key)})

        return image_reality
# Example 29
class OSDBInstance(resource.Resource):
    '''
    OpenStack cloud database instance resource.

    Provisions a Trove database instance with optional databases,
    users, networking and replication settings.
    '''

    support_status = support.SupportStatus(version='2014.1')

    # Trove instance states this resource reacts to.
    TROVE_STATUS = (
        ERROR,
        FAILED,
        ACTIVE,
    ) = (
        'ERROR',
        'FAILED',
        'ACTIVE',
    )

    # Human-readable reasons reported when creation ends in a bad state.
    TROVE_STATUS_REASON = {
        FAILED:
        _('The database instance was created, but heat failed to set '
          'up the datastore. If a database instance is in the FAILED '
          'state, it should be deleted and a new one should be '
          'created.'),
        ERROR:
        _('The last operation for the database instance failed due to '
          'an error.'),
    }

    # States that mean instance creation has failed.
    BAD_STATUSES = (ERROR, FAILED)
    PROPERTIES = (
        NAME,
        FLAVOR,
        SIZE,
        DATABASES,
        USERS,
        AVAILABILITY_ZONE,
        RESTORE_POINT,
        DATASTORE_TYPE,
        DATASTORE_VERSION,
        NICS,
        REPLICA_OF,
        REPLICA_COUNT,
    ) = ('name', 'flavor', 'size', 'databases', 'users', 'availability_zone',
         'restore_point', 'datastore_type', 'datastore_version', 'networks',
         'replica_of', 'replica_count')

    # Keys of each entry in the DATABASES list property.
    _DATABASE_KEYS = (
        DATABASE_CHARACTER_SET,
        DATABASE_COLLATE,
        DATABASE_NAME,
    ) = (
        'character_set',
        'collate',
        'name',
    )

    # Keys of each entry in the USERS list property.
    _USER_KEYS = (
        USER_NAME,
        USER_PASSWORD,
        USER_HOST,
        USER_DATABASES,
    ) = (
        'name',
        'password',
        'host',
        'databases',
    )

    # Keys of each entry in the NICS list property.
    _NICS_KEYS = (NET, PORT, V4_FIXED_IP) = ('network', 'port', 'fixed_ip')

    ATTRIBUTES = (
        HOSTNAME,
        HREF,
    ) = (
        'hostname',
        'href',
    )

    properties_schema = {
        NAME:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the DB instance to create.'),
                          constraints=[
                              constraints.Length(max=255),
                          ]),
        FLAVOR:
        properties.Schema(
            properties.Schema.STRING,
            _('Reference to a flavor for creating DB instance.'),
            required=True,
            constraints=[constraints.CustomConstraint('trove.flavor')]),
        DATASTORE_TYPE:
        properties.Schema(properties.Schema.STRING,
                          _("Name of registered datastore type."),
                          constraints=[constraints.Length(max=255)]),
        DATASTORE_VERSION:
        properties.Schema(
            properties.Schema.STRING,
            _("Name of the registered datastore version. "
              "It must exist for provided datastore type. "
              "Defaults to using single active version. "
              "If several active versions exist for provided datastore type, "
              "explicit value for this parameter must be specified."),
            constraints=[constraints.Length(max=255)]),
        SIZE:
        properties.Schema(properties.Schema.INTEGER,
                          _('Database volume size in GB.'),
                          required=True,
                          constraints=[
                              constraints.Range(1, 150),
                          ]),
        NICS:
        properties.Schema(
            properties.Schema.LIST,
            _("List of network interfaces to create on instance."),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    NET:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of the network to attach this NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT,
                              'net': NET
                          },
                        constraints=[
                            constraints.CustomConstraint('neutron.network')
                        ]),
                    PORT:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Name or UUID of Neutron port to attach this '
                          'NIC to. '
                          'Either %(port)s or %(net)s must be specified.') % {
                              'port': PORT,
                              'net': NET
                          },
                        constraints=[
                            constraints.CustomConstraint('neutron.port')
                        ],
                    ),
                    V4_FIXED_IP:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Fixed IPv4 address for this NIC.'),
                        constraints=[constraints.CustomConstraint('ip_addr')]),
                },
            ),
        ),
        DATABASES:
        properties.Schema(
            properties.Schema.LIST,
            _('List of databases to be created on DB instance creation.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    DATABASE_CHARACTER_SET:
                    properties.Schema(properties.Schema.STRING,
                                      _('Set of symbols and encodings.'),
                                      default='utf8'),
                    DATABASE_COLLATE:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Set of rules for comparing characters in a '
                          'character set.'),
                        default='utf8_general_ci'),
                    DATABASE_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('Specifies database names for creating '
                          'databases on instance creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=64),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                },
            )),
        USERS:
        properties.Schema(
            properties.Schema.LIST,
            _('List of users to be created on DB instance creation.'),
            default=[],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    USER_NAME:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('User name to create a user on instance '
                          'creation.'),
                        required=True,
                        constraints=[
                            constraints.Length(max=16),
                            constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
                                                       r'[a-zA-Z0-9_@?#\s]*'
                                                       r'[a-zA-Z0-9_]+'),
                        ]),
                    USER_PASSWORD:
                    properties.Schema(properties.Schema.STRING,
                                      _('Password for those users on instance '
                                        'creation.'),
                                      required=True,
                                      constraints=[
                                          constraints.AllowedPattern(
                                              r'[a-zA-Z0-9_]+'
                                              r'[a-zA-Z0-9_@?#\s]*'
                                              r'[a-zA-Z0-9_]+'),
                                      ]),
                    USER_HOST:
                    properties.Schema(
                        properties.Schema.STRING,
                        _('The host from which a user is allowed to '
                          'connect to the database.'),
                        default='%'),
                    USER_DATABASES:
                    properties.Schema(
                        properties.Schema.LIST,
                        _('Names of databases that those users can '
                          'access on instance creation.'),
                        schema=properties.Schema(properties.Schema.STRING, ),
                        required=True,
                        constraints=[
                            constraints.Length(min=1),
                        ]),
                },
            )),
        AVAILABILITY_ZONE:
        properties.Schema(properties.Schema.STRING,
                          _('Name of the availability zone for DB instance.')),
        RESTORE_POINT:
        properties.Schema(properties.Schema.STRING,
                          _('DB instance restore point.')),
        REPLICA_OF:
        properties.Schema(
            properties.Schema.STRING,
            _('Identifier of the source instance to replicate.'),
            support_status=support.SupportStatus(version='5.0.0')),
        REPLICA_COUNT:
        properties.Schema(
            properties.Schema.INTEGER,
            _('The number of replicas to be created.'),
            support_status=support.SupportStatus(version='5.0.0')),
    }

    attributes_schema = {
        HOSTNAME:
        attributes.Schema(_("Hostname of the instance."),
                          type=attributes.Schema.STRING),
        HREF:
        attributes.Schema(_("Api endpoint reference of the instance."),
                          type=attributes.Schema.STRING),
    }

    default_client_name = 'trove'

    def __init__(self, name, json_snippet, stack):
        """Initialize the resource and reset cached trove state."""
        super(OSDBInstance, self).__init__(name, json_snippet, stack)
        # Lazily-populated caches for the trove instance and its href.
        self._dbinstance = None
        self._href = None

    @property
    def dbinstance(self):
        """Fetch (and cache) the trove dbinstance for this resource."""
        if self.resource_id and not self._dbinstance:
            self._dbinstance = self.client().instances.get(self.resource_id)
        return self._dbinstance

    def _dbinstance_name(self):
        """Return the configured name, or a generated physical name."""
        return self.properties[self.NAME] or self.physical_resource_name()

    def handle_create(self):
        '''
        Create cloud database instance.

        Converts the users, databases and networks properties into the
        formats expected by troveclient, then starts instance creation
        and records the new instance id.
        '''
        self.flavor = self.client_plugin().get_flavor_id(
            self.properties[self.FLAVOR])
        self.volume = {'size': self.properties[self.SIZE]}
        self.databases = self.properties[self.DATABASES]
        self.users = self.properties[self.USERS]
        restore_point = self.properties[self.RESTORE_POINT]
        if restore_point:
            restore_point = {"backupRef": restore_point}
        zone = self.properties[self.AVAILABILITY_ZONE]
        self.datastore_type = self.properties[self.DATASTORE_TYPE]
        self.datastore_version = self.properties[self.DATASTORE_VERSION]
        replica_of = self.properties[self.REPLICA_OF]
        replica_count = self.properties[self.REPLICA_COUNT]

        # convert user databases to format required for troveclient.
        # that is, list of database dictionaries
        for user in self.users:
            dbs = [{'name': db} for db in user.get(self.USER_DATABASES, [])]
            user[self.USER_DATABASES] = dbs

        # convert networks to format required by troveclient
        nics = []
        for nic in self.properties[self.NICS]:
            nic_dict = {}
            net = nic.get(self.NET)
            if net:
                if self.is_using_neutron():
                    net_id = (
                        self.client_plugin('neutron').find_neutron_resource(
                            nic, self.NET, 'network'))
                else:
                    net_id = (
                        self.client_plugin('nova').get_nova_network_id(net))
                nic_dict['net-id'] = net_id
            port = nic.get(self.PORT)
            if port:
                neutron = self.client_plugin('neutron')
                # Resolve the port from the NIC entry itself (as the net
                # branch does) — the top-level resource properties have no
                # PORT key, so looking it up there could never succeed.
                nic_dict['port-id'] = neutron.find_neutron_resource(
                    nic, self.PORT, 'port')
            ip = nic.get(self.V4_FIXED_IP)
            if ip:
                nic_dict['v4-fixed-ip'] = ip
            nics.append(nic_dict)

        # create db instance
        instance = self.client().instances.create(
            self._dbinstance_name(),
            self.flavor,
            volume=self.volume,
            databases=self.databases,
            users=self.users,
            restorePoint=restore_point,
            availability_zone=zone,
            datastore=self.datastore_type,
            datastore_version=self.datastore_version,
            nics=nics,
            replica_of=replica_of,
            replica_count=replica_count)
        self.resource_id_set(instance.id)

        return instance.id

    def _refresh_instance(self, instance_id):
        """Fetch the trove instance, tolerating OverLimit responses.

        Returns the instance object, or None if the trove API answered
        with an over-limit error (so callers can retry later). Any other
        error is re-raised.
        """
        try:
            return self.client().instances.get(instance_id)
        except Exception as exc:
            if not self.client_plugin().is_over_limit(exc):
                raise
            LOG.warn(
                _LW("Stack %(name)s (%(id)s) received an "
                    "OverLimit response during instance.get():"
                    " %(exception)s"), {
                        'name': self.stack.name,
                        'id': self.stack.id,
                        'exception': exc
                    })
            return None

    def check_create_complete(self, instance_id):
        """Check if cloud DB instance creation is complete."""
        # Refresh attributes from the trove API; None means an over-limit
        # response, so just poll again later.
        instance = self._refresh_instance(instance_id)
        if instance is None:
            return False

        status = instance.status
        if status in self.BAD_STATUSES:
            reason = self.TROVE_STATUS_REASON.get(status, _("Unknown"))
            raise resource.ResourceInError(resource_status=status,
                                           status_reason=reason)

        if status != self.ACTIVE:
            return False

        LOG.info(
            _LI("Database instance %(database)s created (flavor:%("
                "flavor)s,volume:%(volume)s, datastore:%("
                "datastore_type)s, datastore_version:%("
                "datastore_version)s)"), {
                    'database': self._dbinstance_name(),
                    'flavor': self.flavor,
                    'volume': self.volume,
                    'datastore_type': self.datastore_type,
                    'datastore_version': self.datastore_version
                })
        return True

    def handle_check(self):
        """Verify the database instance is in the ACTIVE state."""
        current = self.client().instances.get(self.resource_id).status
        self._verify_check_conditions([
            {'attr': 'status',
             'expected': self.ACTIVE,
             'current': current},
        ])

    def handle_delete(self):
        """Delete a cloud database instance."""
        if not self.resource_id:
            return

        try:
            instance = self.client().instances.get(self.resource_id)
        except Exception as ex:
            # Already gone: nothing to delete, nothing to wait for.
            self.client_plugin().ignore_not_found(ex)
            return
        instance.delete()
        return instance.id

    def check_delete_complete(self, instance_id):
        """Check for completion of cloud DB instance deletion."""
        if not instance_id:
            return True

        try:
            # The trove instance may keep being reported for some time
            # after the delete call; poll until it raises not-found.
            self._refresh_instance(instance_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return True
        return False

    def validate(self):
        """Validate any of the provided params."""
        res = super(OSDBInstance, self).validate()
        if res:
            return res

        self.client_plugin().validate_datastore(
            self.properties[self.DATASTORE_TYPE],
            self.properties[self.DATASTORE_VERSION],
            self.DATASTORE_TYPE,
            self.DATASTORE_VERSION)

        # Users may only reference databases declared on this resource.
        users = self.properties[self.USERS]
        if users:
            databases = self.properties[self.DATABASES]
            if not databases:
                msg = _('Databases property is required if users property '
                        'is provided for resource %s.') % self.name
                raise exception.StackValidationFailed(message=msg)

            db_names = {db[self.DATABASE_NAME] for db in databases}
            for user in users:
                missing_db = [db_name for db_name in user[self.USER_DATABASES]
                              if db_name not in db_names]
                if missing_db:
                    msg = (_('Database %(dbs)s specified for user does '
                             'not exist in databases for resource %(name)s.') %
                           {'dbs': missing_db, 'name': self.name})
                    raise exception.StackValidationFailed(message=msg)

        # Each NIC must name exactly one of net or port; ports need neutron.
        is_neutron = self.is_using_neutron()
        for nic in self.properties[self.NICS]:
            if nic.get(self.PORT) and not is_neutron:
                msg = _("Can not use %s property on Nova-network.") % self.PORT
                raise exception.StackValidationFailed(message=msg)

            if bool(nic.get(self.NET)) == bool(nic.get(self.PORT)):
                msg = _("Either %(net)s or %(port)s must be provided.") % {
                    'net': self.NET,
                    'port': self.PORT
                }
                raise exception.StackValidationFailed(message=msg)

    def href(self):
        """Return (and cache) the 'self' link URL of the db instance."""
        if self._href or not self.dbinstance:
            return self._href

        links = self.dbinstance.links
        if not links:
            self._href = None
        else:
            for link in links:
                if link['rel'] == 'self':
                    self._href = link[self.HREF]
                    break
        return self._href

    def _resolve_attribute(self, name):
        """Resolve the hostname and href resource attributes."""
        if name == self.HOSTNAME:
            return self.dbinstance.hostname
        if name == self.HREF:
            return self.href()
# Esempio n. 30
class DockerContainer(resource.Resource):
    """Heat resource that manages the lifecycle of a docker container."""

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    # Template property names (constants on the left, template keys on
    # the right).
    PROPERTIES = (
        DOCKER_ENDPOINT, HOSTNAME, USER, MEMORY, PORT_SPECS,
        PRIVILEGED, TTY, OPEN_STDIN, STDIN_ONCE, ENV, CMD, DNS,
        IMAGE, VOLUMES, VOLUMES_FROM, PORT_BINDINGS, LINKS, NAME,
        RESTART_POLICY, CAP_ADD, CAP_DROP, READ_ONLY, CPU_SHARES,
        DEVICES, CPU_SET
    ) = (
        'docker_endpoint', 'hostname', 'user', 'memory', 'port_specs',
        'privileged', 'tty', 'open_stdin', 'stdin_once', 'env', 'cmd', 'dns',
        'image', 'volumes', 'volumes_from', 'port_bindings', 'links', 'name',
        'restart_policy', 'cap_add', 'cap_drop', 'read_only', 'cpu_shares',
        'devices', 'cpu_set'
    )

    # Attribute names resolvable through _resolve_attribute.
    ATTRIBUTES = (
        INFO, NETWORK_INFO, NETWORK_IP, NETWORK_GATEWAY,
        NETWORK_TCP_PORTS, NETWORK_UDP_PORTS, LOGS, LOGS_HEAD,
        LOGS_TAIL,
    ) = (
        'info', 'network_info', 'network_ip', 'network_gateway',
        'network_tcp_ports', 'network_udp_ports', 'logs', 'logs_head',
        'logs_tail',
    )

    # Keys accepted inside the restart_policy map property.
    _RESTART_POLICY_KEYS = (
        POLICY_NAME, POLICY_MAXIMUM_RETRY_COUNT,
    ) = (
        'Name', 'MaximumRetryCount',
    )

    # Keys accepted inside each entry of the devices list property.
    _DEVICES_KEYS = (
        PATH_ON_HOST, PATH_IN_CONTAINER, PERMISSIONS
    ) = (
        'path_on_host', 'path_in_container', 'permissions'
    )

    # Linux kernel capability names accepted by cap_add / cap_drop.
    _CAPABILITIES = ['SETPCAP', 'SYS_MODULE', 'SYS_RAWIO', 'SYS_PACCT',
                     'SYS_ADMIN', 'SYS_NICE', 'SYS_RESOURCE', 'SYS_TIME',
                     'SYS_TTY_CONFIG', 'MKNOD', 'AUDIT_WRITE',
                     'AUDIT_CONTROL', 'MAC_OVERRIDE', 'MAC_ADMIN',
                     'NET_ADMIN', 'SYSLOG', 'CHOWN', 'NET_RAW',
                     'DAC_OVERRIDE', 'FOWNER', 'DAC_READ_SEARCH', 'FSETID',
                     'KILL', 'SETGID', 'SETUID', 'LINUX_IMMUTABLE',
                     'NET_BIND_SERVICE', 'NET_BROADCAST', 'IPC_LOCK',
                     'IPC_OWNER', 'SYS_CHROOT', 'SYS_PTRACE', 'SYS_BOOT',
                     'LEASE', 'SETFCAP', 'WAKE_ALARM', 'BLOCK_SUSPEND', 'ALL']

    # Declarative schema for the template properties listed in PROPERTIES.
    properties_schema = {
        DOCKER_ENDPOINT: properties.Schema(
            properties.Schema.STRING,
            _('Docker daemon endpoint (by default the local docker daemon '
              'will be used).'),
            default=None
        ),
        HOSTNAME: properties.Schema(
            properties.Schema.STRING,
            _('Hostname of the container.'),
            default=''
        ),
        USER: properties.Schema(
            properties.Schema.STRING,
            _('Username or UID.'),
            default=''
        ),
        MEMORY: properties.Schema(
            properties.Schema.INTEGER,
            _('Memory limit (Bytes).'),
            default=0
        ),
        PORT_SPECS: properties.Schema(
            properties.Schema.LIST,
            _('TCP/UDP ports mapping.'),
            default=None
        ),
        PORT_BINDINGS: properties.Schema(
            properties.Schema.MAP,
            _('TCP/UDP ports bindings.'),
        ),
        LINKS: properties.Schema(
            properties.Schema.MAP,
            _('Links to other containers.'),
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the container.'),
        ),
        PRIVILEGED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Enable extended privileges.'),
            default=False
        ),
        TTY: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Allocate a pseudo-tty.'),
            default=False
        ),
        OPEN_STDIN: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Open stdin.'),
            default=False
        ),
        STDIN_ONCE: properties.Schema(
            properties.Schema.BOOLEAN,
            _('If true, close stdin after the 1 attached client disconnects.'),
            default=False
        ),
        ENV: properties.Schema(
            properties.Schema.LIST,
            _('Set environment variables.'),
        ),
        CMD: properties.Schema(
            properties.Schema.LIST,
            _('Command to run after spawning the container.'),
            default=[]
        ),
        DNS: properties.Schema(
            properties.Schema.LIST,
            _('Set custom dns servers.'),
        ),
        IMAGE: properties.Schema(
            properties.Schema.STRING,
            _('Image name.')
        ),
        VOLUMES: properties.Schema(
            properties.Schema.MAP,
            _('Create a bind mount.'),
            default={}
        ),
        VOLUMES_FROM: properties.Schema(
            properties.Schema.LIST,
            _('Mount all specified volumes.'),
            default=''
        ),
        # Properties below are gated on a minimum docker API version; see
        # _validate_arg_for_api_version.
        RESTART_POLICY: properties.Schema(
            properties.Schema.MAP,
            _('Restart policies (only supported for API version >= 1.2.0).'),
            schema={
                POLICY_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('The behavior to apply when the container exits.'),
                    default='no',
                    constraints=[
                        constraints.AllowedValues(['no', 'on-failure',
                                                   'always']),
                    ]
                ),
                POLICY_MAXIMUM_RETRY_COUNT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('A maximum restart count for the '
                      'on-failure policy.'),
                    default=0
                )
            },
            default={},
            support_status=support.SupportStatus(version='2015.1')
        ),
        CAP_ADD: properties.Schema(
            properties.Schema.LIST,
            _('Be used to add kernel capabilities (only supported for '
              'API version >= 1.2.0).'),
            schema=properties.Schema(
                properties.Schema.STRING,
                _('The security features provided by Linux kernels.'),
                constraints=[
                    constraints.AllowedValues(_CAPABILITIES),
                ]
            ),
            default=[],
            support_status=support.SupportStatus(version='2015.1')
        ),
        CAP_DROP: properties.Schema(
            properties.Schema.LIST,
            _('Be used to drop kernel capabilities (only supported for '
              'API version >= 1.2.0).'),
            schema=properties.Schema(
                properties.Schema.STRING,
                _('The security features provided by Linux kernels.'),
                constraints=[
                    constraints.AllowedValues(_CAPABILITIES),
                ]
            ),
            default=[],
            support_status=support.SupportStatus(version='2015.1')
        ),
        READ_ONLY: properties.Schema(
            properties.Schema.BOOLEAN,
            _('If true, mount the container\'s root filesystem '
              'as read only (only supported for API version >= %s).') %
            MIN_API_VERSION_MAP['read_only'],
            default=False,
            support_status=support.SupportStatus(version='2015.1'),
        ),
        CPU_SHARES: properties.Schema(
            properties.Schema.INTEGER,
            _('Relative weight which determines the allocation of the CPU '
              'processing power(only supported for API version >= %s).') %
            MIN_API_VERSION_MAP['cpu_shares'],
            default=0,
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        DEVICES: properties.Schema(
            properties.Schema.LIST,
            _('Device mappings (only supported for API version >= %s).') %
            MIN_API_VERSION_MAP['devices'],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    PATH_ON_HOST: properties.Schema(
                        properties.Schema.STRING,
                        _('The device path on the host.'),
                        constraints=[
                            constraints.Length(max=255),
                            constraints.AllowedPattern(DEVICE_PATH_REGEX),
                        ],
                        required=True
                    ),
                    PATH_IN_CONTAINER: properties.Schema(
                        properties.Schema.STRING,
                        _('The device path of the container'
                          ' mappings to the host.'),
                        constraints=[
                            constraints.Length(max=255),
                            constraints.AllowedPattern(DEVICE_PATH_REGEX),
                        ],
                    ),
                    PERMISSIONS: properties.Schema(
                        properties.Schema.STRING,
                        _('The permissions of the container to'
                          ' read/write/create the devices.'),
                        constraints=[
                            constraints.AllowedValues(['r', 'w', 'm',
                                                       'rw', 'rm', 'wm',
                                                       'rwm']),
                        ],
                        default='rwm'
                    )
                }
            ),
            default=[],
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        CPU_SET: properties.Schema(
            properties.Schema.STRING,
            _('The CPUs in which to allow execution '
              '(only supported for API version >= %s).') %
            MIN_API_VERSION_MAP['cpu_set'],
            support_status=support.SupportStatus(version='5.0.0'),
        )
    }

    # Declarative schema for the attributes listed in ATTRIBUTES; values
    # are produced at runtime by _resolve_attribute.
    attributes_schema = {
        INFO: attributes.Schema(
            _('Container info.')
        ),
        NETWORK_INFO: attributes.Schema(
            _('Container network info.')
        ),
        NETWORK_IP: attributes.Schema(
            _('Container ip address.')
        ),
        NETWORK_GATEWAY: attributes.Schema(
            _('Container ip gateway.')
        ),
        NETWORK_TCP_PORTS: attributes.Schema(
            _('Container TCP ports.')
        ),
        NETWORK_UDP_PORTS: attributes.Schema(
            _('Container UDP ports.')
        ),
        LOGS: attributes.Schema(
            _('Container logs.')
        ),
        LOGS_HEAD: attributes.Schema(
            _('Container first logs line.')
        ),
        LOGS_TAIL: attributes.Schema(
            _('Container last logs line.')
        ),
    }

    def get_client(self):
        """Return a docker client for the configured endpoint.

        Returns None when the docker library is not installed.
        """
        if not DOCKER_INSTALLED:
            return None
        endpoint = self.properties.get(self.DOCKER_ENDPOINT)
        if endpoint:
            return docker.Client(endpoint)
        return docker.Client()

    def _parse_networkinfo_ports(self, networkinfo):
        """Flatten the docker 'Ports' mapping into host-port strings.

        :param networkinfo: dict with a 'Ports' mapping of
            '<port>/<proto>' -> [{'HostPort': ...}, ...] (or None for
            unpublished ports).
        :returns: tuple of comma-separated (tcp_ports, udp_ports).
        """
        tcp = []
        udp = []
        # Plain dict.items() works on both Python 2 and 3; six is not
        # needed here (the list-vs-view difference is irrelevant since we
        # only iterate).
        for port, info in networkinfo['Ports'].items():
            parts = port.split('/')
            # Skip unpublished or malformed entries.
            if not info or len(parts) != 2 or 'HostPort' not in info[0]:
                continue
            host_port = info[0]['HostPort']
            if parts[1] == 'tcp':
                tcp.append(host_port)
            elif parts[1] == 'udp':
                udp.append(host_port)
        return (','.join(tcp), ','.join(udp))

    def _container_networkinfo(self, client, resource_id):
        """Return the container's NetworkSettings with flattened ports.

        :param client: docker client to query.
        :param resource_id: id of the container to inspect.
        :returns: the 'NetworkSettings' dict augmented with 'TcpPorts'
            and 'UdpPorts' comma-separated host-port strings.
        """
        # Bug fix: the original ignored the resource_id argument and
        # inspected self.resource_id. All current callers pass
        # self.resource_id, so honoring the parameter preserves behavior
        # while making the helper usable for any container id.
        info = client.inspect_container(resource_id)
        networkinfo = info['NetworkSettings']
        tcp_ports, udp_ports = self._parse_networkinfo_ports(networkinfo)
        networkinfo['TcpPorts'] = tcp_ports
        networkinfo['UdpPorts'] = udp_ports
        return networkinfo

    def _resolve_attribute(self, name):
        """Resolve container attributes by querying the docker daemon."""
        if not self.resource_id:
            return
        if name == 'info':
            return self.get_client().inspect_container(self.resource_id)
        if name == 'network_info':
            client = self.get_client()
            return self._container_networkinfo(client, self.resource_id)
        # Attributes derived from a single key of the network settings.
        network_keys = {
            'network_ip': 'IPAddress',
            'network_gateway': 'Gateway',
            'network_tcp_ports': 'TcpPorts',
            'network_udp_ports': 'UdpPorts',
        }
        if name in network_keys:
            client = self.get_client()
            networkinfo = self._container_networkinfo(client, self.resource_id)
            return networkinfo[network_keys[name]]
        if name in ('logs', 'logs_head', 'logs_tail'):
            logs = self.get_client().logs(self.resource_id)
            if name == 'logs':
                return logs
            lines = logs.split('\n')
            return lines[0] if name == 'logs_head' else lines.pop()

    def handle_create(self):
        """Pull the image, create the container, and start it.

        Returns the new container id, which is also recorded as the
        resource id.
        """
        create_args = {
            'image': self.properties[self.IMAGE],
            'command': self.properties[self.CMD],
            'hostname': self.properties[self.HOSTNAME],
            'user': self.properties[self.USER],
            'stdin_open': self.properties[self.OPEN_STDIN],
            'tty': self.properties[self.TTY],
            'mem_limit': self.properties[self.MEMORY],
            'ports': self.properties[self.PORT_SPECS],
            'environment': self.properties[self.ENV],
            'dns': self.properties[self.DNS],
            'volumes': self.properties[self.VOLUMES],
            'name': self.properties[self.NAME],
            'cpu_shares': self.properties[self.CPU_SHARES],
            'cpuset': self.properties[self.CPU_SET]
        }
        client = self.get_client()
        # Make sure the image is available locally before creating.
        client.pull(self.properties[self.IMAGE])
        result = client.create_container(**create_args)
        container_id = result['Id']
        self.resource_id_set(container_id)

        # Only forward start options the template actually set.
        start_args = {}

        if self.properties[self.PRIVILEGED]:
            start_args[self.PRIVILEGED] = True
        if self.properties[self.VOLUMES]:
            start_args['binds'] = self.properties[self.VOLUMES]
        if self.properties[self.VOLUMES_FROM]:
            start_args['volumes_from'] = self.properties[self.VOLUMES_FROM]
        if self.properties[self.PORT_BINDINGS]:
            start_args['port_bindings'] = self.properties[self.PORT_BINDINGS]
        if self.properties[self.LINKS]:
            start_args['links'] = self.properties[self.LINKS]
        if self.properties[self.RESTART_POLICY]:
            start_args['restart_policy'] = self.properties[self.RESTART_POLICY]
        if self.properties[self.CAP_ADD]:
            start_args['cap_add'] = self.properties[self.CAP_ADD]
        if self.properties[self.CAP_DROP]:
            start_args['cap_drop'] = self.properties[self.CAP_DROP]
        if self.properties[self.READ_ONLY]:
            start_args[self.READ_ONLY] = True
        # NOTE(review): device mappings are skipped when privileged is set,
        # presumably because a privileged container already has device
        # access -- confirm against docker API semantics.
        if (self.properties[self.DEVICES] and
                not self.properties[self.PRIVILEGED]):
            start_args['devices'] = self._get_mapping_devices(
                self.properties[self.DEVICES])

        client.start(container_id, **start_args)
        return container_id

    def _get_mapping_devices(self, devices):
        """Convert device property maps to 'host:container:perms' strings.

        When no container path is given, the host path is reused on the
        container side.
        """
        mapped = []
        for dev in devices:
            host_path = dev[self.PATH_ON_HOST]
            container_path = dev[self.PATH_IN_CONTAINER] or host_path
            mapped.append(':'.join([host_path,
                                    container_path,
                                    dev[self.PERMISSIONS]]))
        return mapped

    def _get_container_status(self, container_id):
        """Return the 'State' section of the container's inspect data."""
        return self.get_client().inspect_container(container_id)['State']

    def check_create_complete(self, container_id):
        """Return True once the container is running.

        Raises ResourceInError (with the container logs as the reason)
        if the container exited with a non-zero status.
        """
        status = self._get_container_status(container_id)
        exit_status = status.get('ExitCode')
        if exit_status is not None and exit_status != 0:
            # Consistency fix: use the container_id argument for the logs
            # fetch, matching the status lookup above (handle_create
            # returns the container id, so the value is the same).
            logs = self.get_client().logs(container_id)
            raise exception.ResourceInError(resource_status=self.FAILED,
                                            status_reason=logs)
        return status['Running']

    def handle_delete(self):
        """Kill the container, tolerating it being already gone."""
        if self.resource_id is None:
            return
        try:
            self.get_client().kill(self.resource_id)
        except docker.errors.APIError as ex:
            # 404: the container no longer exists -- nothing to kill.
            if ex.response.status_code != 404:
                raise
        return self.resource_id

    def check_delete_complete(self, container_id):
        """Deletion is complete once the container is gone or stopped."""
        if container_id is None:
            return True
        try:
            status = self._get_container_status(container_id)
        except docker.errors.APIError as ex:
            if ex.response.status_code == 404:
                return True
            raise
        return not status['Running']

    def handle_suspend(self):
        """Stop the running container."""
        if not self.resource_id:
            return
        self.get_client().stop(self.resource_id)
        return self.resource_id

    def check_suspend_complete(self, container_id):
        """Suspension is complete once the container stops running."""
        return not self._get_container_status(container_id)['Running']

    def handle_resume(self):
        """Start the stopped container again."""
        if not self.resource_id:
            return
        self.get_client().start(self.resource_id)
        return self.resource_id

    def check_resume_complete(self, container_id):
        """Resume is complete once the container reports Running."""
        return self._get_container_status(container_id)['Running']

    def validate(self):
        """Validate properties, including docker API version gating."""
        super(DockerContainer, self).validate()
        self._validate_arg_for_api_version()

    def _validate_arg_for_api_version(self):
        """Reject version-gated properties the docker API can't serve.

        The daemon's ApiVersion is fetched lazily, only when at least one
        gated property is actually set.
        """
        version = None
        for arg, min_version in MIN_API_VERSION_MAP.items():
            if not self.properties[arg]:
                continue
            if not version:
                version = self.get_client().version()['ApiVersion']
            if compare_version(min_version, version) < 0:
                raise InvalidArgForVersion(arg=arg,
                                           min_version=min_version)