Example #1
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        prg_attach = None
        prg_detach = None
        if prop_diff:
            # Even though some combinations of changed properties
            # could be handled via UpdateReplace, we still detach the
            # old resource first so that self.resource_id is not
            # replaced prematurely.
            volume_id = self.properties[self.VOLUME_ID]
            server_id = self._stored_properties_data.get(self.INSTANCE_ID)
            self.client_plugin('nova').detach_volume(server_id,
                                                     self.resource_id)
            prg_detach = progress.VolumeDetachProgress(server_id, volume_id,
                                                       self.resource_id)
            prg_detach.called = True

            if self.VOLUME_ID in prop_diff:
                volume_id = prop_diff.get(self.VOLUME_ID)

            device = self.properties[self.DEVICE]
            if self.DEVICE in prop_diff:
                device = prop_diff.get(self.DEVICE)

            if self.INSTANCE_ID in prop_diff:
                server_id = prop_diff.get(self.INSTANCE_ID)
            prg_attach = progress.VolumeAttachProgress(server_id, volume_id,
                                                       device)

        return prg_detach, prg_attach
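
What drives the returned pair to completion is not shown here. Below is a minimal sketch of such a polling check, assuming (none of this is taken from the example) that the progress objects expose srv_id, vol_id, attach_id, device, called and complete attributes and that the nova client plugin offers check_detach_volume_complete, attach_volume and check_attach_volume_complete helpers with the signatures used.

    def check_update_complete(self, checkers):
        # Hypothetical completion check; the attribute names srv_id, vol_id,
        # attach_id, device, complete and the client-plugin helpers used
        # below are assumptions made for illustration.
        prg_detach, prg_attach = checkers
        if prg_detach is None and prg_attach is None:
            return True
        nova_plugin = self.client_plugin('nova')
        if not prg_detach.complete:
            # Poll until the old attachment is gone.
            prg_detach.complete = nova_plugin.check_detach_volume_complete(
                prg_detach.srv_id, prg_detach.attach_id)
            return False
        if not prg_attach.called:
            # Start the new attachment and remember its id.
            self.resource_id_set(nova_plugin.attach_volume(
                prg_attach.srv_id, prg_attach.vol_id, prg_attach.device))
            prg_attach.called = True
            return False
        if not prg_attach.complete:
            prg_attach.complete = nova_plugin.check_attach_volume_complete(
                prg_attach.vol_id)
        return prg_attach.complete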
Example #2
    def _detach_attach_progress(self, vol):
        prg_attach = None
        prg_detach = None
        if vol.attachments:
            # NOTE(pshchelo):
            # this relies on the current behavior of cinder attachments,
            # i.e. volume attachments is a list with len <= 1, so the
            # volume can be attached to only a single instance, and the
            # id of the attachment is the same as the id of the volume
            # it describes, so detaching/attaching the same volume will
            # not change the volume attachment id.
            server_id = vol.attachments[0]['server_id']
            device = vol.attachments[0]['device']
            attachment_id = vol.attachments[0]['id']
            prg_detach = progress.VolumeDetachProgress(server_id, vol.id,
                                                       attachment_id)
            prg_attach = progress.VolumeAttachProgress(server_id, vol.id,
                                                       device)

        return prg_detach, prg_attach
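
For context, a hedged sketch of one way this helper could be called from a resize path; self.client().volumes.get and progress.VolumeResizeProgress follow the pattern of Example #4, while the _resize_progress method name and new_size parameter are made up for illustration.

    def _resize_progress(self, new_size):
        # Hypothetical caller: build the resize progress plus, when the
        # volume is currently attached, the detach/attach pair needed
        # around the resize.
        vol = self.client().volumes.get(self.resource_id)
        prg_resize = progress.VolumeResizeProgress(size=new_size)
        prg_detach, prg_attach = self._detach_attach_progress(vol)
        return prg_detach, prg_resize, prg_attach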
Example #3
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        prg_attach = None
        prg_detach = None
        if prop_diff:
            # Even though some combinations of changed properties
            # could be handled via UpdateReplace, we still detach the
            # old resource first so that self.resource_id is not
            # replaced prematurely.
            volume_id = self.properties[self.VOLUME_ID]
            server_id = self.properties[self.INSTANCE_ID]

            prg_detach = progress.VolumeDetachProgress(
                server_id, volume_id, self.resource_id)

            # Check the server's OS-EXT-STS:task_state before detaching
            server = self.client_plugin('nova').fetch_server(server_id)
            task_state = getattr(server, 'OS-EXT-STS:task_state', None)
            # Wait until the server is out of any resize step
            # (including resize_finish)
            if task_state is not None and 'resize' in task_state:
                prg_detach.called = False
            else:
                self.client_plugin('nova').detach_volume(server_id,
                                                         self.resource_id)
                prg_detach.called = True

            if self.VOLUME_ID in prop_diff:
                volume_id = prop_diff.get(self.VOLUME_ID)

            device = self.properties[self.DEVICE] or None
            if self.DEVICE in prop_diff:
                device = prop_diff[self.DEVICE] or None

            if self.INSTANCE_ID in prop_diff:
                server_id = prop_diff.get(self.INSTANCE_ID)
            prg_attach = progress.VolumeAttachProgress(
                server_id, volume_id, device)

        return prg_detach, prg_attach
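
Because this variant can leave prg_detach.called as False while the server is still resizing, the completion side has to retry the detach later. A minimal sketch of just that retry, assuming a srv_id attribute on the progress object and the made-up _retry_deferred_detach name; the rest of the polling would follow the sketch after Example #1.

    def _retry_deferred_detach(self, prg_detach):
        # Hypothetical helper for the completion side: retry the detach
        # that handle_update deferred while the server was resizing.
        # The srv_id attribute name is an assumption.
        if prg_detach.called:
            return True
        nova_plugin = self.client_plugin('nova')
        server = nova_plugin.fetch_server(prg_detach.srv_id)
        task_state = getattr(server, 'OS-EXT-STS:task_state', None)
        if task_state is not None and 'resize' in task_state:
            # Still in some resize step; keep waiting.
            return False
        nova_plugin.detach_volume(prg_detach.srv_id, self.resource_id)
        prg_detach.called = True
        return False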
Example #4
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        vol = None
        cinder = self.client()
        prg_resize = None
        prg_attach = None
        prg_detach = None
        prg_backup_restore = None
        # update the name and description for cinder volume
        if self.NAME in prop_diff or self.DESCRIPTION in prop_diff:
            vol = cinder.volumes.get(self.resource_id)
            update_name = (prop_diff.get(self.NAME)
                           or self.properties[self.NAME])
            update_description = (prop_diff.get(self.DESCRIPTION)
                                  or self.properties[self.DESCRIPTION])
            kwargs = self._fetch_name_and_description(
                cinder.volume_api_version, update_name, update_description)
            cinder.volumes.update(vol, **kwargs)
        # update the metadata for cinder volume
        if self.METADATA in prop_diff:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)
            metadata = prop_diff.get(self.METADATA)
            cinder.volumes.update_all_metadata(vol, metadata)
        # retype
        if self.VOLUME_TYPE in prop_diff:
            if cinder.volume_api_version == 1:
                LOG.info(
                    _LI('Volume type update not supported '
                        'by Cinder API V1.'))
                raise exception.NotSupported(
                    feature=_('Using Cinder API V1, volume_type update'))
            else:
                if not vol:
                    vol = cinder.volumes.get(self.resource_id)
                new_vol_type = prop_diff.get(self.VOLUME_TYPE)
                cinder.volumes.retype(vol, new_vol_type, 'never')
        # update read_only access mode
        if self.READ_ONLY in prop_diff:
            flag = prop_diff.get(self.READ_ONLY)
            cinder.volumes.update_readonly_flag(self.resource_id, flag)
        # restore the volume from backup
        if self.BACKUP_ID in prop_diff:
            prg_backup_restore = progress.VolumeBackupRestoreProgress(
                vol_id=self.resource_id,
                backup_id=prop_diff.get(self.BACKUP_ID))
        # extend volume size
        if self.SIZE in prop_diff:
            if not vol:
                vol = cinder.volumes.get(self.resource_id)

            new_size = prop_diff[self.SIZE]
            if new_size < vol.size:
                raise exception.NotSupported(feature=_("Shrinking volume"))

            elif new_size > vol.size:
                prg_resize = progress.VolumeResizeProgress(size=new_size)
                if vol.attachments:
                    # NOTE(pshchelo):
                    # this relies on the current behavior of cinder
                    # attachments, i.e. volume attachments is a list with
                    # len <= 1, so the volume can be attached to only a
                    # single instance, and the id of the attachment is the
                    # same as the id of the volume it describes, so
                    # detaching/attaching the same volume will not change
                    # the volume attachment id.
                    server_id = vol.attachments[0]['server_id']
                    device = vol.attachments[0]['device']
                    attachment_id = vol.attachments[0]['id']
                    prg_detach = progress.VolumeDetachProgress(
                        server_id, vol.id, attachment_id)
                    prg_attach = progress.VolumeAttachProgress(
                        server_id, vol.id, device)

        return prg_backup_restore, prg_detach, prg_resize, prg_attach
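
The four progress objects only make sense in one order: restore the backup, detach, resize, then re-attach, which is exactly the order of the returned tuple. A compact ordering sketch, where _poll_progress is a made-up placeholder for per-step checks like the one sketched after Example #1.

    def check_update_complete(self, checkers):
        # Hypothetical ordering sketch: drive the steps strictly in the
        # sequence restore-backup, detach, resize, re-attach. The
        # _poll_progress helper is a placeholder, not a real API.
        for prg in checkers:
            if prg is not None and not self._poll_progress(prg):
                return False
        return True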
Example #5
    def handle_create(self):
        security_groups = self._get_security_groups()

        userdata = self.properties[self.USER_DATA] or ''
        flavor = self.properties[self.INSTANCE_TYPE]
        availability_zone = self.properties[self.AVAILABILITY_ZONE]

        image_name = self.properties[self.IMAGE_ID]

        image_id = self.client_plugin('glance').find_image_by_name_or_id(
            image_name)

        flavor_id = self.client_plugin().find_flavor_by_name_or_id(flavor)

        scheduler_hints = {}
        if self.properties[self.NOVA_SCHEDULER_HINTS]:
            for tm in self.properties[self.NOVA_SCHEDULER_HINTS]:
                # adopted from novaclient shell
                hint = tm[self.NOVA_SCHEDULER_HINT_KEY]
                hint_value = tm[self.NOVA_SCHEDULER_HINT_VALUE]
                if hint in scheduler_hints:
                    if isinstance(scheduler_hints[hint], six.string_types):
                        scheduler_hints[hint] = [scheduler_hints[hint]]
                    scheduler_hints[hint].append(hint_value)
                else:
                    scheduler_hints[hint] = hint_value
        else:
            scheduler_hints = None
        scheduler_hints = self._scheduler_hints(scheduler_hints)
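        # For illustration (the values are made up): two hint entries with
        # the same key, e.g. key 'group' with values 'a' and 'b', accumulate
        # into {'group': ['a', 'b']} above, while a key that appears once
        # keeps its plain string value.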

        nics = self._build_nics(self.properties[self.NETWORK_INTERFACES],
                                security_groups=security_groups,
                                subnet_id=self.properties[self.SUBNET_ID])

        block_device_mapping = self._build_block_device_mapping(
            self.properties.get(self.BLOCK_DEVICE_MAPPINGS))

        server = None

        try:
            server = self.client().servers.create(
                name=self.physical_resource_name(),
                image=image_id,
                flavor=flavor_id,
                key_name=self.properties[self.KEY_NAME],
                security_groups=security_groups,
                userdata=self.client_plugin().build_userdata(
                    self.metadata_get(), userdata, 'ec2-user'),
                meta=self._get_nova_metadata(self.properties),
                scheduler_hints=scheduler_hints,
                nics=nics,
                availability_zone=availability_zone,
                block_device_mapping=block_device_mapping)
        finally:
            # Avoid a race condition where the thread could be cancelled
            # before the ID is stored
            if server is not None:
                self.resource_id_set(server.id)

        creator = progress.ServerCreateProgress(server.id)
        attachers = []
        for vol_id, device in self.volumes():
            attachers.append(
                progress.VolumeAttachProgress(self.resource_id, vol_id,
                                              device))
        return creator, tuple(attachers)
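
The (creator, attachers) tuple would similarly be polled until the server is active and every volume is attached. A minimal sketch, assuming the nova client plugin provides check_create_server_complete, attach_volume and check_attach_volume_complete helpers and that the progress objects carry the attributes used below; none of these names come from the example.

    def check_create_complete(self, cookie):
        # Hypothetical completion check for the (creator, attachers) tuple;
        # the helper names and the progress attributes used here are
        # assumptions made for illustration.
        creator, attachers = cookie
        nova_plugin = self.client_plugin()
        if not creator.complete:
            # Wait for the server itself to go ACTIVE first.
            creator.complete = nova_plugin.check_create_server_complete(
                creator.server_id)
            return False
        for prg in attachers:
            if not prg.called:
                nova_plugin.attach_volume(prg.srv_id, prg.vol_id, prg.device)
                prg.called = True
                return False
            if not prg.complete:
                prg.complete = nova_plugin.check_attach_volume_complete(
                    prg.vol_id)
                if not prg.complete:
                    return False
        return True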