Example #1
    def do_setup(self, context):
        """Any initialization the volume driver does while starting."""
        super(GlusterfsDriver, self).do_setup(context)

        config = self.configuration.glusterfs_shares_config
        if not config:
            msg = (_("There's no Gluster config file configured (%s)") %
                   'glusterfs_shares_config')
            LOG.warn(msg)
            raise exception.GlusterfsException(msg)
        if not os.path.exists(config):
            msg = (_("Gluster config file at %(config)s doesn't exist") %
                   locals())
            LOG.warn(msg)
            raise exception.GlusterfsException(msg)

        self.shares = {}

        try:
            self._execute('mount.glusterfs', check_exit_code=False)
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                raise exception.GlusterfsException(
                    _('mount.glusterfs is not installed'))
            else:
                raise
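
The probe via _execute('mount.glusterfs', check_exit_code=False) only needs to establish that the mount helper exists: a failing exit status is ignored, while an OSError with errno.ENOENT means the binary is missing entirely. A minimal standalone sketch of the same pattern, using subprocess instead of Cinder's _execute wrapper (the function name here is illustrative, not driver API):

    import errno
    import subprocess

    def mount_helper_installed(binary='mount.glusterfs'):
        """Return True if the mount helper can be executed at all."""
        try:
            # As with check_exit_code=False above, the exit status is
            # irrelevant; only a missing binary counts as an error.
            subprocess.call([binary],
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                return False
            raise
        return True
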
Example #2
    def do_setup(self, context):
        """Any initialization the volume driver does while starting."""
        super(GlusterfsDriver, self).do_setup(context)

        LOG.warning(
            _LW("The GlusterFS volume driver is deprecated and "
                "will be removed during the Ocata cycle."))

        config = self.configuration.glusterfs_shares_config
        if not config:
            msg = (_("There's no Gluster config file configured (%s)") %
                   'glusterfs_shares_config')
            LOG.warning(msg)
            raise exception.GlusterfsException(msg)
        if not os.path.exists(config):
            msg = (_("Gluster config file at %(config)s doesn't exist") % {
                'config': config
            })
            LOG.warning(msg)
            raise exception.GlusterfsException(msg)

        self.shares = {}

        try:
            self._execute('mount.glusterfs', check_exit_code=False)
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                raise exception.GlusterfsException(
                    _('mount.glusterfs is not installed'))
            else:
                raise

        self._refresh_mounts()
Example #3
    def _write_info_file(self, info_path, snap_info):
        if 'active' not in snap_info:
            msg = _("'active' must be present when writing snap_info.")
            raise exception.GlusterfsException(msg)

        with open(info_path, 'w') as f:
            json.dump(snap_info, f, indent=1, sort_keys=True)
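
With indent=1 and sort_keys=True, the info file written above is a small, stable JSON document. A rough illustration (path and contents are hypothetical, matching the format described in the _create_snapshot docstring further down):

    import json

    snap_info = {'active': 'volume-1234.aaaa', 'aaaa': 'volume-1234.aaaa'}
    with open('/tmp/volume-1234.info', 'w') as f:
        json.dump(snap_info, f, indent=1, sort_keys=True)

    # The file then contains:
    # {
    #  "aaaa": "volume-1234.aaaa",
    #  "active": "volume-1234.aaaa"
    # }
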
    def _qemu_img_info_base(self, path, volume_name, basedir):
        """Sanitize image_utils' qemu_img_info.

        This code expects to deal only with relative filenames.
        """

        info = image_utils.qemu_img_info(path)
        if info.image:
            info.image = os.path.basename(info.image)
        if info.backing_file:
            backing_file_template = (
                "(%(basedir)s/[0-9a-f]+/)?"
                "%(volname)s(.(tmp-snap-)?[0-9a-f-]+)?$" % {
                    'basedir': basedir,
                    'volname': volume_name
                })
            if not re.match(backing_file_template, info.backing_file):
                msg = _("File %(path)s has invalid backing file "
                        "%(bfile)s, aborting.") % {
                            'path': path,
                            'bfile': info.backing_file
                        }
                raise exception.GlusterfsException(msg)

            info.backing_file = os.path.basename(info.backing_file)

        return info
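
Substituting concrete values makes the backing-file check above easier to read: the template only accepts names produced by the driver's own file-naming scheme. A quick sketch (the basedir and volume name are assumptions, not taken from the driver):

    import re

    basedir = '/var/lib/cinder/mnt'     # assumed mount base
    volume_name = 'volume-1234'

    template = ("(%(basedir)s/[0-9a-f]+/)?"
                "%(volname)s(.(tmp-snap-)?[0-9a-f-]+)?$"
                % {'basedir': basedir, 'volname': volume_name})

    for name in ('volume-1234',
                 'volume-1234.aaaa',
                 '/var/lib/cinder/mnt/0f1e2d/volume-1234.tmp-snap-aaaa',
                 '../../etc/passwd'):
        print(name, bool(re.match(template, name)))
    # The first three match; the traversal attempt does not.
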
Example #5
    def _ensure_share_writable(self, path):
        """Ensure that the Cinder user can write to the share.

        If not, raise an exception.

        :param path: path to test
        :raises: GlusterfsException
        :returns: None
        """

        prefix = '.cinder-write-test-' + str(os.getpid()) + '-'

        try:
            tempfile.NamedTemporaryFile(prefix=prefix, dir=path)
        except OSError:
            msg = _('GlusterFS share at %(dir)s is not writable by the '
                    'Cinder volume service. Snapshot operations will not be '
                    'supported.') % {'dir': path}
            raise exception.GlusterfsException(msg)
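
The probe works because creating any file in the directory exercises write permission, and NamedTemporaryFile removes the file again as soon as it is closed or garbage-collected. A standalone sketch of the same test (function name illustrative):

    import os
    import tempfile

    def share_is_writable(path):
        prefix = '.write-test-' + str(os.getpid()) + '-'
        try:
            # Creating the file is the whole test; it is deleted on close.
            with tempfile.NamedTemporaryFile(prefix=prefix, dir=path):
                pass
        except OSError:
            return False
        return True
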
Example #6
    def _delete_snapshot_online(self, context, snapshot, info):
        # Update info over the course of this method
        # active file never changes
        info_path = self._local_path_volume(snapshot['volume']) + '.info'
        snap_info = self._read_info_file(info_path)

        if info['active_file'] == info['snapshot_file']:
            # blockRebase/Pull base into active
            # info['base'] => snapshot_file

            file_to_delete = info['base_file']
            if info['base_id'] is None:
                # Passing base=none to blockRebase ensures that
                # libvirt blanks out the qcow2 backing file pointer
                new_base = None
            else:
                new_base = info['new_base_file']
                snap_info[info['base_id']] = info['snapshot_file']

            delete_info = {
                'file_to_merge': new_base,
                'merge_target_file': None,  # current
                'type': 'qcow2',
                'volume_id': snapshot['volume']['id']
            }

            del snap_info[snapshot['id']]
        else:
            # blockCommit snapshot into base
            # info['base'] <= snapshot_file
            # delete record of snapshot
            file_to_delete = info['snapshot_file']

            delete_info = {
                'file_to_merge': info['snapshot_file'],
                'merge_target_file': info['base_file'],
                'type': 'qcow2',
                'volume_id': snapshot['volume']['id']
            }

            del snap_info[snapshot['id']]

        try:
            self._nova.delete_volume_snapshot(context, snapshot['id'],
                                              delete_info)
        except Exception as e:
            LOG.error(_('Call to Nova delete snapshot failed'))
            LOG.exception(e)
            raise

        # Loop and wait for result
        # Nova will call Cinderclient to update the status in the database
        # An update of progress = '90%' means that Nova is done
        seconds_elapsed = 0
        increment = 1
        timeout = 7200
        while True:
            s = db.snapshot_get(context, snapshot['id'])

            if s['status'] == 'deleting':
                if s['progress'] == '90%':
                    # Nova tasks completed successfully
                    break
                else:
                    msg = ('status of snapshot %s is '
                           'still "deleting"... waiting') % snapshot['id']
                    LOG.debug(msg)
                    time.sleep(increment)
                    seconds_elapsed += increment
            else:
                msg = _('Unable to delete snapshot %(id)s, '
                        'status: %(status)s.') % {
                            'id': snapshot['id'],
                            'status': s['status']
                        }
                raise exception.GlusterfsException(msg)

            if 10 < seconds_elapsed <= 20:
                increment = 2
            elif 20 < seconds_elapsed <= 60:
                increment = 5
            elif 60 < seconds_elapsed:
                increment = 10

            if seconds_elapsed > timeout:
                msg = (_('Timed out while waiting for Nova update '
                         'for deletion of snapshot %(id)s.') %
                       {'id': snapshot['id']})
                raise exception.GlusterfsException(msg)

        # Write info file updated above
        self._write_info_file(info_path, snap_info)

        # Delete stale file
        path_to_delete = os.path.join(
            self._local_volume_dir(snapshot['volume']), file_to_delete)
        self._execute('rm', '-f', path_to_delete, run_as_root=True)
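
The wait loop above polls the snapshot status with a stepped backoff: one-second polls for the first ten seconds, then 2, 5, and finally 10 seconds between checks, up to the two-hour timeout. A condensed sketch of just that schedule (poll_done is a hypothetical callable returning True once Nova reports completion):

    import time

    def wait_with_backoff(poll_done, timeout=7200):
        seconds_elapsed = 0
        increment = 1
        while not poll_done():
            time.sleep(increment)
            seconds_elapsed += increment
            # Back off: 1s polls for the first 10s, then 2s, 5s, 10s.
            if 10 < seconds_elapsed <= 20:
                increment = 2
            elif 20 < seconds_elapsed <= 60:
                increment = 5
            elif 60 < seconds_elapsed:
                increment = 10
            if seconds_elapsed > timeout:
                raise RuntimeError('timed out waiting for Nova')
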
Example #7
    def _delete_snapshot(self, snapshot):
        """Delete a snapshot.

        If volume status is 'available', delete snapshot here in Cinder
        using qemu-img.

        If volume status is 'in-use', calculate what qcow2 files need to
        merge, and call to Nova to perform this operation.

        :raises: InvalidVolume if status not acceptable
        :raises: GlusterfsException(msg) if operation fails
        :returns: None

        """

        LOG.debug('deleting snapshot %s' % snapshot['id'])

        volume_status = snapshot['volume']['status']
        if volume_status not in ['available', 'in-use']:
            msg = _('Volume status must be "available" or "in-use".')
            raise exception.InvalidVolume(msg)

        self._ensure_share_writable(self._local_volume_dir(snapshot['volume']))

        # Determine the true snapshot file for this snapshot
        #  based on the .info file
        info_path = self._local_path_volume(snapshot['volume']) + '.info'
        snap_info = self._read_info_file(info_path, empty_if_missing=True)

        if snapshot['id'] not in snap_info:
            # If snapshot info file is present, but snapshot record does not
            # exist, do not attempt to delete.
            # (This happens, for example, if snapshot_create failed due to lack
            # of permission to write to the share.)
            LOG.info(
                _('Snapshot record for %s is not present, allowing '
                  'snapshot_delete to proceed.') % snapshot['id'])
            return

        snapshot_file = snap_info[snapshot['id']]
        LOG.debug('snapshot_file for this snap is %s' % snapshot_file)

        snapshot_path = '%s/%s' % (self._local_volume_dir(
            snapshot['volume']), snapshot_file)

        snapshot_path_img_info = self._qemu_img_info(snapshot_path)

        vol_path = self._local_volume_dir(snapshot['volume'])

        # Find what file has this as its backing file
        active_file = self.get_active_image_from_info(snapshot['volume'])
        active_file_path = '%s/%s' % (vol_path, active_file)

        if volume_status == 'in-use':
            # Online delete
            context = snapshot['context']

            base_file = snapshot_path_img_info.backing_file
            if base_file is None:
                # There should always be at least the original volume
                # file as base.
                msg = _('No backing file found for %s, allowing snapshot '
                        'to be deleted.') % snapshot_path
                LOG.warn(msg)

                # Snapshot may be stale, so just delete it and update the
                # info file instead of blocking
                return self._delete_stale_snapshot(snapshot)

            base_path = os.path.join(
                self._local_volume_dir(snapshot['volume']), base_file)
            base_file_img_info = self._qemu_img_info(base_path)
            new_base_file = base_file_img_info.backing_file

            base_id = None
            info_path = self._local_path_volume(snapshot['volume']) + '.info'
            snap_info = self._read_info_file(info_path)
            for key, value in snap_info.items():
                if value == base_file and key != 'active':
                    base_id = key
                    break
            if base_id is None:
                # This means we are deleting the oldest snapshot
                msg = 'No base_id found for %s' % snapshot_file
                LOG.debug(msg)

            online_delete_info = {
                'active_file': active_file,
                'snapshot_file': snapshot_file,
                'base_file': base_file,
                'base_id': base_id,
                'new_base_file': new_base_file
            }

            return self._delete_snapshot_online(context, snapshot,
                                                online_delete_info)

        if snapshot_file == active_file:
            # Need to merge snapshot_file into its backing file
            # There is no top file
            #      T0       |        T1         |
            #     base      |   snapshot_file   | None
            # (guaranteed to|  (being deleted)  |
            #    exist)     |                   |

            base_file = snapshot_path_img_info.backing_file

            self._qemu_img_commit(snapshot_path)
            self._execute('rm', '-f', snapshot_path, run_as_root=True)

            # Remove snapshot_file from info
            info_path = self._local_path_volume(snapshot['volume']) + '.info'
            snap_info = self._read_info_file(info_path)

            del snap_info[snapshot['id']]
            # Active file has changed
            snap_info['active'] = base_file
            self._write_info_file(info_path, snap_info)
        else:
            #     T0       |       T1        |      T2         |       T3
            #    base      |  snapshot_file  |  higher_file    |  highest_file
            # (guaranteed  | (being deleted) | (guaranteed to  | (may exist,
            #  to exist,   |                 |  exist, being   |  needs ptr
            #  not used    |                 |  committed      |  update if so)
            #  here)       |                 |  down)          |

            backing_chain = self._get_backing_chain_for_path(
                snapshot['volume'], active_file_path)
            # This file is guaranteed to exist since we aren't operating on
            # the active file.
            higher_file = next(
                (os.path.basename(f['filename']) for f in backing_chain
                 if f.get('backing-filename', '') == snapshot_file), None)
            if higher_file is None:
                msg = (_('No file found with %s as backing file.') %
                       snapshot_file)
                raise exception.GlusterfsException(msg)

            snap_info = self._read_info_file(info_path)
            higher_id = next(
                (i for i in snap_info
                 if snap_info[i] == higher_file and i != 'active'), None)
            if higher_id is None:
                msg = (_('No snap found with %s as backing file.') %
                       higher_file)
                raise exception.GlusterfsException(msg)

            # Is there a file depending on higher_file?
            highest_file = next(
                (os.path.basename(f['filename']) for f in backing_chain
                 if f.get('backing-filename', '') == higher_file), None)
            if highest_file is None:
                msg = 'No file depends on %s.' % higher_file
                LOG.debug(msg)

            # Committing higher_file into snapshot_file
            # And update pointer in highest_file
            higher_file_path = '%s/%s' % (vol_path, higher_file)
            self._qemu_img_commit(higher_file_path)
            if highest_file is not None:
                highest_file_path = '%s/%s' % (vol_path, highest_file)
                info = self._qemu_img_info(snapshot_path)
                snapshot_file_fmt = info.file_format

                backing_fmt = ('-F', snapshot_file_fmt)
                self._execute('qemu-img',
                              'rebase',
                              '-u',
                              '-b',
                              snapshot_file,
                              highest_file_path,
                              *backing_fmt,
                              run_as_root=True)
            self._execute('rm', '-f', higher_file_path, run_as_root=True)

            # Remove snapshot_file from info
            info_path = self._local_path_volume(snapshot['volume']) + '.info'
            snap_info = self._read_info_file(info_path)
            del snap_info[snapshot['id']]
            snap_info[higher_id] = snapshot_file
            if higher_file == active_file:
                if highest_file is not None:
                    msg = _('Check condition failed: '
                            '%s expected to be None.') % 'highest_file'
                    raise exception.GlusterfsException(msg)
                # Active file has changed
                snap_info['active'] = snapshot_file
            self._write_info_file(info_path, snap_info)
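
For the offline middle-of-chain case, the heavy lifting reduces to a qemu-img commit followed by an unsafe rebase, as in the T0..T3 diagram above. A rough standalone sketch (function and arguments are illustrative; the driver itself goes through _execute with run_as_root):

    import os
    import subprocess

    def merge_middle_snapshot(vol_dir, snap_file, higher_file,
                              highest_file=None):
        # Fold higher_file down into snap_file, its backing file.
        subprocess.check_call(
            ['qemu-img', 'commit', os.path.join(vol_dir, higher_file)])
        if highest_file is not None:
            # -u rewrites only the backing-file pointer; no data is copied.
            subprocess.check_call(
                ['qemu-img', 'rebase', '-u', '-b', snap_file, '-F', 'qcow2',
                 os.path.join(vol_dir, highest_file)])
        # The merged-down overlay is no longer part of the chain.
        os.remove(os.path.join(vol_dir, higher_file))
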
Example #8
    def _create_snapshot(self, snapshot):
        """Create a snapshot.

        If volume is attached, call to Nova to create snapshot,
        providing a qcow2 file.
        Otherwise, create locally with qemu-img.

        A file named volume-<uuid>.info is stored with the volume
        data and is a JSON table which contains a mapping between
        Cinder snapshot UUIDs and filenames, as these associations
        will change as snapshots are deleted.


        Basic snapshot operation:

        1. Initial volume file:
            volume-1234

        2. Snapshot created:
            volume-1234  <- volume-1234.aaaa

            volume-1234.aaaa becomes the new "active" disk image.
            If the volume is not attached, this filename will be used to
            attach the volume to a VM at volume-attach time.
            If the volume is attached, the VM will switch to this file as
            part of the snapshot process.

            Note that volume-1234.aaaa represents changes after snapshot
            'aaaa' was created.  So the data for snapshot 'aaaa' is actually
            in the backing file(s) of volume-1234.aaaa.

            This file has a qcow2 header recording the fact that volume-1234 is
            its backing file.  Delta changes since the snapshot was created are
            stored in this file, and the backing file (volume-1234) does not
            change.

            info file: { 'active': 'volume-1234.aaaa',
                         'aaaa':   'volume-1234.aaaa' }

        3. Second snapshot created:
            volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb

            volume-1234.bbbb now becomes the "active" disk image, recording
            changes made to the volume.

            info file: { 'active': 'volume-1234.bbbb',
                         'aaaa':   'volume-1234.aaaa',
                         'bbbb':   'volume-1234.bbbb' }

        4. First snapshot deleted:
            volume-1234 <- volume-1234.aaaa(* now with bbbb's data)

            volume-1234.aaaa is removed (logically) from the snapshot chain.
            The data from volume-1234.bbbb is merged into it.

            (*) Since bbbb's data was committed into the aaaa file, we have
                "removed" aaaa's snapshot point but the .aaaa file now
                represents snapshot with id "bbbb".


            info file: { 'active': 'volume-1234.bbbb',
                         'bbbb':   'volume-1234.aaaa'   (* changed!)
                       }

        5. Second snapshot deleted:
            volume-1234

            volume-1234.bbbb is removed from the snapshot chain, as above.
            The base image, volume-1234, becomes the active image for this
            volume again.  If in-use, the VM begins using the volume-1234.bbbb
            file immediately as part of the snapshot delete process.

            info file: { 'active': 'volume-1234' }

        For the above operations, Cinder handles manipulation of qcow2 files
        when the volume is detached.  When attached, Cinder creates and deletes
        qcow2 files, but Nova is responsible for transitioning the VM between
        them and handling live transfers of data between files as required.
        """

        status = snapshot['volume']['status']
        if status not in ['available', 'in-use']:
            msg = _('Volume status must be "available" or "in-use" '
                    'to take a snapshot (current status: %s).') % status
            raise exception.InvalidVolume(msg)

        if status == 'in-use':
            # Perform online snapshot via Nova
            context = snapshot['context']

            backing_filename = self.get_active_image_from_info(
                snapshot['volume'])
            path_to_disk = self._local_path_volume(snapshot['volume'])
            new_snap_path = '%s.%s' % (path_to_disk, snapshot['id'])

            self._create_qcow2_snap_file(snapshot, backing_filename,
                                         new_snap_path)

            connection_info = {
                'type': 'qcow2',
                'new_file': os.path.basename(new_snap_path),
                'snapshot_id': snapshot['id']
            }

            try:
                result = self._nova.create_volume_snapshot(
                    context, snapshot['volume_id'], connection_info)
                LOG.debug('nova call result: %s' % result)
            except Exception as e:
                LOG.error(_('Call to Nova to create snapshot failed'))
                LOG.exception(e)
                raise

            # Loop and wait for result
            # Nova will call Cinderclient to update the status in the database
            # An update of progress = '90%' means that Nova is done
            seconds_elapsed = 0
            increment = 1
            timeout = 600
            while True:
                s = db.snapshot_get(context, snapshot['id'])

                if s['status'] == 'creating':
                    if s['progress'] == '90%':
                        # Nova tasks completed successfully
                        break

                    time.sleep(increment)
                    seconds_elapsed += increment
                elif s['status'] == 'error':
                    msg = _('Nova returned "error" status '
                            'while creating snapshot.')
                    raise exception.GlusterfsException(msg)

                LOG.debug('Status of snapshot %(id)s is now %(status)s' % {
                    'id': snapshot['id'],
                    'status': s['status']
                })

                if 10 < seconds_elapsed <= 20:
                    increment = 2
                elif 20 < seconds_elapsed <= 60:
                    increment = 5
                elif 60 < seconds_elapsed:
                    increment = 10

                if seconds_elapsed > timeout:
                    msg = _('Timed out while waiting for Nova update '
                            'for creation of snapshot %s.') % snapshot['id']
                    raise exception.GlusterfsException(msg)

            info_path = self._local_path_volume(snapshot['volume']) + '.info'
            snap_info = self._read_info_file(info_path, empty_if_missing=True)
            snap_info['active'] = os.path.basename(new_snap_path)
            snap_info[snapshot['id']] = os.path.basename(new_snap_path)
            self._write_info_file(info_path, snap_info)

            return

        LOG.debug('create snapshot: %s' % snapshot)
        LOG.debug('volume id: %s' % snapshot['volume_id'])

        path_to_disk = self._local_path_volume(snapshot['volume'])
        self._create_snapshot_offline(snapshot, path_to_disk)
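
_create_snapshot_offline is not shown here, but per the docstring the offline path boils down to creating a new qcow2 overlay, named <volume file>.<snapshot id>, backed by the current active file. A minimal sketch of that step (the function and its arguments are illustrative, not the driver's actual helper):

    import os
    import subprocess

    def create_offline_overlay(vol_dir, volume_file, active_file, snap_id):
        # e.g. volume-1234.aaaa, backed by whichever file is active now
        # (volume_file and active_file differ once snapshots exist).
        new_file = '%s.%s' % (volume_file, snap_id)
        subprocess.check_call(
            ['qemu-img', 'create', '-f', 'qcow2',
             '-o', 'backing_file=%s,backing_fmt=qcow2' % active_file,
             os.path.join(vol_dir, new_file)])
        return new_file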