Example #1
File: quobyte.py  Project: sasimpson/cinder
    def do_setup(self, context):
        """Any initialization the volume driver does while starting."""
        self.set_nas_security_options(is_new_cinder_install=False)
        super(QuobyteDriver, self).do_setup(context)

        self.shares = {}  # address : options
        self._nova = compute.API()
Example #2
    def do_setup(self, context):
        """Any initialization the volume driver does while starting."""
        super(GlusterfsDriver, self).do_setup(context)

        self._nova = compute.API()

        config = self.configuration.glusterfs_shares_config
        if not config:
            msg = (_("There's no Gluster config file configured (%s)") %
                   'glusterfs_shares_config')
            LOG.warn(msg)
            raise exception.GlusterfsException(msg)
        if not os.path.exists(config):
            msg = (_("Gluster config file at %(config)s doesn't exist") % {
                'config': config
            })
            LOG.warn(msg)
            raise exception.GlusterfsException(msg)

        self.shares = {}

        try:
            self._execute('mount.glusterfs', check_exit_code=False)
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                raise exception.GlusterfsException(
                    _('mount.glusterfs is not installed'))
            else:
                raise

        self._refresh_mounts()
Example #3
File: quobyte.py  Project: openstack/cinder
    def do_setup(self, context):
        """Any initialization the volume driver does while starting."""
        super(QuobyteDriver, self).do_setup(context)

        self.set_nas_security_options(is_new_cinder_install=False)
        self.shares = {}  # address : options
        self._nova = compute.API()
        self.base = self.configuration.quobyte_mount_point_base
        if self.configuration.quobyte_overlay_volumes:
            if not (self.configuration.quobyte_qcow2_volumes
                    and self.configuration.quobyte_volume_from_snapshot_cache):
                self.configuration.quobyte_overlay_volumes = False
                LOG.warning("Configuration of quobyte_qcow2_volumes and "
                            "quobyte_volume_from_snapshot_cache is "
                            "incompatible with "
                            "quobyte_overlay_volumes=True. "
                            "quobyte_overlay_volumes "
                            "setting will be ignored.")
Example #4
    def _migrate_volume_generic(self, ctxt, volume, host):
        rpcapi = volume_rpcapi.VolumeAPI()

        # Create new volume on remote host
        new_vol_values = {}
        for k, v in volume.iteritems():
            new_vol_values[k] = v
        del new_vol_values['id']
        del new_vol_values['_name_id']
        # We don't copy volume_type because the db sets that according to
        # volume_type_id, which we do copy
        del new_vol_values['volume_type']
        new_vol_values['host'] = host['host']
        new_vol_values['status'] = 'creating'
        new_vol_values['migration_status'] = 'target:%s' % volume['id']
        new_vol_values['attach_status'] = 'detached'
        new_volume = self.db.volume_create(ctxt, new_vol_values)
        rpcapi.create_volume(ctxt, new_volume, host['host'],
                             None, None)

        # Wait for new_volume to become ready
        starttime = time.time()
        deadline = starttime + CONF.migration_create_volume_timeout_secs
        new_volume = self.db.volume_get(ctxt, new_volume['id'])
        tries = 0
        while new_volume['status'] != 'available':
            tries = tries + 1
            now = time.time()
            if new_volume['status'] == 'error':
                msg = _("failed to create new_volume on destination host")
                raise exception.VolumeMigrationFailed(reason=msg)
            elif now > deadline:
                msg = _("timeout creating new_volume on destination host")
                raise exception.VolumeMigrationFailed(reason=msg)
            else:
                time.sleep(tries ** 2)
            new_volume = self.db.volume_get(ctxt, new_volume['id'])

        # Copy the source volume to the destination volume
        try:
            if volume['status'] == 'available':
                self.driver.copy_volume_data(ctxt, volume, new_volume,
                                             remote='dest')
                # The above call is synchronous so we complete the migration
                self.migrate_volume_completion(ctxt, volume['id'],
                                               new_volume['id'], error=False)
            else:
                nova_api = compute.API()
                # This is an async call to Nova, which will call the completion
                # when it's done
                nova_api.update_server_volume(ctxt, volume['instance_uuid'],
                                              volume['id'], new_volume['id'])
        except Exception:
            with excutils.save_and_reraise_exception():
                msg = _("Failed to copy volume %(vol1)s to %(vol2)s")
                LOG.error(msg % {'vol1': volume['id'],
                                 'vol2': new_volume['id']})
                volume = self.db.volume_get(ctxt, volume['id'])
                # If we're in the completing phase don't delete the target
                # because we may have already deleted the source!
                if volume['migration_status'] == 'migrating':
                    rpcapi.delete_volume(ctxt, new_volume)
                new_volume['migration_status'] = None
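
The wait loop in Example #4 polls the new volume's status with a quadratic backoff (sleeping tries ** 2 seconds between checks) until the volume becomes available, reports an error, or the configured deadline passes. A minimal standalone sketch of the same pattern follows; the get_status callable and timeout_secs parameter are hypothetical stand-ins for the driver's db lookup and CONF setting, not part of the original code.

import time


def wait_until_available(get_status, timeout_secs):
    """Poll get_status() with quadratic backoff until it returns 'available'.

    Hypothetical helper for illustration only; raises RuntimeError on an
    'error' status or when timeout_secs elapses, mirroring the checks in
    the example above.
    """
    deadline = time.time() + timeout_secs
    tries = 0
    status = get_status()
    while status != 'available':
        tries += 1
        if status == 'error':
            raise RuntimeError('volume creation failed on destination host')
        if time.time() > deadline:
            raise RuntimeError('timed out creating volume on destination host')
        # Back off 1s, 4s, 9s, ... between polls, as in the example above.
        time.sleep(tries ** 2)
        status = get_status()
    return status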