Example #1
    def consume_in_thread(self):
        """Runs the ZmqProxy service"""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        if not os.path.isdir(ipc_dir):
            try:
                utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
                utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
                              ipc_dir, run_as_root=True)
                utils.execute('chmod', '750', ipc_dir, run_as_root=True)
            except utils.ProcessExecutionError:
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Could not create IPC directory %s") %
                              (ipc_dir, ))

        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL,
                          out_bind=True)
        except zmq.ZMQError:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create ZeroMQ receiver daemon. "
                            "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
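
Note: every snippet on this page leans on the same helper, excutils.save_and_reraise_exception(), an OpenStack common-code context manager meant to be entered inside an except block. It lets the handler log or roll back state and then re-raises the original exception, and it also logs that original exception if the cleanup body itself fails, so the root cause is not silently lost. A minimal sketch of the idea (not the oslo.utils implementation, and simplified) could look like this:

import contextlib
import logging
import sys

LOG = logging.getLogger(__name__)


@contextlib.contextmanager
def save_and_reraise_exception():
    # Assumes we are already inside an `except` block, so there is an
    # exception being handled that we can capture and later re-raise.
    exc = sys.exc_info()[1]
    try:
        yield
    except Exception:
        # The cleanup body itself failed: log the exception we were about
        # to re-raise so it is not lost, then let the new one propagate.
        LOG.error("Original exception being dropped: %r", exc)
        raise
    # Cleanup succeeded: re-raise the original exception.
    raise exc

Typical call shape, matching the examples on this page: inside "except Exception:", open "with save_and_reraise_exception():", do the logging or status update in the body, and the original exception keeps propagating afterwards.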
Example #2
    def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False):
        """Migrate the volume to the specified host (called on source host)."""
        volume_ref = self.db.volume_get(ctxt, volume_id)
        model_update = None
        moved = False

        self.db.volume_update(ctxt, volume_ref["id"], {"migration_status": "migrating"})
        if not force_host_copy:
            try:
                LOG.debug(_("volume %s: calling driver migrate_volume"), volume_ref["id"])
                moved, model_update = self.driver.migrate_volume(ctxt, volume_ref, host)
                if moved:
                    updates = {"host": host["host"], "migration_status": None}
                    if model_update:
                        updates.update(model_update)
                    volume_ref = self.db.volume_update(ctxt, volume_ref["id"], updates)
            except Exception:
                with excutils.save_and_reraise_exception():
                    updates = {"migration_status": None}
                    model_update = self.driver.create_export(ctxt, volume_ref)
                    if model_update:
                        updates.update(model_update)
                    self.db.volume_update(ctxt, volume_ref["id"], updates)
        if not moved:
            try:
                self._migrate_volume_generic(ctxt, volume_ref, host)
            except Exception:
                with excutils.save_and_reraise_exception():
                    updates = {"migration_status": None}
                    model_update = self.driver.create_export(ctxt, volume_ref)
                    if model_update:
                        updates.update(model_update)
                    self.db.volume_update(ctxt, volume_ref["id"], updates)
Example #3
    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Required IPC directory does not exist at"
                                " %s") % (ipc_dir, ))
        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Permission denied to IPC directory at"
                                " %s") % (ipc_dir, ))
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create ZeroMQ receiver daemon. "
                            "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
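
Examples 1 and 3 differ mainly in how the IPC directory is created: the first shells out to mkdir/chown/chmod as root, while the second calls os.makedirs() and only treats the failure as fatal if the directory still does not exist afterwards. On Python 3.2+ that second, EAFP-style check can be collapsed into a single call; a small sketch (the path below is illustrative, not a value taken from the examples):

import os

ipc_dir = "/tmp/zmq-ipc-example"   # illustrative path only

# makedirs() with exist_ok=True succeeds if the directory already exists
# and raises only when it genuinely cannot be created.
os.makedirs(ipc_dir, exist_ok=True)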
Example #4
    def _get_cinder_cascaded_admin_client(self):

        try:
            kwargs = {'username': cfg.CONF.cinder_username,
                      'password': cfg.CONF.admin_password,
                      'tenant_name': CONF.cinder_tenant_name,
                      'auth_url': cfg.CONF.keystone_auth_url,
                      'insecure': True
                      }

            keystoneclient = kc.Client(**kwargs)
            cinderclient = cinder_client.Client(
                username=cfg.CONF.cinder_username,
                auth_url=cfg.CONF.keystone_auth_url,
                insecure=True)
            cinderclient.client.auth_token = keystoneclient.auth_ref.auth_token
            diction = {'project_id': cfg.CONF.cinder_tenant_id}
            cinderclient.client.management_url = \
                cfg.CONF.cascaded_cinder_url % diction

            return cinderclient
        except keystone_exception.Unauthorized:
            with excutils.save_and_reraise_exception():
                LOG.error(_('Token unauthorized failed for keystoneclient '
                            'constructed when get cascaded admin client'))
        except cinder_exception.Unauthorized:
            with excutils.save_and_reraise_exception():
                LOG.error(_('Token unauthorized failed for cascaded '
                            'cinderClient constructed'))
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_('Failed to get cinder python client.'))
Example #5
    def get_nameserver_info(self):
        """Get name server data from fabric.

        This method will return the connected node port wwn list(local
        and remote) for the given switch fabric
        """
        cli_output = None
        return_list = []
        try:
            cli_output = self._get_switch_info([ZoneConstant.NS_SHOW])
        except exception.BrocadeZoningCliException:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Failed collecting nsshow "
                            "info for fabric %s"), self.switch_ip)
        if (cli_output):
            return_list = self._parse_ns_output(cli_output)
        try:
            cli_output = self._get_switch_info([ZoneConstant.NS_CAM_SHOW])
        except exception.BrocadeZoningCliException:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Failed collecting nscamshow "
                            "info for fabric %s"), self.switch_ip)
        if (cli_output):
            return_list.extend(self._parse_ns_output(cli_output))
        cli_output = None
        return return_list
Example #6
    def delete_backup(self, context, backup_id):
        """Delete volume backup from configured backup service."""
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the backup status updated. Fail early since there
            # are no other status to change but backup's
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized as err:
            with excutils.save_and_reraise_exception():
                self.db.backup_update(context, backup_id,
                                      {'status': 'error',
                                       'fail_reason': unicode(err)})

        LOG.info(_('Delete backup started, backup: %s.'), backup_id)
        backup = self.db.backup_get(context, backup_id)
        self.db.backup_update(context, backup_id, {'host': self.host})

        expected_status = 'deleting'
        actual_status = backup['status']
        if actual_status != expected_status:
            err = _('Delete_backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                'expected_status': expected_status,
                'actual_status': actual_status,
            }
            self.db.backup_update(context, backup_id, {'status': 'error',
                                                       'fail_reason': err})
            raise exception.InvalidBackup(reason=err)

        backup_service = self._map_service_to_driver(backup['service'])
        if backup_service is not None:
            configured_service = self.driver_name
            if backup_service != configured_service:
                err = _('Delete backup aborted, the backup service currently'
                        ' configured [%(configured_service)s] is not the'
                        ' backup service that was used to create this'
                        ' backup [%(backup_service)s].') % {
                    'configured_service': configured_service,
                    'backup_service': backup_service,
                }
                self.db.backup_update(context, backup_id,
                                      {'status': 'error'})
                raise exception.InvalidBackup(reason=err)

            try:
                backup_service = self.service.get_backup_driver(context)
                backup_service.delete(backup)
            except Exception as err:
                with excutils.save_and_reraise_exception():
                    self.db.backup_update(context, backup_id,
                                          {'status': 'error',
                                           'fail_reason':
                                           unicode(err)})

        context = context.elevated()
        self.db.backup_destroy(context, backup_id)
        LOG.info(_('Delete backup finished, backup %s deleted.'), backup_id)
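
Examples 6, 10 and several others repeat the same shape: on failure, flip the record into an error status inside save_and_reraise_exception() so the caller still sees the exception. If that pattern recurs often enough, it can be factored into a small helper; the one below is hypothetical (the name and signature are invented for illustration) and assumes the oslo_utils package layout:

import contextlib

from oslo_utils import excutils   # assumed import path for oslo.utils


@contextlib.contextmanager
def backup_error_status_on_failure(db, context, backup_id):
    """Hypothetical helper: mark a backup as errored if the body raises."""
    try:
        yield
    except Exception as err:
        with excutils.save_and_reraise_exception():
            db.backup_update(context, backup_id,
                             {'status': 'error', 'fail_reason': str(err)})

With such a helper, the failure handling in delete_backup() would shrink to something like: with backup_error_status_on_failure(self.db, context, backup_id): backup_service.delete(backup).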
Example #7
    def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
        """Copy data from src_vol to dest_vol."""
        LOG.debug(_('copy_data_between_volumes %(src)s -> %(dest)s.')
                  % {'src': src_vol['name'], 'dest': dest_vol['name']})

        properties = utils.brick_get_connector_properties()
        dest_remote = True if remote in ['dest', 'both'] else False
        dest_orig_status = dest_vol['status']
        try:
            dest_attach_info = self._attach_volume(context,
                                                   dest_vol,
                                                   properties,
                                                   remote=dest_remote)
        except Exception:
            with excutils.save_and_reraise_exception():
                msg = _("Failed to attach volume %(vol)s")
                LOG.error(msg % {'vol': dest_vol['id']})
                self.db.volume_update(context, dest_vol['id'],
                                      {'status': dest_orig_status})

        src_remote = True if remote in ['src', 'both'] else False
        src_orig_status = src_vol['status']
        try:
            src_attach_info = self._attach_volume(context,
                                                  src_vol,
                                                  properties,
                                                  remote=src_remote)
        except Exception:
            with excutils.save_and_reraise_exception():
                msg = _("Failed to attach volume %(vol)s")
                LOG.error(msg % {'vol': src_vol['id']})
                self.db.volume_update(context, src_vol['id'],
                                      {'status': src_orig_status})
                self._copy_volume_data_cleanup(context, dest_vol, properties,
                                               dest_attach_info, dest_remote,
                                               force=True)

        try:
            size_in_mb = int(src_vol['size']) * 1024    # vol size is in GB
            volume_utils.copy_volume(
                src_attach_info['device']['path'],
                dest_attach_info['device']['path'],
                size_in_mb,
                self.configuration.volume_dd_blocksize)
            copy_error = False
        except Exception:
            with excutils.save_and_reraise_exception():
                msg = _("Failed to copy volume %(src)s to %(dest)d")
                LOG.error(msg % {'src': src_vol['id'], 'dest': dest_vol['id']})
                copy_error = True
        finally:
            self._copy_volume_data_cleanup(context, dest_vol, properties,
                                           dest_attach_info, dest_remote,
                                           force=copy_error)
            self._copy_volume_data_cleanup(context, src_vol, properties,
                                           src_attach_info, src_remote,
                                           force=copy_error)
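
The copy_error flag above decides whether the finally block detaches the volumes forcefully: it is only cleared once copy_volume() has finished, so any failure leaves cleanup in forced mode. Reduced to its skeleton, with placeholder callables instead of the Cinder helpers, the control flow is:

def copy_with_cleanup(do_copy, cleanup_src, cleanup_dest):
    # Placeholder sketch: assume failure until the copy completes, so the
    # cleanup in the finally block is forced whenever do_copy() raises.
    copy_error = True
    try:
        do_copy()
        copy_error = False
    finally:
        cleanup_dest(force=copy_error)
        cleanup_src(force=copy_error)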
Example #8
    def _get_cascaded_cinder_client(self, context=None):
        try:
            if context is None:
                cinderclient = cinder_client.Client(
                    auth_url=CONF.keystone_auth_url,
                    region_name=CONF.cascaded_region_name,
                    tenant_id=self.tenant_id,
                    api_key=CONF.admin_password,
                    username=CONF.cinder_username,
                    insecure=True,
                    timeout=30,
                    retries=3)
            else:
                ctx_dict = context.to_dict()

                kwargs = {
                    'auth_url': CONF.keystone_auth_url,
                    'tenant_name': CONF.cinder_tenant_name,
                    'username': CONF.cinder_username,
                    'password': CONF.admin_password,
                    'insecure': True
                }
                keystoneclient = kc.Client(**kwargs)
                management_url = self._get_management_url(
                    keystoneclient,
                    service_type='volumev2',
                    attr='region',
                    endpoint_type='publicURL',
                    filter_value=CONF.cascaded_region_name)

                LOG.info("before replace: management_url:%s", management_url)
                url = management_url.rpartition("/")[0]
                management_url = url + '/' + ctx_dict.get("project_id")

                LOG.info("after replace: management_url:%s", management_url)

                cinderclient = cinder_client.Client(
                    username=ctx_dict.get('user_id'),
                    auth_url=cfg.CONF.keystone_auth_url,
                    insecure=True,
                    timeout=30,
                    retries=3)
                cinderclient.client.auth_token = ctx_dict.get('auth_token')
                cinderclient.client.management_url = management_url

            LOG.info(_("cascade info: os_region_name:%s"), CONF.cascaded_region_name)
            return cinderclient
        except keystone_exception.Unauthorized:
            with excutils.save_and_reraise_exception():
                LOG.error(_('Token unauthorized failed for keystoneclient '
                            'constructed when get cascaded admin client'))
        except cinder_exception.Unauthorized:
            with excutils.save_and_reraise_exception():
                LOG.error(_('Token unauthorized failed for cascaded '
                            'cinderClient constructed'))
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_('Failed to get cinder python client.'))
Example #9
    def _get_mapped_lus(self, k2xa):
        mapped_lus = []
        # refresh cached cluster
        try:
            self._cluster = self._k2aclient.cluster.refresh(self._cluster,
                                                            xa=k2xa.ri())
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("ssp: exception: >%s<") % e)
                msg = _("ssp:"
                        " while discovering mapped LUs,"
                        " failed to refresh cluster"
                        " with cluster_id: >%(self._cluster.id)s<,"
                        " k2xa: >%(k2xa)s<")
                LOG.error(msg % {"self._cluster.id": self._cluster.id,
                                 "k2xa": k2xa.r(), })

        for node in self._cluster.node.node:
            if not node.virtual_io_server:
                LOG.info(_("Node >%s< has no virtual_io_server. "
                           "Skipping.") % str(node.__dict__))
                continue
            node_parts = node.virtual_io_server.split('/')
            ms_id = node_parts[-3]
            vios_id = node_parts[-1]
            try:
                vios = self._k2aclient.\
                    virtualioserver.get(ms_id,
                                        vios_id,
                                        xag=["ViosSCSIMapping"],
                                        xa=k2xa.ri())
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_("ssp: exception: >%s<") % e)
                    msg = _("ssp:"
                            " while discovering mapped LUs,"
                            " in cluster with"
                            " cluster_id: >%(self._cluster.id)s<"
                            " failed to retrieve vios"
                            " with managedsystem_id: >%(ms_id)s< and"
                            " virtualioserver_id: >%(vios_id)s<,"
                            " k2xa: >%(k2xa)s<")
                    LOG.error(msg %
                              {"self._cluster.id": self._cluster.id,
                               "ms_id": ms_id, "vios_id": vios_id,
                               "k2xa": k2xa.r(), })
            for vsm in vios.virtual_scsi_mappings.virtual_scsi_mapping:
                if ((vsm.storage is not None and
                     isinstance(vsm.storage, k2uom.LogicalUnit) and
                     vsm.storage.logical_unit_type == "VirtualIO_Disk")):
                    mapped_lus.append(vsm.storage.unique_device_id)
        return set(mapped_lus)
Example #10
    def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False, new_type_id=None):
        """Migrate the volume to the specified host (called on source host)."""
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the migration status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(ctxt, volume_id, {"migration_status": "error"})

        volume_ref = self.db.volume_get(ctxt, volume_id)
        model_update = None
        moved = False

        status_update = None
        if volume_ref["status"] == "retyping":
            status_update = {"status": self._get_original_status(volume_ref)}

        self.db.volume_update(ctxt, volume_ref["id"], {"migration_status": "migrating"})
        if not force_host_copy and new_type_id is None:
            try:
                LOG.debug(_("volume %s: calling driver migrate_volume"), volume_ref["id"])
                moved, model_update = self.driver.migrate_volume(ctxt, volume_ref, host)
                if moved:
                    updates = {"host": host["host"], "migration_status": None}
                    if status_update:
                        updates.update(status_update)
                    if model_update:
                        updates.update(model_update)
                    volume_ref = self.db.volume_update(ctxt, volume_ref["id"], updates)
            except Exception:
                with excutils.save_and_reraise_exception():
                    updates = {"migration_status": None}
                    if status_update:
                        updates.update(status_update)
                    model_update = self.driver.create_export(ctxt, volume_ref)
                    if model_update:
                        updates.update(model_update)
                    self.db.volume_update(ctxt, volume_ref["id"], updates)
        if not moved:
            try:
                self._migrate_volume_generic(ctxt, volume_ref, host, new_type_id)
            except Exception:
                with excutils.save_and_reraise_exception():
                    updates = {"migration_status": None}
                    if status_update:
                        updates.update(status_update)
                    model_update = self.driver.create_export(ctxt, volume_ref)
                    if model_update:
                        updates.update(model_update)
                    self.db.volume_update(ctxt, volume_ref["id"], updates)
Example #11
    def create_key(self, ctxt, expiration=None, name='Cinder Volume Key',
                   payload_content_type='application/octet-stream', mode='CBC',
                   algorithm='AES', length=256):
        """Creates a key.

        :param ctxt: contains information of the user and the environment
                     for the request (cinder/context.py)
        :param expiration: the date the key will expire
        :param name: a friendly name for the secret
        :param payload_content_type: the format/type of the secret data
        :param mode: the algorithm mode (e.g. CBC or CTR mode)
        :param algorithm: the algorithm associated with the secret
        :param length: the bit length of the secret

        :return: the UUID of the new key
        :throws Exception: if key creation fails
        """
        barbican_client = self._get_barbican_client(ctxt)

        try:
            key_order = barbican_client.orders.create_key(
                name,
                algorithm,
                length,
                mode,
                payload_content_type,
                expiration)
            order_ref = key_order.submit()
            order = barbican_client.orders.get(order_ref)
            secret_uuid = order.secret_ref.rpartition('/')[2]
            return secret_uuid
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error creating key: %s"), (e))
Example #12
    def create_volume(self, context, topic, volume_id, snapshot_id=None,
                      image_id=None, request_spec=None,
                      filter_properties=None):
        try:
            if request_spec is None:
                # For RPC version < 1.2 backward compatibility
                request_spec = {}
                volume_ref = db.volume_get(context, volume_id)
                size = volume_ref.get('size')
                availability_zone = volume_ref.get('availability_zone')
                volume_type_id = volume_ref.get('volume_type_id')
                vol_type = db.volume_type_get(context, volume_type_id)
                volume_properties = {'size': size,
                                     'availability_zone': availability_zone,
                                     'volume_type_id': volume_type_id}
                request_spec.update(
                    {'volume_id': volume_id,
                     'snapshot_id': snapshot_id,
                     'image_id': image_id,
                     'volume_properties': volume_properties,
                     'volume_type': dict(vol_type).iteritems()})

            self.driver.schedule_create_volume(context, request_spec,
                                               filter_properties)
        except exception.NoValidHost as ex:
            volume_state = {'volume_state': {'status': 'error'}}
            self._set_volume_state_and_notify('create_volume',
                                              volume_state,
                                              context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                volume_state = {'volume_state': {'status': 'error'}}
                self._set_volume_state_and_notify('create_volume',
                                                  volume_state,
                                                  context, ex, request_spec)
Example #13
    def migrate_volume_to_host(self, context, topic, volume_id, host,
                               force_host_copy, request_spec,
                               filter_properties=None):
        """Ensure that the host exists and can accept the volume."""

        def _migrate_volume_set_error(self, context, ex, request_spec):
            volume_state = {'volume_state': {'migration_status': None}}
            self._set_volume_state_and_notify('migrate_volume_to_host',
                                              volume_state,
                                              context, ex, request_spec)

        try:
            tgt_host = self.driver.host_passes_filters(context, host,
                                                       request_spec,
                                                       filter_properties)
        except exception.NoValidHost as ex:
            _migrate_volume_set_error(self, context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _migrate_volume_set_error(self, context, ex, request_spec)
        else:
            volume_ref = db.volume_get(context, volume_id)
            volume_rpcapi.VolumeAPI().migrate_volume(context, volume_ref,
                                                     tgt_host,
                                                     force_host_copy)
Example #14
    def delete_snapshot(self, context, snapshot_id):
        """Deletes and unexports snapshot."""
        caller_context = context
        context = context.elevated()
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
        project_id = snapshot_ref['project_id']

        LOG.info(_("snapshot %s: deleting"), snapshot_ref['id'])
        self._notify_about_snapshot_usage(
            context, snapshot_ref, "delete.start")

        try:
            LOG.debug(_("snapshot %s: deleting"), snapshot_ref['id'])

            # Pass context so that drivers that want to use it, can,
            # but it is not a requirement for all drivers.
            snapshot_ref['context'] = caller_context

            self.driver.delete_snapshot(snapshot_ref)
        except exception.SnapshotIsBusy:
            LOG.error(_("Cannot delete snapshot %s: snapshot is busy"),
                      snapshot_ref['id'])
            self.db.snapshot_update(context,
                                    snapshot_ref['id'],
                                    {'status': 'available'})
            return True
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.snapshot_update(context,
                                        snapshot_ref['id'],
                                        {'status': 'error_deleting'})

        # Get reservations
        try:
            if CONF.no_snapshot_gb_quota:
                reserve_opts = {'snapshots': -1}
            else:
                reserve_opts = {
                    'snapshots': -1,
                    'gigabytes': -snapshot_ref['volume_size'],
                }
            volume_ref = self.db.volume_get(context, snapshot_ref['volume_id'])
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume_ref.get('volume_type_id'))
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
        except Exception:
            reservations = None
            LOG.exception(_("Failed to update usages deleting snapshot"))
        self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
        self.db.snapshot_destroy(context, snapshot_id)
        LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['id'])
        self._notify_about_snapshot_usage(context, snapshot_ref, "delete.end")

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)
        return True
Example #15
    def detach_volume(self, context, volume_id):
        """Updates db to show volume is detached"""
        # TODO(vish): refactor this into a more general "unreserve"
        # TODO(sleepsonthefloor): Is this 'elevated' appropriate?

        volume = self.db.volume_get(context, volume_id)
        self._notify_about_volume_usage(context, volume, "detach.start")
        try:
            self.driver.detach_volume(context, volume)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context,
                                      volume_id,
                                      {'status': 'error_detaching'})

        self.db.volume_detached(context.elevated(), volume_id)
        self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
                                             'attached_mode')

        # Check for https://bugs.launchpad.net/cinder/+bug/1065702
        volume = self.db.volume_get(context, volume_id)
        if (volume['provider_location'] and
                volume['name'] not in volume['provider_location']):
            self.driver.ensure_export(context, volume)
        self._notify_about_volume_usage(context, volume, "detach.end")
Example #16
    def _run_ssh(self, cmd_list, check_exit=True, attempts=1):
        utils.check_ssh_injection(cmd_list)
        command = ' '.join(cmd_list)

        if not self.sshpool:
            self.sshpool = utils.SSHPool(self.config.san_ip,
                                         self.config.san_ssh_port,
                                         self.config.ssh_conn_timeout,
                                         self.config.san_login,
                                         password=self.config.san_password,
                                         privatekey=
                                         self.config.san_private_key,
                                         min_size=
                                         self.config.ssh_min_pool_conn,
                                         max_size=
                                         self.config.ssh_max_pool_conn)
        try:
            total_attempts = attempts
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        return self._ssh_execute(ssh, command,
                                                 check_exit_code=check_exit)
                    except Exception as e:
                        LOG.error(e)
                        greenthread.sleep(randint(20, 500) / 100.0)
                msg = (_("SSH Command failed after '%(total_attempts)r' "
                         "attempts : '%(command)s'") %
                       {'total_attempts': total_attempts, 'command': command})
                raise paramiko.SSHException(msg)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error running ssh command: %s") % command)
Example #17
    def get_key(self, ctxt, key_id,
                payload_content_type='application/octet-stream'):
        """Retrieves the specified key.

        :param ctxt: contains information of the user and the environment for
                     the request (cinder/context.py)
        :param key_id: the UUID of the key to retrieve
        :param payload_content_type: The format/type of the secret data

        :return: SymmetricKey representation of the key
        :throws Exception: if key retrieval fails
        """
        barbican_client = self._get_barbican_client(ctxt)

        try:
            secret_ref = self._create_secret_ref(key_id, barbican_client)
            secret = self._get_secret(ctxt, secret_ref)
            secret_data = self._get_secret_data(secret,
                                                payload_content_type)
            if payload_content_type == 'application/octet-stream':
                # convert decoded string to list of unsigned ints for each byte
                key_data = array.array('B',
                                       base64.b64decode(secret_data)).tolist()
            else:
                key_data = secret_data
            key = keymgr_key.SymmetricKey(secret.algorithm, key_data)
            return key
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error getting key: %s"), (e))
Example #18
    def _mapping_hostgroup_and_lungroup(self, volume_name,
                                        hostgroup_id, host_id):
        """Add hostgroup and lungroup to view."""
        lungroup_id = self._find_lungroup(volume_name)
        lun_id = self._get_volume_by_name(volume_name)
        view_id = self._find_mapping_view(volume_name)

        LOG.debug('_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)s'
                  ' view_id: %(view_id)s'
                  % {'lun_group': lungroup_id,
                     'view_id': view_id})

        try:
            if view_id is None:
                view_id = self._add_mapping_view(volume_name, host_id)
                self._associate_hostgroup_to_view(view_id, hostgroup_id)
                self._associate_lungroup_to_view(view_id, lungroup_id)
            else:
                if not self._hostgroup_associated(view_id, hostgroup_id):
                    self._associate_hostgroup_to_view(view_id, hostgroup_id)
                if not self._lungroup_associated(view_id, lungroup_id):
                    self._associate_lungroup_to_view(view_id, lungroup_id)

        except Exception:
            with excutils.save_and_reraise_exception():
                self._delete_hostgoup_mapping_view(view_id, hostgroup_id)
                self._delete_lungroup_mapping_view(view_id, lungroup_id)
                self._delete_mapping_view(view_id)

        return lun_id
Example #19
def _xml_read(root, element, check=None):
    """Read an xml element.

    :param root: XML object
    :param element: string desired tag
    :param check: string if present, throw exception if element missing
    """

    try:
        val = root.findtext(element)
        LOG.info(_("%(element)s: %(val)s")
                 % {'element': element,
                    'val': val})
        if val:
            return val.strip()
        if check:
            raise exception.ParameterNotFound(param=element)
        return None
    except ETree.ParseError:
        if check:
            with excutils.save_and_reraise_exception():
                LOG.error(_("XML exception reading parameter: %s") % element)
        else:
            LOG.info(_("XML exception reading parameter: %s") % element)
            return None
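
_xml_read() is a thin wrapper around ElementTree's findtext(): a found element with text returns the stripped text, a found-but-empty element returns a falsy empty string, and a missing element returns None (or raises ParameterNotFound when check is set). A self-contained illustration of those three cases, with an invented XML document:

import xml.etree.ElementTree as ETree

root = ETree.fromstring(
    "<config>"
    "<StorageIP> 192.168.0.10 </StorageIP>"
    "<Empty></Empty>"
    "</config>")

print(root.findtext("StorageIP"))   # ' 192.168.0.10 ' (would be .strip()ed)
print(root.findtext("Empty"))       # '' -> falsy, treated as missing
print(root.findtext("Missing"))     # None -> element not present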
Example #20
 def detach_volume(self, volume):
     LOG.debug("Detach Volume\n%s" % pprint.pformat(volume))
     try:
         self.clear_volume_key_value_pair(volume, "HPQ-CS-instance_uuid")
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.error(_("Error detaching volume %s") % volume)
Example #21
    def _execute_cmd(self, cmd):
        """ Execute cli with status update.

        Executes CLI commands such as cfgsave where status return is expected
        """
        with Timeout(self.cmd_timeout):
            try:
                self.info(_("running: '%s'") % cmd)
                stdin, stdout, stderr = self.client.exec_command(cmd)
                stdin.write("%s\n" % ZoneConstant.YES)

                # Wait for command completion.
                exit_status = stdout.channel.recv_exit_status()
                stdin.flush()

                data = stdout.read()
                self.debug(_("Returned data was %r") % data)
                stdin.close()
                stdout.close()
                stderr.close()
            except Timeout as timeout:
                # Close the connection so that nobody tries to re-used it.
                self.close_connection()
                desc = self.exception_desc
                raise exception.\
                    FabricCommandTimeoutException(desc,
                                                  timeout=self.cmd_timeout,
                                                  cmd=cmd)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    msg = _("Error running command via ssh: %s") % e
                    self.error(msg)
Example #22
    def _get_barbican_client(self, ctxt):
        """Creates a client to connect to the Barbican service.

        :param ctxt: the user context for authentication
        :return: a Barbican Client object
        :throws NotAuthorized: if the ctxt is None
        """

        if not self._barbican_client:
            # Confirm context is provided, if not raise not authorized
            if not ctxt:
                msg = _("User is not authorized to use key manager.")
                LOG.error(msg)
                raise exception.NotAuthorized(msg)

            try:
                auth = identity.v3.Token(
                    auth_url=CONF.keymgr.encryption_auth_url,
                    token=ctxt.auth_token)
                sess = session.Session(auth=auth)
                self._barbican_client = barbican_client.Client(
                    session=sess,
                    endpoint=self._barbican_endpoint)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Error creating Barbican client: %s"), (e))

        return self._barbican_client
Example #23
    def copy_volume_to_image(self, context, volume_id, image_meta):
        """Uploads the specified volume to Glance.

        image_meta is a dictionary containing the following keys:
        'id', 'container_format', 'disk_format'

        """
        payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
        try:
            volume = self.db.volume_get(context, volume_id)
            self.driver.ensure_export(context.elevated(), volume)
            image_service, image_id = \
                glance.get_remote_image_service(context, image_meta['id'])
            self.driver.copy_volume_to_image(context, volume, image_service,
                                             image_meta)
            LOG.debug(_("Uploaded volume %(volume_id)s to "
                        "image (%(image_id)s) successfully"),
                      {'volume_id': volume_id, 'image_id': image_id})
        except Exception as error:
            with excutils.save_and_reraise_exception():
                payload['message'] = unicode(error)
        finally:
            if (volume['instance_uuid'] is None and
                    volume['attached_host'] is None):
                self.db.volume_update(context, volume_id,
                                      {'status': 'available'})
            else:
                self.db.volume_update(context, volume_id,
                                      {'status': 'in-use'})
Example #24
        def do_attach():
            # check the volume status before attaching
            volume = self.db.volume_get(context, volume_id)
            if volume['status'] == 'attaching':
                if (volume['instance_uuid'] and volume['instance_uuid'] !=
                        instance_uuid):
                    msg = _("being attached by another instance")
                    raise exception.InvalidVolume(reason=msg)
            elif volume['status'] != "available":
                msg = _("status must be available")
                raise exception.InvalidVolume(reason=msg)
            self.db.volume_update(context, volume_id,
                                  {"instance_uuid": instance_uuid,
                                   "status": "attaching"})

            # TODO(vish): refactor this into a more general "reserve"
            # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
            if not uuidutils.is_uuid_like(instance_uuid):
                raise exception.InvalidUUID(uuid=instance_uuid)

            try:
                self.driver.attach_volume(context,
                                          volume_id,
                                          instance_uuid,
                                          mountpoint)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.db.volume_update(context,
                                          volume_id,
                                          {'status': 'error_attaching'})

            self.db.volume_attached(context.elevated(),
                                    volume_id,
                                    instance_uuid,
                                    mountpoint)
Example #25
    def create_snapshots_in_db(self, context, volume_list, name, description, force, cgsnapshot_id):
        snapshot_list = []
        for volume in volume_list:
            self._create_snapshot_in_db_validate(context, volume, force)

        reservations = self._create_snapshots_in_db_reserve(context, volume_list)

        options_list = []
        for volume in volume_list:
            options = self._create_snapshot_in_db_options(context, volume, name, description, cgsnapshot_id)
            options_list.append(options)

        try:
            for options in options_list:
                snapshot = self.db.snapshot_create(context, options)
                snapshot_list.append(snapshot)

            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    for snap in snapshot_list:
                        self.db.snapshot_destroy(context, snap["id"])
                finally:
                    QUOTAS.rollback(context, reservations)

        return snapshot_list
Example #26
    def _create_volume(self, eseries_pool_label, eseries_volume_label,
                       size_gb):
        """Creates volume with given label and size."""

        target_pool = None

        pools = self._client.list_storage_pools()
        for pool in pools:
            if pool["label"] == eseries_pool_label:
                target_pool = pool
                break

        if not target_pool:
            msg = _("Pools %s does not exist")
            raise exception.NetAppDriverException(msg % eseries_pool_label)

        try:
            vol = self._client.create_volume(target_pool['volumeGroupRef'],
                                             eseries_volume_label, size_gb)
            LOG.info(_("Created volume with label %s."), eseries_volume_label)
        except exception.NetAppDriverException as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error creating volume. Msg - %s."),
                          six.text_type(e))

        return vol
Example #27
    def restore_backup(self, context, backup_id, volume_id):
        """
        Restore volume backups from configured backup service.
        """
        LOG.info(_('restore_backup started, restoring backup: %(backup_id)s'
                   ' to volume: %(volume_id)s') % locals())
        backup = self.db.backup_get(context, backup_id)
        volume = self.db.volume_get(context, volume_id)
        self.db.backup_update(context, backup_id, {'host': self.host})

        expected_status = 'restoring-backup'
        actual_status = volume['status']
        if actual_status != expected_status:
            err = _('restore_backup aborted, expected volume status '
                    '%(expected_status)s but got %(actual_status)s') % locals()
            self.db.backup_update(context, backup_id, {'status': 'available'})
            raise exception.InvalidVolume(reason=err)

        expected_status = 'restoring'
        actual_status = backup['status']
        if actual_status != expected_status:
            err = _('restore_backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s') % locals()
            self.db.backup_update(context, backup_id, {'status': 'error',
                                                       'fail_reason': err})
            self.db.volume_update(context, volume_id, {'status': 'error'})
            raise exception.InvalidBackup(reason=err)

        if volume['size'] > backup['size']:
            LOG.warn('volume: %s, size: %d is larger than backup: %s, '
                     'size: %d, continuing with restore',
                     volume['id'], volume['size'],
                     backup['id'], backup['size'])

        backup_service = backup['service']
        configured_service = FLAGS.backup_service
        if backup_service != configured_service:
            err = _('restore_backup aborted, the backup service currently'
                    ' configured [%(configured_service)s] is not the'
                    ' backup service that was used to create this'
                    ' backup [%(backup_service)s]') % locals()
            self.db.backup_update(context, backup_id, {'status': 'available'})
            self.db.volume_update(context, volume_id, {'status': 'error'})
            raise exception.InvalidBackup(reason=err)

        try:
            backup_service = self.service.get_backup_service(context)
            self.driver.restore_backup(context, backup, volume,
                                       backup_service)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_restoring'})
                self.db.backup_update(context, backup_id,
                                      {'status': 'available'})

        self.db.volume_update(context, volume_id, {'status': 'available'})
        self.db.backup_update(context, backup_id, {'status': 'available'})
        LOG.info(_('restore_backup finished, backup: %(backup_id)s restored'
                   ' to volume: %(volume_id)s') % locals())
Example #28
    def copy_key(self, ctxt, key_id):
        """Copies (i.e., clones) a key stored by barbican.

        :param ctxt: contains information of the user and the environment for
                     the request (cinder/context.py)
        :param key_id: the UUID of the key to copy
        :return: the UUID of the key copy
        :throws Exception: if key copying fails
        """
        barbican_client = self._get_barbican_client(ctxt)

        try:
            secret_ref = self._create_secret_ref(key_id, barbican_client)
            secret = self._get_secret(ctxt, secret_ref)
            con_type = secret.content_types['default']
            secret_data = self._get_secret_data(secret,
                                                payload_content_type=con_type)
            key = keymgr_key.SymmetricKey(secret.algorithm, secret_data)
            copy_uuid = self.store_key(ctxt, key, secret.expiration,
                                       secret.name, con_type,
                                       'base64',
                                       secret.algorithm, secret.bit_length,
                                       secret.mode, True)
            return copy_uuid
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error copying key: %s"), (e))
Example #29
    def _set_qos_rule(self, qos, vvs_name):
        min_io = self._get_qos_value(qos, "minIOPS")
        max_io = self._get_qos_value(qos, "maxIOPS")
        min_bw = self._get_qos_value(qos, "minBWS")
        max_bw = self._get_qos_value(qos, "maxBWS")
        latency = self._get_qos_value(qos, "latency")
        priority = self._get_qos_value(qos, "priority", "normal")

        qosRule = {}
        if min_io:
            qosRule["ioMinGoal"] = int(min_io)
            if max_io is None:
                qosRule["ioMaxLimit"] = int(min_io)
        if max_io:
            qosRule["ioMaxLimit"] = int(max_io)
            if min_io is None:
                qosRule["ioMinGoal"] = int(max_io)
        if min_bw:
            qosRule["bwMinGoalKB"] = int(min_bw) * units.KiB
            if max_bw is None:
                qosRule["bwMaxLimitKB"] = int(min_bw) * units.KiB
        if max_bw:
            qosRule["bwMaxLimitKB"] = int(max_bw) * units.KiB
            if min_bw is None:
                qosRule["bwMinGoalKB"] = int(max_bw) * units.KiB
        if latency:
            qosRule["latencyGoal"] = int(latency)
        if priority:
            qosRule["priority"] = self.qos_priority_level.get(priority.lower())

        try:
            self.client.createQoSRules(vvs_name, qosRule)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error creating QOS rule %s") % qosRule)
Example #30
    def create_copy(self, src, tgt, src_id, config, opts, full_copy):
        """Create a new snapshot using FlashCopy."""
        LOG.debug(_('enter: create_copy: snapshot %(src)s to %(tgt)s') %
                  {'tgt': tgt, 'src': src})

        src_attrs = self.get_vdisk_attributes(src)
        if src_attrs is None:
            msg = (_('create_copy: Source vdisk %(src)s (%(src_id)s) '
                     'does not exist') % {'src': src, 'src_id': src_id})
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        src_size = src_attrs['capacity']
        pool = config.storwize_svc_volpool_name
        self.create_vdisk(tgt, src_size, 'b', pool, opts)
        timeout = config.storwize_svc_flashcopy_timeout
        try:
            self.run_flashcopy(src, tgt, timeout, full_copy=full_copy)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.delete_vdisk(tgt, True)

        LOG.debug(_('leave: _create_copy: snapshot %(tgt)s from '
                    'vdisk %(src)s') %
                  {'tgt': tgt, 'src': src})
Example #31
    def do_setup(self, context):
        """Disable cli confirmation and tune output format."""
        try:
            disabled_cli_features = ('confirmation', 'paging', 'events',
                                     'formatoutput')
            for feature in disabled_cli_features:
                self._eql_execute('cli-settings', feature, 'off')

            for line in self._eql_execute('grpparams', 'show'):
                if line.startswith('Group-Ipaddress:'):
                    out_tup = line.rstrip().partition(' ')
                    self._group_ip = out_tup[-1]

            LOG.info(_("EQL-driver: Setup is complete, group IP is %s"),
                     self._group_ip)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_('Failed to setup the Dell EqualLogic driver'))
Example #32
 def initialize_connection(self, volume, connector):
     """Restrict access to a volume."""
     try:
         cmd = ['volume', 'select', volume['name'], 'access', 'create',
                'initiator', connector['initiator']]
         if self.configuration.eqlx_use_chap:
             cmd.extend(['authmethod chap', 'username',
                         self.configuration.eqlx_chap_login])
         self._eql_execute(*cmd)
         iscsi_properties = self._get_iscsi_properties(volume)
         return {
             'driver_volume_type': 'iscsi',
             'data': iscsi_properties
         }
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.error(_('Failed to initialize connection to volume %s'),
                       volume['name'])
Example #33
 def __iter__(self):
     """Return a result until we get a 'None' response from consumer"""
     if self._done:
         raise StopIteration
     while True:
         try:
             self._iterator.next()
         except Exception:
             with excutils.save_and_reraise_exception():
                 self.done()
         if self._got_ending:
             self.done()
             raise StopIteration
         result = self._result
         if isinstance(result, Exception):
             self.done()
             raise result
         yield result
Example #34
    def _get_secret(self, ctxt, secret_ref):
        """Creates the URL required for accessing a secret's metadata.

        :param ctxt: contains information of the user and the environment for
                     the request (cinder/context.py)
        :param secret_ref: URL to access the secret

        :return: the secret's metadata
        :throws Exception: if there is an error retrieving the data
        """

        barbican_client = self._get_barbican_client(ctxt)

        try:
            return barbican_client.secrets.get(secret_ref)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error getting secret metadata: %s"), (e))
Example #35
 def migrate_volume_to_host(self, context, topic, volume_id, host,
                            force_host_copy, request_spec,
                            filter_properties=None):
     """Ensure that the host exists and can accept the volume."""
     try:
         tgt_host = self.driver.host_passes_filters(context, host,
                                                    request_spec,
                                                    filter_properties)
     except exception.NoValidHost as ex:
         self._migrate_volume_set_error(context, ex, request_spec)
     except Exception as ex:
         with excutils.save_and_reraise_exception():
             self._migrate_volume_set_error(context, ex, request_spec)
     else:
         volume_ref = db.volume_get(context, volume_id)
         volume_rpcapi.VolumeAPI().migrate_volume(context, volume_ref,
                                                  tgt_host,
                                                  force_host_copy)
Example #36
    def _run_ssh(self, cmd_list, attempts=1):
        utils.check_ssh_injection(cmd_list)
        command = ' '.join(cmd_list)

        if not self.sshpool:
            password = self.configuration.san_password
            privatekey = self.configuration.san_private_key
            min_size = self.configuration.ssh_min_pool_conn
            max_size = self.configuration.ssh_max_pool_conn
            self.sshpool = ssh_utils.SSHPool(
                self.configuration.san_ip,
                self.configuration.san_ssh_port,
                self.configuration.ssh_conn_timeout,
                self.configuration.san_login,
                password=password,
                privatekey=privatekey,
                min_size=min_size,
                max_size=max_size)
        try:
            total_attempts = attempts
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        LOG.info(_('EQL-driver: executing "%s"') % command)
                        return self._ssh_execute(
                            ssh,
                            command,
                            timeout=self.configuration.eqlx_cli_timeout)
                    except processutils.ProcessExecutionError:
                        raise
                    except Exception as e:
                        LOG.exception(e)
                        greenthread.sleep(random.randint(20, 500) / 100.0)
                msg = (_("SSH Command failed after '%(total_attempts)r' "
                         "attempts : '%(command)s'") % {
                             'total_attempts': total_attempts,
                             'command': command
                         })
                raise exception.VolumeBackendAPIException(data=msg)

        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error running SSH command: %s") % command)
Example #37
    def detach_volume(self, context, volume_id):
        """Updates db to show volume is detached"""
        # TODO(vish): refactor this into a more general "unreserve"
        # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
        try:
            self.driver.detach_volume(context, volume_id)
        except Exception:
            with excutils.save_and_reraise_exception():
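                # Flag the volume as error_detaching before the original
                # exception is re-raised.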
                self.db.volume_update(context,
                                      volume_id,
                                      {'status': 'error_detaching'})

        self.db.volume_detached(context.elevated(), volume_id)

        # Check for https://bugs.launchpad.net/cinder/+bug/1065702
        volume_ref = self.db.volume_get(context, volume_id)
        if (volume_ref['provider_location'] and
                volume_ref['name'] not in volume_ref['provider_location']):
            self.driver.ensure_export(context, volume_ref)
Exemplo n.º 38
0
 def create_snapshot(self, snapshot):
     """Creates a snapshot."""
     snap_grp, snap_image = None, None
     snapshot_name = utils.convert_uuid_to_es_fmt(snapshot['id'])
     vol = self._get_volume(snapshot['volume_id'])
     vol_size_gb = int(vol['totalSizeInBytes']) / units.Gi
     pools = self._get_sorted_avl_storage_pools(vol_size_gb)
     try:
         snap_grp = self._client.create_snapshot_group(
             snapshot_name, vol['volumeRef'], pools[0]['volumeGroupRef'])
         self._cache_snap_grp(snap_grp)
         snap_image = self._client.create_snapshot_image(
             snap_grp['pitGroupRef'])
         self._cache_snap_img(snap_image)
         LOG.info(_("Created snap grp with label %s."), snapshot_name)
     except exception.NetAppDriverException:
         with excutils.save_and_reraise_exception():
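             # Only the snapshot group was created (no image yet); clean it
             # up before re-raising.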
             if snap_image is None and snap_grp:
                 self.delete_snapshot(snapshot)
Exemplo n.º 39
0
def _xml_read(root, element, check=None):
    """Read an xml element."""
    try:
        val = root.findtext(element)
        LOG.info(_("%(element)s: %(val)s")
                 % {'element': element,
                    'val': val})
        if val:
            return val.strip()
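        # A missing value is only an error when the caller marked the
        # element as required.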
        if check:
            raise exception.ParameterNotFound(param=element)
        return None
    except ETree.ParseError as e:
        if check:
            with excutils.save_and_reraise_exception():
                LOG.error(_("XML exception reading parameter: %s") % element)
        else:
            LOG.info(_("XML exception reading parameter: %s") % element)
            return None
Exemplo n.º 40
0
    def create_snapshot(self, context, volume_id, snapshot_id):
        """Creates and exports the snapshot."""
        context = context.elevated()
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
        LOG.info(_("snapshot %s: creating"), snapshot_ref['id'])
        self._notify_about_snapshot_usage(context, snapshot_ref,
                                          "create.start")

        try:
            LOG.debug(_("snapshot %(snap_id)s: creating"),
                      {'snap_id': snapshot_ref['id']})
            model_update = self.driver.create_snapshot(snapshot_ref)
            if model_update:
                self.db.snapshot_update(context, snapshot_ref['id'],
                                        model_update)

        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.snapshot_update(context, snapshot_ref['id'],
                                        {'status': 'error'})

        self.db.snapshot_update(context, snapshot_ref['id'], {
            'status': 'available',
            'progress': '100%'
        })

        vol_ref = self.db.volume_get(context, volume_id)
        if vol_ref.bootable:
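            # Copy the Glance image metadata so that volumes created from
            # this snapshot remain bootable.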
            try:
                self.db.volume_glance_metadata_copy_to_snapshot(
                    context, snapshot_ref['id'], volume_id)
            except exception.CinderException as ex:
                LOG.exception(
                    _("Failed updating %(snapshot_id)s"
                      " metadata using the provided volumes"
                      " %(volume_id)s metadata") % {
                          'volume_id': volume_id,
                          'snapshot_id': snapshot_id
                      })
                raise exception.MetadataCopyFailure(reason=ex)
        LOG.info(_("snapshot %s: created successfully"), snapshot_ref['id'])
        self._notify_about_snapshot_usage(context, snapshot_ref, "create.end")
        return snapshot_id
Exemplo n.º 41
0
    def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
        # TODO(skolathur): Need to implement an ssh_injection check;
        # currently the check would fail for the zonecreate command,
        # as zone members are separated by ';', which is a danger char.
        command = ' '.join(cmd_list)

        if not self.sshpool:
            self.sshpool = utils.SSHPool(self.switch_ip,
                                         self.switch_port,
                                         None,
                                         self.switch_user,
                                         self.switch_pwd,
                                         min_size=1,
                                         max_size=5)
        last_exception = None
        try:
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        return processutils.ssh_execute(
                            ssh,
                            command,
                            check_exit_code=check_exit_code)
                    except Exception as e:
                        LOG.error(e)
                        last_exception = e
                        greenthread.sleep(random.randint(20, 500) / 100.0)
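                # Attempts exhausted: re-raise the last failure, falling back
                # to a generic ProcessExecutionError if it lacks exit_code,
                # stdout and stderr attributes.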
                try:
                    raise processutils.ProcessExecutionError(
                        exit_code=last_exception.exit_code,
                        stdout=last_exception.stdout,
                        stderr=last_exception.stderr,
                        cmd=last_exception.cmd)
                except AttributeError:
                    raise processutils.ProcessExecutionError(
                        exit_code=-1,
                        stdout="",
                        stderr="Error running SSH command",
                        cmd=command)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error running SSH command: %s") % command)
Exemplo n.º 42
0
    def delete_backup(self, context, backup_id):
        """
        Delete volume backup from configured backup service.
        """
        backup = self.db.backup_get(context, backup_id)
        LOG.info(_('delete_backup started, backup: %s'), backup_id)
        self.db.backup_update(context, backup_id, {'host': self.host})

        expected_status = 'deleting'
        actual_status = backup['status']
        if actual_status != expected_status:
            err = _(
                'delete_backup aborted, expected backup status '
                '%(expected_status)s but got %(actual_status)s') % locals()
            self.db.backup_update(context, backup_id, {
                'status': 'error',
                'fail_reason': err
            })
            raise exception.InvalidBackup(reason=err)

        backup_service = backup['service']
        configured_service = FLAGS.backup_service
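        # A backup may only be deleted by the backup service that created
        # it; otherwise restore the 'available' status and abort.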
        if backup_service != configured_service:
            err = _('delete_backup aborted, the backup service currently'
                    ' configured [%(configured_service)s] is not the'
                    ' backup service that was used to create this'
                    ' backup [%(backup_service)s]') % locals()
            self.db.backup_update(context, backup_id, {'status': 'available'})
            raise exception.InvalidBackup(reason=err)

        try:
            backup_service = self.service.get_backup_service(context)
            backup_service.delete(backup)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                self.db.backup_update(context, backup_id, {
                    'status': 'error',
                    'fail_reason': unicode(err)
                })

        context = context.elevated()
        self.db.backup_destroy(context, backup_id)
        LOG.info(_('delete_backup finished, backup %s deleted'), backup_id)
Exemplo n.º 43
0
    def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
        cinder_utils.check_ssh_injection(cmd_list)
        command = ' '.join(cmd_list)

        if not self.sshpool:
            self.sshpool = ssh_utils.SSHPool(
                self.target.get('san_ip'),
                self.target.get('san_ssh_port', 22),
                self.target.get('ssh_conn_timeout', 30),
                self.target.get('san_login'),
                password=self.target.get('san_password'),
                privatekey=self.target.get('san_private_key', ''),
                min_size=self.target.get('ssh_min_pool_conn', 1),
                max_size=self.target.get('ssh_max_pool_conn', 5),
            )
        last_exception = None
        try:
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        return processutils.ssh_execute(
                            ssh, command, check_exit_code=check_exit_code)
                    except Exception as e:
                        LOG.error(e)
                        last_exception = e
                        greenthread.sleep(random.randint(20, 500) / 100.0)
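                # No attempts left: re-raise as a ProcessExecutionError,
                # synthesizing a generic one when the last exception carries
                # no exit_code/stdout/stderr.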
                try:
                    raise processutils.ProcessExecutionError(
                        exit_code=last_exception.exit_code,
                        stdout=last_exception.stdout,
                        stderr=last_exception.stderr,
                        cmd=last_exception.cmd)
                except AttributeError:
                    raise processutils.ProcessExecutionError(
                        exit_code=-1,
                        stdout="",
                        stderr="Error running SSH command",
                        cmd=command)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Error running SSH command: %s", command)
Exemplo n.º 44
0
    def copy_volume_to_image(self, context, volume_id, image_meta):
        """Uploads the specified volume to Glance.

        image_meta is a dictionary containing the following keys:
        'id', 'container_format', 'disk_format'

        """
        payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)

            volume = self.db.volume_get(context, volume_id)

            volume_type_id = volume.get('volume_type_id', None)
            if self._is_share_volume(volume_type_id):
                self.db.volume_update(context,
                                      volume_id,
                                      {'status': 'error'})
                msg = _("Volume is share volume")
                raise exception.InvalidVolume(reason=msg)

            self.driver.ensure_export(context.elevated(), volume)
            image_service, image_id = \
                glance.get_remote_image_service(context, image_meta['id'])
            self.driver.copy_volume_to_image(context, volume, image_service,
                                             image_meta)
            LOG.debug(_("Uploaded volume %(volume_id)s to "
                        "image (%(image_id)s) successfully"),
                      {'volume_id': volume_id, 'image_id': image_id})
        except Exception as error:
            with excutils.save_and_reraise_exception():
                payload['message'] = unicode(error)
        finally:
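            # Restore the volume status whether or not the upload succeeded.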
            if not volume['volume_attachment']:
                self.db.volume_update(context, volume_id,
                                      {'status': 'available'})
            else:
                self.db.volume_update(context, volume_id,
                                      {'status': 'in-use'})
Exemplo n.º 45
0
    def schedule_create_volume(context, request_spec, filter_properties):

        def _log_failure(cause):
            LOG.error(_("Failed to schedule_create_volume: %(cause)s") %
                      {'cause': cause})

        def _notify_failure(cause):
            """When scheduling fails send out a event that it failed."""
            topic = "scheduler.create_volume"
            payload = {
                'request_spec': request_spec,
                'volume_properties': request_spec.get('volume_properties', {}),
                'volume_id': volume_id,
                'state': 'error',
                'method': 'create_volume',
                'reason': cause,
            }
            try:
                rpc.get_notifier('scheduler').error(context, topic, payload)
            except exception.CinderException:
                LOG.exception(_("Failed notifying on %(topic)s "
                                "payload %(payload)s") % {'topic': topic,
                                                          'payload': payload})

        try:
            driver.schedule_create_volume(context, request_spec,
                                          filter_properties)
        except exception.NoValidHost as e:
            # No valid host was found: notify on the scheduler queue, log
            # that this happened, set the volume to error and *do not*
            # reraise the error (since what's the point).
            _notify_failure(e)
            _log_failure(e)
            common.error_out_volume(context, db, volume_id, reason=e)
        except Exception as e:
            # Some other error happened: notify on the scheduler queue, log
            # that this happened, set the volume to error and *do* reraise
            # the error.
            with excutils.save_and_reraise_exception():
                _notify_failure(e)
                _log_failure(e)
                common.error_out_volume(context, db, volume_id, reason=e)
Exemplo n.º 46
0
    def create_volume(self,
                      context,
                      topic,
                      volume_id,
                      snapshot_id=None,
                      image_id=None,
                      request_spec=None,
                      filter_properties=None):
        try:
            if request_spec is None:
                # For RPC version < 1.2 backward compatibility
                request_spec = {}
                volume_ref = db.volume_get(context, volume_id)
                size = volume_ref.get('size')
                availability_zone = volume_ref.get('availability_zone')
                volume_type_id = volume_ref.get('volume_type_id')
                vol_type = db.volume_type_get(context, volume_type_id)
                volume_properties = {
                    'size': size,
                    'availability_zone': availability_zone,
                    'volume_type_id': volume_type_id
                }
                request_spec.update({
                    'volume_id': volume_id,
                    'snapshot_id': snapshot_id,
                    'image_id': image_id,
                    'volume_properties': volume_properties,
                    'volume_type': dict(vol_type).iteritems()
                })

            self.driver.schedule_create_volume(context, request_spec,
                                               filter_properties)
        except exception.NoValidHost as ex:
            volume_state = {'volume_state': {'status': 'error'}}
            self._set_volume_state_and_notify('create_volume', volume_state,
                                              context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                volume_state = {'volume_state': {'status': 'error'}}
                self._set_volume_state_and_notify('create_volume',
                                                  volume_state, context, ex,
                                                  request_spec)
Exemplo n.º 47
0
 def _check_storage_system(self):
     """Checks whether system is registered and has good status."""
     try:
         system = self._client.list_storage_system()
     except exception.NetAppDriverException:
         with excutils.save_and_reraise_exception():
             msg = _("System with controller addresses [%s] is not"
                     " registered with web service.")
             LOG.info(msg % self.configuration.netapp_controller_ips)
     password_not_in_sync = False
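     # If the stored array password is out of sync, push the configured
     # password and give the array time to pick it up.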
     if system.get('status', '').lower() == 'passwordoutofsync':
         password_not_in_sync = True
         new_pwd = self.configuration.netapp_sa_password
         self._client.update_stored_system_password(new_pwd)
         time.sleep(self.SLEEP_SECS)
     sa_comm_timeout = 60
     comm_time = 0
     while True:
         system = self._client.list_storage_system()
         status = system.get('status', '').lower()
         # wait if array not contacted or
         # password was not in sync previously.
         if ((status == 'nevercontacted') or
             (password_not_in_sync and status == 'passwordoutofsync')):
             LOG.info(_('Waiting for web service array communication.'))
             time.sleep(self.SLEEP_SECS)
             comm_time = comm_time + self.SLEEP_SECS
             if comm_time >= sa_comm_timeout:
                 msg = _("Failure in communication between web service and"
                         " array. Waited %s seconds. Verify array"
                         " configuration parameters.")
                 raise exception.NetAppDriverException(msg %
                                                       sa_comm_timeout)
         else:
             break
     msg_dict = {'id': system.get('id'), 'status': status}
     if (status == 'passwordoutofsync' or status == 'notsupported'
             or status == 'offline'):
         msg = _("System %(id)s found with bad status - %(status)s.")
         raise exception.NetAppDriverException(msg % msg_dict)
     LOG.info(_("System %(id)s has %(status)s status.") % msg_dict)
     return True
Exemplo n.º 48
0
    def copy_volume_to_image(self, context, volume_id, image_meta):
        """Uploads the specified volume to Glance.

        image_meta is a dictionary containing the following keys:
        'id', 'container_format', 'disk_format'

        """
        payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
        try:
            volume = self.db.volume_get(context, volume_id)
            self.driver.ensure_export(context.elevated(), volume)
            image_service, image_id = \
                glance.get_remote_image_service(context, image_meta['id'])
            self.driver.copy_volume_to_image(context, volume, image_service,
                                             image_meta)
            LOG.debug(_("Uploaded volume %(volume_id)s to "
                        "image (%(image_id)s) successfully") % locals())
        except Exception as error:
            with excutils.save_and_reraise_exception():
                payload['message'] = unicode(error)
Exemplo n.º 49
0
 def terminate_connection(self, volume, connector, **kwargs):
     """Terminate connection."""
     LOG.debug("Enter PureISCSIDriver.terminate_connection.")
     vol_name = _get_vol_name(volume)
     message = _("Disconnection failed with message: {0}")
     try:
         host_name = self._get_host_name(connector)
     except exception.PureDriverException as err:
         # Happens if the host object is missing.
         LOG.error(message.format(err.msg))
     else:
         try:
             self._array.disconnect_host(host_name, vol_name)
         except exception.PureAPIException as err:
             with excutils.save_and_reraise_exception() as ctxt:
                 if err.kwargs["code"] == 400:
                     # Happens if the host and volume are not connected.
                     ctxt.reraise = False
                     LOG.error(message.format(err.msg))
     LOG.debug("Leave PureISCSIDriver.terminate_connection.")
Exemplo n.º 50
0
    def create_copy(self,
                    src,
                    tgt,
                    src_id,
                    config,
                    opts,
                    full_copy,
                    pool=None):
        """Create a new snapshot using FlashCopy."""
        LOG.debug('enter: create_copy: snapshot %(src)s to %(tgt)s' % {
            'tgt': tgt,
            'src': src
        })

        src_attrs = self.get_vdisk_attributes(src)
        if src_attrs is None:
            msg = (_('create_copy: Source vdisk %(src)s (%(src_id)s) '
                     'does not exist') % {
                         'src': src,
                         'src_id': src_id
                     })
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        src_size = src_attrs['capacity']
        # In case we need to use a specific pool
        if not pool:
            pool = config.storwize_svc_volpool_name
        self.create_vdisk(tgt, src_size, 'b', pool, opts)
        timeout = config.storwize_svc_flashcopy_timeout
        try:
            self.run_flashcopy(src, tgt, timeout, full_copy=full_copy)
        except Exception:
            with excutils.save_and_reraise_exception():
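                # Remove the target vdisk created above before re-raising.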
                self.delete_vdisk(tgt, True)

        LOG.debug('leave: _create_copy: snapshot %(tgt)s from '
                  'vdisk %(src)s' % {
                      'tgt': tgt,
                      'src': src
                  })
Exemplo n.º 51
0
 def __iter__(self):
     """Return a result until we get a reply with an 'ending' flag."""
     if self._done:
         raise StopIteration
     while True:
         try:
             data = self._dataqueue.get(timeout=self._timeout)
             result = self._process_data(data)
         except queue.Empty:
             self.done()
             raise rpc_common.Timeout()
         except Exception:
             with excutils.save_and_reraise_exception():
                 self.done()
         if self._got_ending:
             self.done()
             raise StopIteration
         if isinstance(result, Exception):
             self.done()
             raise result
         yield result
Exemplo n.º 52
0
    def copy_data(self, pvol, size, p_is_vvol, method):
        type = 'Normal'
        is_vvol = method == 'THIN'
        svol = self._create_volume(size, is_vvol=is_vvol)
        try:
            if p_is_vvol:
                self.copy_sync_data(pvol, svol, size)
            else:
                if is_vvol:
                    type = 'V-VOL'
                self.copy_async_data(pvol, svol, is_vvol)
        except Exception:
            with excutils.save_and_reraise_exception():
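                # Best-effort cleanup of the secondary volume; failures are
                # logged as warnings without masking the original error.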
                try:
                    self.delete_ldev(svol, is_vvol)
                except Exception as ex:
                    msg = basic_lib.set_msg(
                        313, ldev=svol, reason=six.text_type(ex))
                    LOG.warning(msg)

        return six.text_type(svol), type
Exemplo n.º 53
0
    def get_nameserver_info(self, fabric_vsan):
        """Get fcns database info from fabric.

        This method will return the connected node port WWN list (local
        and remote) for the given switch fabric.
        """
        cli_output = None
        nsinfo_list = []
        try:
            cmd = ZoneConstant.FCNS_SHOW + fabric_vsan + ' | no-more'
            cli_output = self._get_switch_info(cmd)
        except exception.FCSanLookupServiceException:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _("Failed collecting show fcns database for"
                      " fabric"))
        if cli_output:
            nsinfo_list = self._parse_ns_output(cli_output)

        LOG.debug("Connector returning fcns info-%s", nsinfo_list)
        return nsinfo_list
Exemplo n.º 54
0
    def get_nameserver_info(self):
        """Get name server data from fabric.

        This method will return the connected node port WWN list (local
        and remote) for the given switch fabric.
        """
        cli_output = None
        return_list = []
        try:
            cmd = '%(nsshow)s;%(nscamshow)s' % {
                'nsshow': ZoneConstant.NS_SHOW,
                'nscamshow': ZoneConstant.NS_CAM_SHOW}
            cli_output = self._get_switch_info([cmd])
        except exception.BrocadeZoningCliException:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Failed collecting nsshow "
                            "info for fabric %s"), self.switch_ip)
        if (cli_output):
            return_list = self._parse_ns_output(cli_output)
        cli_output = None
        return return_list
Exemplo n.º 55
0
    def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):

        command = ' '.join(cmd_list)

        if not self.sshpool:
            self.sshpool = ssh_utils.SSHPool(self.switch_ip,
                                             self.switch_port,
                                             None,
                                             self.switch_user,
                                             self.switch_pwd,
                                             min_size=1,
                                             max_size=5)
        last_exception = None
        try:
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        return processutils.ssh_execute(
                            ssh, command, check_exit_code=check_exit_code)
                    except Exception as e:
                        msg = _("Exception: %s") % six.text_type(e)
                        LOG.error(msg)
                        last_exception = e
                        greenthread.sleep(random.randint(20, 500) / 100.0)
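                # Retries exhausted: surface the last error, or a generic
                # ProcessExecutionError if its details are unavailable.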
                try:
                    raise processutils.ProcessExecutionError(
                        exit_code=last_exception.exit_code,
                        stdout=last_exception.stdout,
                        stderr=last_exception.stderr,
                        cmd=last_exception.cmd)
                except AttributeError:
                    raise processutils.ProcessExecutionError(
                        exit_code=-1,
                        stdout="",
                        stderr="Error running SSH command",
                        cmd=command)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error running SSH command: %s") % command)
Exemplo n.º 56
0
    def _create_cgsnapshot(self, context, group, name, description):
        options = {
            'consistencygroup_id': group['id'],
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': "creating",
            'name': name,
            'description': description
        }

        try:
            cgsnapshot = self.db.cgsnapshot_create(context, options)
            cgsnapshot_id = cgsnapshot['id']

            volumes = self.db.volume_get_all_by_group(
                context.elevated(), cgsnapshot['consistencygroup_id'])

            if not volumes:
                msg = _("Consistency group is empty. No cgsnapshot "
                        "will be created.")
                raise exception.InvalidConsistencyGroup(reason=msg)

            snap_name = cgsnapshot['name']
            snap_desc = cgsnapshot['description']
            self.volume_api.create_snapshots_in_db(context, volumes, snap_name,
                                                   snap_desc, True,
                                                   cgsnapshot_id)

        except Exception:
            with excutils.save_and_reraise_exception():
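                # Roll back the cgsnapshot record and log the failure before
                # the original exception is re-raised.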
                try:
                    self.db.cgsnapshot_destroy(context, cgsnapshot_id)
                finally:
                    LOG.error(
                        _("Error occurred when creating cgsnapshot"
                          " %s."), cgsnapshot_id)

        self.volume_rpcapi.create_cgsnapshot(context, group, cgsnapshot)

        return cgsnapshot
Exemplo n.º 57
0
    def initialize_connection(self, volume, connector):
        """Create FC connection between a volume and a host."""
        LOG.debug(
            'initialize_connection: volume name: %(vol)s, '
            'host: %(host)s, initiator: %(wwn)s' % {
                'vol': volume['name'],
                'host': connector['host'],
                'wwn': connector['wwpns']
            })

        self.common._update_login_info()
        # First, add a host if it is not added before.
        host_id = self.common.add_host(connector['host'], connector['ip'])
        # Then, add free FC ports to the host.
        ini_wwns = connector['wwpns']
        free_wwns = self._get_connected_free_wwns()
        for wwn in free_wwns:
            if wwn in ini_wwns:
                self._add_fc_port_to_host(host_id, wwn)
        fc_port_details = self._get_host_port_details(host_id)
        tgt_wwns = self._get_tgt_fc_port_wwns(fc_port_details)

        LOG.debug('initialize_connection: Target FC ports WWNS: %s' % tgt_wwns)

        # Finally, map the volume to the host.
        volume_id = volume['provider_location']
        try:
            hostlun_id = self.common.map_volume(host_id, volume_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Remove the FC port from the host if the map failed.
                self._remove_fc_ports(host_id, connector)

        properties = {}
        properties['target_discovered'] = False
        properties['target_wwn'] = tgt_wwns
        properties['target_lun'] = int(hostlun_id)
        properties['volume_id'] = volume['id']

        return {'driver_volume_type': 'fibre_channel', 'data': properties}
Exemplo n.º 58
0
    def manage_existing(self, context, host, ref, name=None, description=None,
                        volume_type=None, metadata=None,
                        availability_zone=None):
        if availability_zone is None:
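            # Derive the availability zone from the volume service running
            # on the given host.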
            elevated = context.elevated()
            try:
                service = self.db.service_get_by_host_and_topic(
                    elevated, host, CONF.volume_topic)
            except exception.ServiceNotFound:
                with excutils.save_and_reraise_exception():
                    LOG.error(_('Unable to find service for given host.'))
            availability_zone = service.get('availability_zone')

        volume_type_id = volume_type['id'] if volume_type else None
        volume_properties = {
            'size': 0,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': 'creating',
            'attach_status': 'detached',
            # Rename these to the internal name.
            'display_description': description,
            'display_name': name,
            'host': host,
            'availability_zone': availability_zone,
            'volume_type_id': volume_type_id,
            'metadata': metadata
        }

        # Call the scheduler to ensure that the host exists and that it can
        # accept the volume
        volume = self.db.volume_create(context, volume_properties)
        request_spec = {'volume_properties': volume,
                        'volume_type': volume_type,
                        'volume_id': volume['id'],
                        'ref': ref}
        self.scheduler_rpcapi.manage_existing(context, CONF.volume_topic,
                                              volume['id'],
                                              request_spec=request_spec)
        return volume
Exemplo n.º 59
0
 def copy_async_data(self, pvol, svol, is_vvol):
     path_list = []
     driver = self.generated_from
     try:
         with self.pair_flock:
             self.delete_pair(pvol, all_split=False, is_vvol=is_vvol)
             paired_info = self.command.get_paired_info(pvol)
             if paired_info['pvol'] is None:
                 driver.pair_initialize_connection(pvol)
                 path_list.append(pvol)
             driver.pair_initialize_connection(svol)
             path_list.append(svol)
             self.command.comm_create_pair(pvol, svol, is_vvol)
     except Exception:
         with excutils.save_and_reraise_exception():
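             # Tear down any pair connections initialized above; failures
             # are logged without masking the original error.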
             for ldev in path_list:
                 try:
                     driver.pair_terminate_connection(ldev)
                 except Exception as ex:
                     msg = basic_lib.set_msg(
                         310, ldev=ldev, reason=six.text_type(ex))
                     LOG.warning(msg)
Exemplo n.º 60
0
 def create_volume_from_snapshot(self, volume, snapshot):
     """Creates a volume from a snapshot."""
     label = utils.convert_uuid_to_es_fmt(volume['id'])
     size = volume['size']
     dst_vol = self._schedule_and_create_volume(label, size)
     try:
         src_vol = None
         src_vol = self._create_snapshot_volume(snapshot['id'])
         self._copy_volume_high_prior_readonly(src_vol, dst_vol)
         self._cache_volume(dst_vol)
         LOG.info(_("Created volume with label %s."), label)
     except exception.NetAppDriverException:
         with excutils.save_and_reraise_exception():
             self._client.delete_volume(dst_vol['volumeRef'])
     finally:
         if src_vol:
             try:
                 self._client.delete_snapshot_volume(src_vol['id'])
             except exception.NetAppDriverException as e:
                 LOG.error(_("Failure deleting snap vol. Error: %s."), e)
         else:
             LOG.warn(_("Snapshot volume not found."))