def manage_existing_get_size(self, volume, ref):
    """Return size of an existing Vdisk for manage_existing.

    existing_ref is a dictionary of the form:
    {'source-id': <uid of disk>}

    Optional elements are:
      'manage_if_in_use':  True/False (default is False)
        If set to True, a volume will be managed even if it is currently
        attached to a host system.
    """

    # Check that the reference is valid
    if 'source-id' not in ref:
        reason = _('Reference must contain source-id element.')
        raise exception.ManageExistingInvalidReference(existing_ref=ref,
                                                       reason=reason)

    # Check for existence of the vdisk
    vdisk = self._helpers.vdisk_by_uid(ref['source-id'])
    if vdisk is None:
        reason = (_('No vdisk with the UID specified by source-id %s.')
                  % ref['source-id'])
        raise exception.ManageExistingInvalidReference(existing_ref=ref,
                                                       reason=reason)

    # Check if the disk is in use, if we need to.
    manage_if_in_use = ref.get('manage_if_in_use', False)
    if (not manage_if_in_use and
            self._helpers.is_vdisk_in_use(vdisk['name'])):
        reason = _('The specified vdisk is mapped to a host.')
        raise exception.ManageExistingInvalidReference(existing_ref=ref,
                                                       reason=reason)

    return int(math.ceil(float(vdisk['capacity']) / units.Gi))
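# Illustrative sketch (not driver code): a valid existing_ref for the method
# above, and the GiB rounding it applies. The UID is made up.
#
#   ref = {'source-id': '6005076801AB813F1000000000000021',
#          'manage_if_in_use': True}
#
# A vdisk 'capacity' of 21474836480 bytes (20 GiB) maps to:
#
#   int(math.ceil(float(21474836480) / units.Gi))  # -> 20
#
# Partial GiBs round up, so the managed volume is never smaller than the
# backing vdisk.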
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
    LOG.info(_('Removing iscsi_target for: %s') % vol_id)
    vol_uuid_file = vol_name
    volume_path = os.path.join(self.volumes_dir, vol_uuid_file)
    if not os.path.exists(volume_path):
        LOG.warning(_('Volume path %s does not exist, '
                      'nothing to remove.') % volume_path)
        return

    if os.path.isfile(volume_path):
        iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_file)
    else:
        raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
    try:
        # NOTE(vish): --force is a workaround for bug:
        #             https://bugs.launchpad.net/cinder/+bug/1159948
        self._execute('tgt-admin',
                      '--force',
                      '--delete',
                      iqn,
                      run_as_root=True)
    except putils.ProcessExecutionError as e:
        LOG.error(_("Failed to remove iscsi target for volume "
                    "id:%(vol_id)s: %(e)s")
                  % {'vol_id': vol_id, 'e': str(e)})
        raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

    os.unlink(volume_path)
def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
    """Creates a snapshot of a logical volume.

    :param name: Name to assign to new snapshot
    :param source_lv_name: Name of Logical Volume to snapshot
    :param lv_type: Type of LV (default or thin)
    """
    source_lvref = self.get_volume(source_lv_name)
    if source_lvref is None:
        LOG.error(_("Unable to find LV: %s") % source_lv_name)
        return False
    cmd = ['lvcreate', '--name', name,
           '--snapshot', '%s/%s' % (self.vg_name, source_lv_name)]
    if lv_type != 'thin':
        size = source_lvref['size']
        cmd.extend(['-L', '%sg' % (size)])

    try:
        self._execute(*cmd,
                      root_helper=self._root_helper,
                      run_as_root=True)
    except putils.ProcessExecutionError as err:
        LOG.exception(_('Error creating snapshot'))
        LOG.error(_('Cmd     :%s') % err.cmd)
        LOG.error(_('StdOut  :%s') % err.stdout)
        LOG.error(_('StdErr  :%s') % err.stderr)
        raise
def _get_pool_info(self, poolid):
    """Query pool information."""
    ret, output = self.dpl.get_pool(poolid)
    if ret == errno.EAGAIN:
        ret, event_uuid = self._get_event_uuid(output)
        if ret == 0:
            status = self._wait_event(self.dpl.get_pool_status, poolid,
                                      event_uuid)
            if status['state'] != 'available':
                msg = _('Flexvisor failed to get pool info %(id)s: '
                        '%(status)s.') % {'id': poolid, 'status': ret}
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
            else:
                ret = 0
                output = status.get('output', {})
        else:
            LOG.error(_('Flexvisor failed to get pool info '
                        '(failed to get event) %s.') % poolid)
            raise exception.VolumeBackendAPIException(
                data="failed to get event")
    elif ret != 0:
        msg = _('Flexvisor failed to get pool info %(id)s: '
                '%(status)s.') % {'id': poolid, 'status': ret}
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    else:
        LOG.debug('Flexvisor succeeded in getting pool info.')
    return ret, output
def delete(self, name):
    """Delete logical volume or snapshot.

    :param name: Name of LV to delete
    """
    try:
        self._execute('lvremove',
                      '-f',
                      '%s/%s' % (self.vg_name, name),
                      root_helper=self._root_helper, run_as_root=True)
    except putils.ProcessExecutionError as err:
        mesg = (_('Error reported running lvremove: CMD: %(command)s, '
                  'RESPONSE: %(response)s') %
                {'command': err.cmd, 'response': err.stderr})
        LOG.error(mesg)

        LOG.warning(_('Attempting udev settle and retry of lvremove...'))
        self._execute('udevadm', 'settle',
                      root_helper=self._root_helper,
                      run_as_root=True)

        self._execute('lvremove',
                      '-f',
                      '%s/%s' % (self.vg_name, name),
                      root_helper=self._root_helper, run_as_root=True)
def _check(self, match, target_dict, cred_dict):
    try:
        match_kind, match_value = match.split(':', 1)
    except Exception:
        LOG.exception(_("Failed to understand rule %(match)r") % locals())
        # If the rule is invalid, fail closed
        return False

    func = None
    try:
        old_func = getattr(self, '_check_%s' % match_kind)
    except AttributeError:
        func = self._checks.get(match_kind, self._checks.get(None, None))
    else:
        LOG.warning(_("Inheritance-based rules are deprecated; update "
                      "_check_%s") % match_kind)
        func = (lambda brain, kind, value, target, cred:
                old_func(value, target, cred))

    if not func:
        LOG.error(_("No handler for matches of kind %s") % match_kind)
        # Fail closed
        return False

    return func(self, match_kind, match_value, target_dict, cred_dict)
def create_volume(self, name, size_str, lv_type='default', mirror_count=0):
    """Creates a logical volume on the object's VG.

    :param name: Name to use when creating Logical Volume
    :param size_str: Size to use when creating Logical Volume
    :param lv_type: Type of Volume (default or thin)
    :param mirror_count: Use LVM mirroring with specified count
    """
    if lv_type == 'thin':
        pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool)
        cmd = ['lvcreate', '-T', '-V', size_str, '-n', name, pool_path]
    else:
        cmd = ['lvcreate', '-n', name, self.vg_name, '-L', size_str]

    if mirror_count > 0:
        cmd.extend(['-m', mirror_count, '--nosync'])
        terras = int(size_str[:-1]) / 1024.0
        if terras >= 1.5:
            rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
            # NOTE(vish): Next power of two for region size. See:
            #             http://red.ht/U2BPOD
            cmd.extend(['-R', str(rsize)])

    try:
        self._execute(*cmd,
                      root_helper=self._root_helper,
                      run_as_root=True)
    except putils.ProcessExecutionError as err:
        LOG.exception(_('Error creating Volume'))
        LOG.error(_('Cmd     :%s') % err.cmd)
        LOG.error(_('StdOut  :%s') % err.stdout)
        LOG.error(_('StdErr  :%s') % err.stderr)
        raise
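# Illustrative sketch (not driver code): the mirror region-size rule above,
# traced for a few sizes. For volumes of 1.5 TiB and up, '-R' is set to the
# next power of two so the mirror log stays a manageable size.
import math


def _example_mirror_region_size(size_str):
    terras = int(size_str[:-1]) / 1024.0
    if terras >= 1.5:
        # Next power of two, matching create_volume() above.
        return int(2 ** math.ceil(math.log(terras) / math.log(2)))
    return None

# _example_mirror_region_size('1024g')  -> None (below the 1.5 TiB threshold)
# _example_mirror_region_size('2048g')  -> 2
# _example_mirror_region_size('3072g')  -> 4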
def activate_lv(self, name, is_snapshot=False):
    """Ensure that logical volume/snapshot logical volume is activated.

    :param name: Name of LV to activate
    :raises: putils.ProcessExecutionError
    """

    # This is a no-op if requested for a snapshot on a version
    # of LVM that doesn't support snapshot activation.
    # (Assume snapshot LV is always active.)
    if is_snapshot and not self.supports_snapshot_lv_activation:
        return

    lv_path = self.vg_name + '/' + self._mangle_lv_name(name)

    # Must pass --yes to activate both the snap LV and its origin LV.
    # Otherwise lvchange asks if you would like to do this interactively,
    # and fails.
    cmd = ['lvchange', '-a', 'y', '--yes']

    if self.supports_lvchange_ignoreskipactivation:
        cmd.append('-K')

    cmd.append(lv_path)

    try:
        self._execute(*cmd,
                      root_helper=self._root_helper,
                      run_as_root=True)
    except putils.ProcessExecutionError as err:
        LOG.exception(_('Error activating LV'))
        LOG.error(_('Cmd     :%s') % err.cmd)
        LOG.error(_('StdOut  :%s') % err.stdout)
        LOG.error(_('StdErr  :%s') % err.stderr)
        raise
def _connect(self, params):
    """Connect to rabbit.

    Re-establish any queues that may have been declared before if we
    are reconnecting.  Exceptions should be handled by the caller.
    """
    if self.connection:
        LOG.info(_("Reconnecting to AMQP server on "
                   "%(hostname)s:%(port)d") % params)
        try:
            self.connection.release()
        except self.connection_errors:
            pass
        # Setting this in case the next statement fails, though
        # it shouldn't be doing any network operations, yet.
        self.connection = None
    self.connection = kombu.connection.BrokerConnection(**params)
    self.connection_errors = self.connection.connection_errors
    if self.memory_transport:
        # Kludge to speed up tests.
        self.connection.transport.polling_interval = 0.0
    self.consumer_num = itertools.count(1)
    self.connection.connect()
    self.channel = self.connection.channel()
    # work around 'memory' transport bug in 1.1.3
    if self.memory_transport:
        self.channel._new_queue('ae.undeliver')
    for consumer in self.consumers:
        consumer.reconnect(self.channel)
    LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
             params)
def _create_ibmnas_snap(self, src, dest, mount_path):
    """Create volume clones and snapshots."""
    LOG.debug("Enter _create_ibmnas_snap: src %(src)s, dest %(dest)s"
              % {'src': src, 'dest': dest})
    if mount_path is not None:
        tmp_file_path = dest + '.snap'
        ssh_cmd = ['mkclone', '-p', dest, '-s', src, '-t', tmp_file_path]
        try:
            self._run_ssh(ssh_cmd)
        except processutils.ProcessExecutionError as e:
            msg = (_("Failed in _create_ibmnas_snap during "
                     "create_snapshot. Error: %s") % e.stderr)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # Now remove the tmp file
        tmp_file_local_path = os.path.join(mount_path,
                                           os.path.basename(tmp_file_path))
        self._execute('rm', '-f', tmp_file_local_path, run_as_root=True)
    else:
        ssh_cmd = ['mkclone', '-s', src, '-t', dest]
        try:
            self._run_ssh(ssh_cmd)
        except processutils.ProcessExecutionError as e:
            msg = (_("Failed in _create_ibmnas_snap during "
                     "create_volume_from_snapshot. Error: %s") % e.stderr)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
    LOG.debug("Exit _create_ibmnas_snap")
def fetch_verify_image(context, image_service, image_id, dest,
                       user_id=None, project_id=None, size=None):
    fetch(context, image_service, image_id, dest, None, None)

    with fileutils.remove_path_on_error(dest):
        data = qemu_img_info(dest)
        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_id)

        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
                        {'fmt': fmt, 'backing_file': backing_file}))

        # NOTE(xqueralt): If the image virtual size doesn't fit in the
        # requested volume there is no point on resizing it because it will
        # generate an unusable image.
        if size is not None and data.virtual_size > size:
            params = {'image_size': data.virtual_size, 'volume_size': size}
            reason = _("Size is %(image_size)dGB and doesn't fit in a "
                       "volume of size %(volume_size)dGB.") % params
            raise exception.ImageUnacceptable(image_id=image_id,
                                              reason=reason)
def accept(self, req, id, body):
    """Accept a new volume transfer."""
    transfer_id = id
    LOG.debug('Accepting volume transfer %s', transfer_id)
    if not self.is_valid_body(body, 'accept'):
        raise exc.HTTPBadRequest()

    context = req.environ['cinder.context']

    try:
        accept = body['accept']
        auth_key = accept['auth_key']
    except KeyError:
        msg = _("Incorrect request body format")
        raise exc.HTTPBadRequest(explanation=msg)

    LOG.audit(_("Accepting transfer %s"), transfer_id, context=context)

    try:
        accepted_transfer = self.transfer_api.accept(context, transfer_id,
                                                     auth_key)
    except exception.VolumeSizeExceedsAvailableQuota as error:
        raise exc.HTTPRequestEntityTooLarge(
            explanation=error.msg, headers={'Retry-After': 0})
    except exception.InvalidVolume as error:
        raise exc.HTTPBadRequest(explanation=error.msg)

    transfer = \
        self._view_builder.summary(req,
                                   dict(accepted_transfer.iteritems()))
    return transfer
def update(self, req, id, body):
    context = req.environ['cinder.context']
    authorize_update(context)
    project_id = id
    if not self.is_valid_body(body, 'quota_set'):
        msg = _("Missing required element quota_set in request body.")
        raise webob.exc.HTTPBadRequest(explanation=msg)

    bad_keys = []
    for key, value in body['quota_set'].items():
        if (key not in QUOTAS and key not in NON_QUOTA_KEYS):
            bad_keys.append(key)
            continue

    if len(bad_keys) > 0:
        msg = _("Bad key(s) in quota set: %s") % ",".join(bad_keys)
        raise webob.exc.HTTPBadRequest(explanation=msg)

    for key in body['quota_set'].keys():
        if key in NON_QUOTA_KEYS:
            continue

        value = self._validate_quota_limit(body['quota_set'][key])
        try:
            db.quota_update(context, project_id, key, value)
        except exception.ProjectQuotaNotFound:
            db.quota_create(context, project_id, key, value)
        except exception.AdminRequired:
            raise webob.exc.HTTPForbidden()

    return {'quota_set': self._get_quotas(context, id)}
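# Illustrative request body (hypothetical path and limits) accepted by
# update(): all limits live under the 'quota_set' key; unrecognized keys are
# collected into bad_keys and rejected with HTTP 400.
#
#   PUT /v2/<admin_project_id>/os-quota-sets/<project_id>
#   {"quota_set": {"volumes": 20, "snapshots": 20, "gigabytes": 1000}}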
def delete_volume(self, volume):
    """Delete SolidFire Volume from device.

    SolidFire allows multiple volumes with same name,
    volumeID is what's guaranteed unique.
    """
    LOG.debug("Enter SolidFire delete_volume...")

    sfaccount = self._get_sfaccount(volume['project_id'])
    if sfaccount is None:
        LOG.error(_("Account for Volume ID %s was not found on "
                    "the SolidFire Cluster!") % volume['id'])
        LOG.error(_("This usually means the volume was never "
                    "successfully created."))
        return

    params = {'accountID': sfaccount['accountID']}
    sf_vol = self._get_sf_volume(volume['id'], params)

    if sf_vol is not None:
        params = {'volumeID': sf_vol['volumeID']}
        data = self._issue_api_request('DeleteVolume', params)
        if 'result' not in data:
            msg = _("Failed to delete SolidFire Volume: %s") % data
            raise exception.SolidFireAPIException(msg)
    else:
        LOG.error(_("Volume ID %s was not found on "
                    "the SolidFire Cluster!"), volume['id'])

    LOG.debug("Leaving SolidFire delete_volume")
def create(self, req, body):
    """Create a new volume transfer."""
    LOG.debug('Creating new volume transfer %s', body)
    if not self.is_valid_body(body, 'transfer'):
        raise exc.HTTPBadRequest()

    context = req.environ['cinder.context']

    try:
        transfer = body['transfer']
        volume_id = transfer['volume_id']
    except KeyError:
        msg = _("Incorrect request body format")
        raise exc.HTTPBadRequest(explanation=msg)

    name = transfer.get('name', None)

    LOG.audit(_("Creating transfer of volume %s"),
              volume_id, context=context)

    try:
        new_transfer = self.transfer_api.create(context, volume_id, name)
    except exception.InvalidVolume as error:
        raise exc.HTTPBadRequest(explanation=error.msg)
    except exception.VolumeNotFound as error:
        raise exc.HTTPNotFound(explanation=error.msg)

    transfer = self._view_builder.create(req,
                                         dict(new_transfer.iteritems()))
    return transfer
def _prepare_fc_map(self, fc_map_id, timeout):
    self.ssh.prestartfcmap(fc_map_id)
    mapping_ready = False
    wait_time = 5
    max_retries = (timeout / wait_time) + 1
    for try_number in range(1, max_retries):
        mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id)
        if (mapping_attrs is None or
                'status' not in mapping_attrs):
            break
        if mapping_attrs['status'] == 'prepared':
            mapping_ready = True
            break
        elif mapping_attrs['status'] == 'stopped':
            self.ssh.prestartfcmap(fc_map_id)
        elif mapping_attrs['status'] != 'preparing':
            msg = (_('Unexpected mapping status %(status)s for mapping '
                     '%(id)s. Attributes: %(attr)s')
                   % {'status': mapping_attrs['status'],
                      'id': fc_map_id,
                      'attr': mapping_attrs})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        greenthread.sleep(wait_time)

    if not mapping_ready:
        msg = (_('Mapping %(id)s prepare failed to complete within the '
                 'allotted %(to)d seconds timeout. Terminating.')
               % {'id': fc_map_id, 'to': timeout})
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)
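# Generic sketch of the poll-until-ready pattern used above (assumptions:
# check() returns the current status string; plain time.sleep stands in for
# greenthread.sleep). The loop polls up to timeout / wait_time times,
# mirroring the max_retries bound above (24 polls for a 120-second budget).
import time


def _example_poll_until_prepared(check, timeout, wait_time=5):
    for _attempt in range(1, (timeout / wait_time) + 1):
        if check() == 'prepared':
            return True
        time.sleep(wait_time)
    return False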
def _get_sf_volume(self, uuid, params):
    data = self._issue_api_request('ListVolumesForAccount', params)
    if 'result' not in data:
        msg = _("Failed to get SolidFire Volume: %s") % data
        raise exception.SolidFireAPIException(msg)

    found_count = 0
    sf_volref = None
    for v in data['result']['volumes']:
        if uuid in v['name']:
            found_count += 1
            sf_volref = v
            LOG.debug("Mapped SolidFire volumeID %(sfid)s "
                      "to cinder ID %(uuid)s." %
                      {'sfid': v['volumeID'], 'uuid': uuid})

    if found_count == 0:
        # NOTE(jdg): Previously we would raise here, but there are cases
        # where this might be a cleanup for a failed delete.
        # Until we get better states we'll just log an error
        LOG.error(_("Volume %s, not found on SF Cluster."), uuid)

    if found_count > 1:
        LOG.error(_("Found %(count)s volumes mapped to id: %(uuid)s.") %
                  {'count': found_count, 'uuid': uuid})
        raise exception.DuplicateSfVolumeNames(vol_name=uuid)

    return sf_volref
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    LOG.debug('Running cmd (SSH): %s', cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))

    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()

    exit_status = channel.recv_exit_status()

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug('Result was %s' % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)

    return (stdout, stderr)
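# Illustrative usage (host and credentials are placeholders; assumes paramiko
# is available): ssh_execute() expects an already-connected client whose
# exec_command() returns (stdin, stdout, stderr) streams, as paramiko's does.
import paramiko


def _example_ssh_usage():
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect('192.0.2.10', username='cinder', password='secret')
    try:
        stdout, stderr = ssh_execute(client, 'vgs --noheadings')
    finally:
        client.close()
    return stdout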
def _error(self, inner, req):
    if not isinstance(inner, exception.QuotaError):
        LOG.exception(_("Caught error: %s"), unicode(inner))
    safe = getattr(inner, 'safe', False)
    headers = getattr(inner, 'headers', None)
    status = getattr(inner, 'code', 500)
    if status is None:
        status = 500

    msg_dict = dict(url=req.url, status=status)
    LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict)
    outer = self.status_to_type(status)
    if headers:
        outer.headers = headers
    # NOTE(johannes): We leave the explanation empty here on
    # purpose. It could possibly have sensitive information
    # that should not be returned back to the user. See
    # bugs 868360 and 874472
    # NOTE(eglynn): However, it would be over-conservative and
    # inconsistent with the EC2 API to hide every exception,
    # including those that are safe to expose, see bug 1021373
    if safe:
        msg = (inner.msg if isinstance(inner, exception.CinderException)
               else unicode(inner))
        params = {'exception': inner.__class__.__name__,
                  'explanation': msg}
        outer.explanation = _('%(exception)s: %(explanation)s') % params
    return wsgi.Fault(outer)
def create(self, context, volume_id, display_name):
    """Creates an entry in the transfers table."""
    volume_api.check_policy(context, 'create_transfer')
    LOG.info("Generating transfer record for volume %s" % volume_id)
    volume_ref = self.db.volume_get(context, volume_id)
    if volume_ref['status'] != "available":
        raise exception.InvalidVolume(reason=_("status must be available"))

    # The salt is just a short random string.
    salt = self._get_random_string(CONF.volume_transfer_salt_length)
    auth_key = self._get_random_string(CONF.volume_transfer_key_length)
    crypt_hash = self._get_crypt_hash(salt, auth_key)

    # TODO(ollie): Transfer expiry needs to be implemented.
    transfer_rec = {'volume_id': volume_id,
                    'display_name': display_name,
                    'salt': salt,
                    'crypt_hash': crypt_hash,
                    'expires_at': None}

    try:
        transfer = self.db.transfer_create(context, transfer_rec)
    except Exception:
        LOG.error(_("Failed to create transfer record for %s") % volume_id)
        raise
    return {'id': transfer['id'],
            'volume_id': transfer['volume_id'],
            'display_name': transfer['display_name'],
            'auth_key': auth_key,
            'created_at': transfer['created_at']}
def start_execute_cmd(self, cmd, type):
    fsc_ip = self.get_ip_port()
    manage_ip = self.get_manage_ip()
    ip_num = len(fsc_ip)

    LOG.debug(_("fsc_ip is %s") % fsc_ip)
    if ip_num <= 0:
        return None
    if ip_num > 3:
        ip_num = 3

    def _build_cmd_args(ip):
        # Strip embedded newlines from the addresses before building
        # the rootwrap command line.
        return ('sudo cinder-rootwrap /etc/cinder/rootwrap.conf '
                + fsc_cli + ' --ip ' + ip.replace('\n', '')
                + ' --manage_ip ' + manage_ip.replace('\n', '')
                + ' --port ' + fsc_port + ' ' + cmd)

    exec_result = ''
    result = ''
    if type:
        for ip in fsc_ip:
            cmd_args = _build_cmd_args(ip)
            LOG.debug(_("DSWARE cmd_args is %s") % cmd_args)
            exec_result = self.execute(cmd_args)
            if exec_result:
                for line in exec_result:
                    if re.search('^result=0', line):
                        return exec_result
                    elif re.search('^result=50150007', line):
                        return 'result=0'
                    elif re.search('^result=50150008', line):
                        return 'result=0'
                    elif re.search('^result=50', line):
                        return exec_result
        return exec_result
    else:
        for ip in fsc_ip:
            cmd_args = _build_cmd_args(ip)
            LOG.debug(_("DSWARE cmd_args is %s") % cmd_args)
            exec_result = self.execute(cmd_args)
            if exec_result:
                for line in exec_result:
                    if re.search('^result=', line):
                        result = line
                    if re.search('^result=0', line):
                        return line
                    elif re.search('^result=50150007', line):
                        return 'result=0'
                    elif re.search('^result=50150008', line):
                        return 'result=0'
                    elif re.search('^result=50', line):
                        return line
        return result
def register_opts(conf):
    """Registration of options for this driver."""
    # NOTE(ewindisch): ZMQ_CTX and matchmaker
    # are initialized here as this is as good
    # an initialization method as any.

    # We memoize through these globals
    global ZMQ_CTX
    global matchmaker
    global CONF

    if not CONF:
        conf.register_opts(zmq_opts)
        CONF = conf

    # Don't re-set, if this method is called twice.
    if not ZMQ_CTX:
        ZMQ_CTX = zmq.Context(conf.rpc_zmq_contexts)
    if not matchmaker:
        # rpc_zmq_matchmaker should be set to a 'module.Class'
        mm_path = conf.rpc_zmq_matchmaker.split('.')
        mm_module = '.'.join(mm_path[:-1])
        mm_class = mm_path[-1]

        # Only initialize a class.
        if mm_path[-1][0] not in string.ascii_uppercase:
            LOG.error(_("Matchmaker could not be loaded.\n"
                        "rpc_zmq_matchmaker is not a class."))
            raise RPCException(_("Error loading Matchmaker."))

        mm_impl = importutils.import_module(mm_module)
        mm_constructor = getattr(mm_impl, mm_class)
        matchmaker = mm_constructor()
def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
             zmq_type_out=None, in_bind=True, out_bind=True,
             subscribe=None):

    LOG.info(_("Registering reactor"))

    if zmq_type_in not in (zmq.PULL, zmq.SUB):
        raise RPCException("Bad input socktype")

    # Items push in.
    inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                    subscribe=subscribe)

    self.proxies[inq] = proxy
    self.sockets.append(inq)

    LOG.info(_("In reactor registered"))

    if not out_addr:
        return

    if zmq_type_out not in (zmq.PUSH, zmq.PUB):
        raise RPCException("Bad output socktype")

    # Items push out.
    outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)

    self.mapping[inq] = outq
    self.mapping[outq] = inq
    self.sockets.append(outq)

    LOG.info(_("Out reactor registered"))
def do_setup(self, context):
    """Any initialization the volume driver does while starting."""
    super(GlusterfsDriver, self).do_setup(context)

    self._nova = compute.API()

    config = self.configuration.glusterfs_shares_config
    if not config:
        msg = (_("There's no Gluster config file configured (%s)") %
               'glusterfs_shares_config')
        LOG.warn(msg)
        raise exception.GlusterfsException(msg)
    if not os.path.exists(config):
        msg = (_("Gluster config file at %(config)s doesn't exist") %
               {'config': config})
        LOG.warn(msg)
        raise exception.GlusterfsException(msg)

    self.shares = {}

    try:
        self._execute('mount.glusterfs', check_exit_code=False)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            raise exception.GlusterfsException(
                _('mount.glusterfs is not installed'))
        else:
            raise

    self._refresh_mounts()
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
    self.sock = ZMQ_CTX.socket(zmq_type)
    self.addr = addr
    self.type = zmq_type
    self.subscriptions = []

    # Support failures on sending/receiving on wrong socket type.
    self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
    self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
    self.can_sub = zmq_type in (zmq.SUB, )

    # Support list, str, & None for subscribe arg (cast to list)
    do_sub = {
        list: subscribe,
        str: [subscribe],
        type(None): []
    }[type(subscribe)]

    for f in do_sub:
        self.subscribe(f)

    str_data = {'addr': addr, 'type': self.socket_s(),
                'subscribe': subscribe, 'bind': bind}

    LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
    LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
    LOG.debug(_("-> bind: %(bind)s"), str_data)

    try:
        if bind:
            self.sock.bind(addr)
        else:
            self.sock.connect(addr)
    except Exception:
        raise RPCException(_("Could not open socket."))
def delete_snapshot(self, snapshot):
    """Deletes a snapshot."""
    ret, output = self.dpl.delete_vdev_snapshot(
        self._conver_uuid2hex(snapshot['volume_id']),
        self._conver_uuid2hex(snapshot['id']))
    if ret == errno.EAGAIN:
        ret, event_uuid = self._get_event_uuid(output)
        if ret == 0:
            status = self._wait_event(self.dpl.get_vdev_status,
                                      snapshot['volume_id'],
                                      event_uuid)
            if status['state'] != 'available':
                msg = _('Flexvisor failed to delete snapshot %(id)s: '
                        '%(status)s.') % {'id': snapshot['id'],
                                          'status': ret}
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
        else:
            msg = _('Flexvisor failed to delete snapshot (failed to '
                    'get event) %(id)s.') % {'id': snapshot['id']}
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
    elif ret == errno.ENAVAIL:
        msg = _('Flexvisor snapshot %(id)s does not exist.') % \
            {'id': snapshot['id']}
        LOG.info(msg)
    elif ret != 0:
        msg = _('Flexvisor failed to delete snapshot %(id)s: '
                '%(status)s.') % {'id': snapshot['id'], 'status': ret}
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    else:
        msg = _('Flexvisor succeeded in deleting '
                'snapshot %(id)s.') % {'id': snapshot['id']}
        LOG.info(msg)
def create_cloned_volume(self, volume, src_vref):
    """Creates a clone of the specified volume."""
    LOG.info(_('Cloning volume %(src)s to volume %(dst)s') %
             {'src': src_vref['id'],
              'dst': volume['id']})

    if src_vref['status'] != 'available':
        msg = _("Volume status must be 'available'.")
        raise exception.InvalidVolume(msg)

    volume_name = CONF.volume_name_template % volume['id']

    volume_info = {'provider_location': src_vref['provider_location'],
                   'size': src_vref['size'],
                   'id': volume['id'],
                   'name': volume_name,
                   'status': src_vref['status']}
    temp_snapshot = {'volume_name': volume_name,
                     'size': src_vref['size'],
                     'volume_size': src_vref['size'],
                     'name': 'clone-snap-%s' % src_vref['id'],
                     'volume_id': src_vref['id'],
                     'id': 'tmp-snap-%s' % src_vref['id'],
                     'volume': src_vref}
    self._create_snapshot(temp_snapshot)
    try:
        self._copy_volume_from_snapshot(temp_snapshot,
                                        volume_info,
                                        src_vref['size'])
    finally:
        self._delete_snapshot(temp_snapshot)

    return {'provider_location': src_vref['provider_location']}
def _multi_send(method, context, topic, msg, timeout=None):
    """Wraps the sending of messages.

    Dispatches to the matchmaker and sends message to all relevant hosts.
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = matchmaker.queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if len(queues) == 0:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout("No match from matchmaker.")

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context,
                             _topic, _topic, msg, timeout)
            return
        return method(_addr, context, _topic, _topic, msg, timeout)
def create_snapshot(self, snapshot):
    """Creates a snapshot."""
    ret, output = self.dpl.create_vdev_snapshot(
        self._conver_uuid2hex(snapshot['volume_id']),
        self._conver_uuid2hex(snapshot['id']),
        snapshot.get('display_name', ''),
        snapshot.get('display_description', ''))

    if ret == errno.EAGAIN:
        ret, event_uuid = self._get_event_uuid(output)
        if ret == 0:
            status = self._wait_event(self.dpl.get_vdev_status,
                                      snapshot['volume_id'],
                                      event_uuid)
            if status['state'] != 'available':
                msg = _('Flexvisor failed to create snapshot for volume '
                        '%(id)s: %(status)s.') % \
                    {'id': snapshot['volume_id'], 'status': ret}
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
        else:
            msg = _('Flexvisor failed to create snapshot for volume '
                    '(failed to get event) %(id)s.') % \
                {'id': snapshot['volume_id']}
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
    elif ret != 0:
        msg = _('Flexvisor failed to create snapshot for volume %(id)s: '
                '%(status)s.') % {'id': snapshot['volume_id'],
                                  'status': ret}
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
def terminate_connection(self, volume, connector, **kwargs):
    """Disallow connection from connector."""
    ret, output = self.dpl.unassign_vdev(
        self._conver_uuid2hex(volume['id']),
        connector['initiator'])

    if ret == errno.EAGAIN:
        ret, event_uuid = self._get_event_uuid(output)
        if ret == 0:
            status = self._wait_event(
                self.dpl.get_vdev_status, volume['id'], event_uuid)
            if status['state'] == 'error':
                ret = errno.EFAULT
                msg = _('Flexvisor failed to unassign volume %(id)s: '
                        '%(status)s.') % {'id': volume['id'],
                                          'status': status}
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
        else:
            msg = _('Flexvisor failed to unassign volume (get event) '
                    '%(id)s.') % {'id': volume['id']}
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
    elif ret == errno.ENODATA:
        msg = _('Flexvisor already unassigned volume '
                '%(id)s.') % {'id': volume['id']}
        LOG.info(msg)
    elif ret != 0:
        msg = _('Flexvisor failed to unassign volume %(id)s: '
                '%(status)s.') % {'id': volume['id'], 'status': ret}
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
class NoFibreChannelVolumeDeviceFound(BrickException):
    message = _("Unable to find a Fibre Channel volume device.")


class InvalidSortKey(Exception):
    message = _("Sort key supplied was not valid.")
def _get_default_deleted_value(table):
    if isinstance(table.c.id.type, Integer):
        return 0
    if isinstance(table.c.id.type, String):
        return ""
    raise ColumnError(_("Unsupported id columns type"))
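# Illustrative usage (hypothetical tables): the default 'deleted' marker is
# derived from the id column's type, 0 for integer ids and "" for string ids.
from sqlalchemy import Column, Integer, MetaData, String, Table


def _example_default_deleted():
    meta = MetaData()
    int_ids = Table('volumes', meta,
                    Column('id', Integer, primary_key=True))
    str_ids = Table('snapshots', meta,
                    Column('id', String(36), primary_key=True))
    return (_get_default_deleted_value(int_ids),   # -> 0
            _get_default_deleted_value(str_ids))   # -> ""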
def factory(protocol, root_helper, driver=None,
            execute=putils.execute, use_multipath=False,
            device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
            *args, **kwargs):
    """Build a Connector object based upon protocol."""
    LOG.debug("Factory for %s" % protocol)
    protocol = protocol.upper()
    if protocol == "ISCSI":
        return ISCSIConnector(root_helper=root_helper,
                              driver=driver,
                              execute=execute,
                              use_multipath=use_multipath,
                              device_scan_attempts=device_scan_attempts,
                              *args, **kwargs)
    elif protocol == "ISER":
        return ISERConnector(root_helper=root_helper,
                             driver=driver,
                             execute=execute,
                             use_multipath=use_multipath,
                             device_scan_attempts=device_scan_attempts,
                             *args, **kwargs)
    elif protocol == "FIBRE_CHANNEL":
        return FibreChannelConnector(
            root_helper=root_helper,
            driver=driver,
            execute=execute,
            use_multipath=use_multipath,
            device_scan_attempts=device_scan_attempts,
            *args, **kwargs)
    elif protocol == "AOE":
        return AoEConnector(root_helper=root_helper,
                            driver=driver,
                            execute=execute,
                            device_scan_attempts=device_scan_attempts,
                            *args, **kwargs)
    elif protocol == "NFS" or protocol == "GLUSTERFS":
        return RemoteFsConnector(mount_type=protocol.lower(),
                                 root_helper=root_helper,
                                 driver=driver,
                                 execute=execute,
                                 device_scan_attempts=device_scan_attempts,
                                 *args, **kwargs)
    elif protocol == "LOCAL":
        return LocalConnector(root_helper=root_helper,
                              driver=driver,
                              execute=execute,
                              device_scan_attempts=device_scan_attempts,
                              *args, **kwargs)
    else:
        msg = (_("Invalid InitiatorConnector protocol "
                 "specified %(protocol)s") %
               dict(protocol=protocol))
        raise ValueError(msg)
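# Illustrative usage (the root_helper string is an assumption; deployments
# normally build it from their rootwrap configuration). Protocol matching is
# case-insensitive since factory() upper-cases its argument:
#
#   connector = factory('iscsi',
#                       root_helper='sudo cinder-rootwrap '
#                                   '/etc/cinder/rootwrap.conf',
#                       use_multipath=True)
#   fc = factory('fibre_channel', root_helper='sudo')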
def connect_volume(self, connection_properties):
    """Attach the volume to instance_name.

    connection_properties for Fibre Channel must include:
    target_portal - ip and optional port
    target_iqn - iSCSI Qualified Name
    target_lun - LUN id of the volume
    """
    LOG.debug("execute = %s" % self._execute)
    device_info = {'type': 'block'}

    ports = connection_properties['target_wwn']
    wwns = []
    # we support a list of wwns or a single wwn
    if isinstance(ports, list):
        for wwn in ports:
            wwns.append(str(wwn))
    elif isinstance(ports, basestring):
        wwns.append(str(ports))

    # We need to look for wwns on every hba
    # because we don't know ahead of time
    # where they will show up.
    hbas = self._linuxfc.get_fc_hbas_info()
    host_devices = []
    for hba in hbas:
        pci_num = self._get_pci_num(hba)
        if pci_num is not None:
            for wwn in wwns:
                target_wwn = "0x%s" % wwn.lower()
                host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" %
                               (pci_num,
                                target_wwn,
                                connection_properties.get('target_lun', 0)))
                host_devices.append(host_device)

    if len(host_devices) == 0:
        # this is empty because we don't have any FC HBAs
        msg = _("We are unable to locate any Fibre Channel devices")
        LOG.warn(msg)
        raise exception.NoFibreChannelHostsFound()

    # The /dev/disk/by-path/... node is not always present immediately
    # We only need to find the first device.  Once we see the first device
    # multipath will have any others.
    def _wait_for_device_discovery(host_devices):
        tries = self.tries
        for device in host_devices:
            LOG.debug(_("Looking for Fibre Channel dev %(device)s"),
                      {'device': device})
            if os.path.exists(device):
                self.host_device = device
                # get the /dev/sdX device.  This is used
                # to find the multipath device.
                self.device_name = os.path.realpath(device)
                raise loopingcall.LoopingCallDone()

        if self.tries >= self.device_scan_attempts:
            msg = _("Fibre Channel volume device not found.")
            LOG.error(msg)
            raise exception.NoFibreChannelVolumeDeviceFound()

        LOG.warn(_("Fibre volume not yet found. "
                   "Will rescan & retry.  Try number: %(tries)s"),
                 {'tries': tries})

        self._linuxfc.rescan_hosts(hbas)
        self.tries = self.tries + 1

    self.host_device = None
    self.device_name = None
    self.tries = 0
    timer = loopingcall.FixedIntervalLoopingCall(
        _wait_for_device_discovery, host_devices)
    timer.start(interval=2).wait()

    tries = self.tries
    if self.host_device is not None and self.device_name is not None:
        LOG.debug(_("Found Fibre Channel volume %(name)s "
                    "(after %(tries)s rescans)"),
                  {'name': self.device_name, 'tries': tries})

    # see if the new drive is part of a multipath
    # device.  If so, we'll use the multipath device.
    if self.use_multipath:
        mdev_info = self._linuxscsi.find_multipath_device(self.device_name)
        if mdev_info is not None:
            LOG.debug(_("Multipath device discovered %(device)s")
                      % {'device': mdev_info['device']})
            device_path = mdev_info['device']
            devices = mdev_info['devices']
            device_info['multipath_id'] = mdev_info['id']
        else:
            # we didn't find a multipath device.
            # so we assume the kernel only sees 1 device
            device_path = self.host_device
            dev_info = self._linuxscsi.get_device_info(self.device_name)
            devices = [dev_info]
    else:
        device_path = self.host_device
        dev_info = self._linuxscsi.get_device_info(self.device_name)
        devices = [dev_info]

    device_info['path'] = device_path
    device_info['devices'] = devices
    return device_info
def connect_volume(self, connection_properties):
    """Attach the volume to instance_name.

    connection_properties for iSCSI must include:
    target_portal - ip and optional port
    target_iqn - iSCSI Qualified Name
    target_lun - LUN id of the volume
    """

    device_info = {'type': 'block'}

    if self.use_multipath:
        # multipath installed, discovering other targets if available
        target_portal = connection_properties['target_portal']
        out = self._run_iscsiadm_bare(['-m',
                                       'discovery',
                                       '-t',
                                       'sendtargets',
                                       '-p',
                                       target_portal],
                                      check_exit_code=[0, 255])[0] or ""

        for ip, iqn in self._get_target_portals_from_iscsiadm_output(out):
            props = connection_properties.copy()
            props['target_portal'] = ip
            props['target_iqn'] = iqn
            self._connect_to_iscsi_portal(props)

        self._rescan_iscsi()
    else:
        self._connect_to_iscsi_portal(connection_properties)

    host_device = self._get_device_path(connection_properties)

    # The /dev/disk/by-path/... node is not always present immediately
    # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
    tries = 0
    while not os.path.exists(host_device):
        if tries >= self.device_scan_attempts:
            raise exception.VolumeDeviceNotFound(device=host_device)

        LOG.warn(_("ISCSI volume not yet found at: %(host_device)s. "
                   "Will rescan & retry.  Try number: %(tries)s"),
                 {'host_device': host_device,
                  'tries': tries})

        # The rescan isn't documented as being necessary(?), but it helps
        self._run_iscsiadm(connection_properties, ("--rescan",))

        tries = tries + 1
        if not os.path.exists(host_device):
            time.sleep(tries ** 2)

    if tries != 0:
        LOG.debug(_("Found iSCSI node %(host_device)s "
                    "(after %(tries)s rescans)"),
                  {'host_device': host_device,
                   'tries': tries})

    if self.use_multipath:
        # we use the multipath device instead of the single path device
        self._rescan_multipath()
        multipath_device = self._get_multipath_device_name(host_device)
        if multipath_device is not None:
            host_device = multipath_device

    device_info['path'] = host_device
    return device_info
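# Illustrative: the backoff above sleeps tries ** 2 seconds between rescans,
# so if device_scan_attempts were 3, a device that never appears would be
# retried after sleeps of 1s, 4s, and 9s before VolumeDeviceNotFound is
# raised.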
class VolumeDeviceNotFound(BrickException):
    message = _("Volume device not found at %(device)s.")


class InvalidParameterValue(Invalid):
    message = _("%(err)s")


class NotFound(BrickException):
    message = _("Resource could not be found.")
    code = 404
    safe = True


class NoFibreChannelHostsFound(BrickException):
    message = _("We are unable to locate any Fibre Channel devices.")


class ISCSITargetRemoveFailed(BrickException):
    message = _("Failed to remove iscsi target for volume %(volume_id)s.")


class Invalid(BrickException):
    message = _("Unacceptable parameters.")
    code = 400


class VolumeGroupCreationFailed(BrickException):
    message = _('Failed to create Volume Group: %(vg_name)s')


class ISCSITargetAttachFailed(BrickException):
    message = _("Failed to attach iSCSI target for volume %(volume_id)s.")
def _verify_rtstool(self):
    try:
        self._execute('rtstool', 'verify')
    except (OSError, putils.ProcessExecutionError):
        LOG.error(_('rtstool is not installed correctly'))
        raise
class ISCSITargetCreateFailed(BrickException):
    message = _("Failed to create iscsi target for volume %(volume_id)s.")
def create(self):
    LOG.debug(_('Pool creating new connection'))
    return self.connection_cls(self.conf)
class VolumeGroupNotFound(BrickException):
    message = _('Unable to find Volume Group: %(vg_name)s')


class InvalidPeriodicTaskArg(Exception):
    message = _("Unexpected argument for periodic task creation: %(arg)s.")
def create_iscsi_target(self, name, tid, lun, path,
                        chap_auth=None, **kwargs):
    # Note(jdg) tid and lun aren't used by TgtAdm but remain for
    # compatibility
    fileutils.ensure_tree(self.volumes_dir)

    vol_id = name.split(':')[1]
    if chap_auth is None:
        volume_conf = """
            <target %s>
                backing-store %s
            </target>
        """ % (name, path)
    else:
        volume_conf = """
            <target %s>
                backing-store %s
                %s
            </target>
        """ % (name, path, chap_auth)

    LOG.info(_('Creating iscsi_target for: %s') % vol_id)
    volumes_dir = self.volumes_dir
    volume_path = os.path.join(volumes_dir, vol_id)

    f = open(volume_path, 'w+')
    f.write(volume_conf)
    f.close()

    old_persist_file = None
    old_name = kwargs.get('old_name', None)
    if old_name is not None:
        old_persist_file = os.path.join(volumes_dir, old_name)

    try:
        (out, err) = self._execute('tgt-admin', '--update', name,
                                   run_as_root=True)

        LOG.debug("StdOut from tgt-admin --update: %s" % out)
        LOG.debug("StdErr from tgt-admin --update: %s" % err)

        # Grab targets list for debug
        # Consider adding a check for lun 0 and 1 for tgtadm
        # before considering this as valid
        (out, err) = self._execute('tgtadm',
                                   '--lld', 'iscsi',
                                   '--op', 'show',
                                   '--mode', 'target',
                                   run_as_root=True)
        LOG.debug("Targets after update: %s" % out)
    except putils.ProcessExecutionError as e:
        LOG.error(_("Failed to create iscsi target for volume "
                    "id:%(vol_id)s: %(e)s")
                  % {'vol_id': vol_id, 'e': str(e)})

        # Don't forget to remove the persistent file we created
        os.unlink(volume_path)
        raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

    iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
    tid = self._get_target(iqn)
    if tid is None:
        LOG.error(_("Failed to create iscsi target for volume "
                    "id:%(vol_id)s. Please ensure your tgtd config file "
                    "contains 'include %(volumes_dir)s/*'")
                  % {'vol_id': vol_id, 'volumes_dir': volumes_dir})
        raise exception.NotFound()

    # NOTE(jdg): Sometimes we have some issues with the backing lun
    # not being created, believe this is due to a device busy
    # or something related, so we're going to add some code
    # here that verifies the backing lun (lun 1) was created
    # and we'll try and recreate it if it's not there
    if not self._verify_backing_lun(iqn, tid):
        try:
            self._recreate_backing_lun(iqn, tid, name, path)
        except putils.ProcessExecutionError:
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        # Finally check once more and if no go, fail and punt
        if not self._verify_backing_lun(iqn, tid):
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

    if old_persist_file is not None and os.path.exists(old_persist_file):
        os.unlink(old_persist_file)

    return tid
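# Illustrative: the persist file written above, rendered with hypothetical
# values (the middle indented line is replaced by whatever chap_auth string
# the caller supplies, and is omitted entirely when chap_auth is None):
#
#   <target iqn.2010-10.org.openstack:volume-<uuid>>
#       backing-store /dev/stack-volumes/volume-<uuid>
#       <chap_auth line, if any>
#   </target>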
def __init__(self):
    # Array of tuples. Index [2] toggles negation, [3] is last-if-true
    self.bindings = []

    self.no_heartbeat_msg = _('Matchmaker does not implement '
                              'registration or heartbeat.')
def _add_unique_id(msg):
    """Add unique_id for checking duplicate messages."""
    unique_id = uuid.uuid4().hex
    msg.update({UNIQUE_ID: unique_id})
    LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
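# Illustrative: after _add_unique_id(), an outgoing RPC message carries a
# 32-character hex marker under the UNIQUE_ID key for duplicate detection:
#
#   msg = {'method': 'create_volume', 'args': {}}
#   _add_unique_id(msg)
#   # msg[UNIQUE_ID] is now something like 'd6c72f4de1a64a4f8a3c...'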
class DBInvalidUnicodeParameter(Exception):
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")
class DeprecatedConfig(Exception):
    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))
def _consume(sock):
    LOG.info(_("Consuming socket"))
    while True:
        self.consume(sock)
class MatchMakerException(Exception):
    """Signified a match could not be found."""
    message = _("Match not found by MatchMaker.")
def recv(self):
    if not self.can_recv:
        raise RPCException(_("You cannot recv on this socket."))
    return self.sock.recv_multipart()
def _deserialize(data):
    """Deserialization wrapper."""
    LOG.debug(_("Deserializing: %s"), data)
    return jsonutils.loads(data)
def __init__(self, rule):
    msg = _("Policy doesn't allow %s to be performed.") % rule
    super(PolicyNotAuthorized, self).__init__(msg)
def send(self, data):
    if not self.can_send:
        raise RPCException(_("You cannot send on this socket."))
    self.sock.send_multipart(data)