def _execute_cmd(self, cmd):
    """Execute CLI with status update.

    Executes CLI commands such as cfgsave where a status return is
    expected.
    """
    with Timeout(self.cmd_timeout):
        try:
            self.info(_("running: '%s'") % cmd)
            stdin, stdout, stderr = self.client.exec_command(cmd)
            stdin.write("%s\n" % ZoneConstant.YES)
            # Wait for command completion.
            exit_status = stdout.channel.recv_exit_status()
            stdin.flush()
            data = stdout.read()
            self.debug(_("Returned data was %r") % data)
            stdin.close()
            stdout.close()
            stderr.close()
        except Timeout as timeout:
            # Close the connection so that nobody tries to re-use it.
            self.close_connection()
            desc = self.exception_desc
            raise exception.FabricCommandTimeoutException(
                desc, timeout=self.cmd_timeout, cmd=cmd)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                msg = _("Error running command via ssh: %s") % e
                self.error(msg)

def wrapped_func(*args, **kwarg):
    if fn.__name__ == 'create':
        LOG = logging.getLogger(fn.__module__)
        try:
            idx_svol = inspect.getargspec(fn).args.index("source_volume")
            idx_voltp = inspect.getargspec(fn).args.index("volume_type")
        except Exception as e:
            LOG.error(_("cinder volume_create interface has been changed. "
                        "PowerVC monkey patch for volume clone won't work."
                        " function: %(fx)s, arguments: %(args)s")
                      % dict(fx=fn.__module__,
                             args=inspect.getargspec(fn).args))
            raise e
        svol = kwarg.get('source_volume')
        voltp = kwarg.get('volume_type')
        if svol and voltp:
            if svol['volume_type_id'] != voltp['id']:
                # This is the condition that will trigger the
                # volume clone to fail. Patch it here.
                svol['volume_type_id'] = voltp['id']
                LOG.info(_("Monkey patched volume clone by paxes "
                           "volume_create_decorator(). Source Volume ID: "
                           "%(svol)s, volume type name: %(voltpnm)s, "
                           "volume type ID: %(voltpid)s")
                         % dict(svol=svol['id'],
                                voltpnm=voltp['name'],
                                voltpid=voltp['id']))
        return fn(*args, **kwarg)
    return fn(*args, **kwarg)

def _initialise_connection(self):
    self.client = paramiko.SSHClient()
    self.client.load_host_keys(SSH_KNOWN_HOST_FILE)
    self.client.set_missing_host_key_policy(paramiko.RejectPolicy())
    try:
        self.client.connect(self.switch_ip, self.port, self.username,
                            self.password, timeout=self.timeout)
    except socket.timeout:
        # Connection timeout
        raise exception.FabricTimeoutException(self.exception_desc,
                                               timeout=str(self.timeout))
    except paramiko.AuthenticationException:
        # Invalid username/password
        raise exception.FabricAuthException(self.exception_desc)
    except socket.gaierror as e:
        # Bad hostname/IP address
        raise exception.FabricConnectionException(self.exception_desc,
                                                  error=e.args[0],
                                                  detail=e.args[1])
    except Exception as e:
        # Catch everything else and raise the generic exception
        raise exception.FabricUnknownException(self.exception_desc,
                                               _("Could not connect"), e)
    self.debug(_("SSH Client created for %s"), self.exception_desc)

def _parse_scg_vios_id(k2aclient, scg_vios_id):
    # scg_vios_id = "789542X_066B1FB##1"
    if len(scg_vios_id) != 18 or scg_vios_id[7] != "_":
        msg = (_("x-scg-vios-id: >%(id)s< not of form 'ttttmmm_sssssssnnn'")
               % dict(id=scg_vios_id))
        raise ValueError(msg)
    (pt1, pt2) = scg_vios_id.split("_", 1)
    input_machine_type = pt1[:4]
    input_machine_model = pt1[-3:]
    input_serial_number = pt2[:-3]
    input_partition_id = str(int(pt2[-3:].replace("#", "0")))
    try:
        mss = k2aclient.managedsystem.list()
    except Exception as e:
        msg = (_("scohack: x-scg-vios-id: can't list ManagedSystem: "
                 ">%(ex)s<") % dict(ex=e))
        raise Exception(msg)
    ms_found = None
    for ms in mss:
        machine_type = ms.machine_type_model_and_serial_number.machine_type
        model = ms.machine_type_model_and_serial_number.model
        serial_number = ms.machine_type_model_and_serial_number.serial_number
        if (input_machine_type == machine_type and
                input_machine_model == model and
                input_serial_number == serial_number):
            ms_found = ms
            break
    if ms_found is None:
        msg = (_("scohack: x-scg-vios-id: can't find ManagedSystem for: "
                 ">%(id)s<") % dict(id=scg_vios_id))
        raise Exception(msg)
    vios_found = None
    try:
        vioss = k2aclient.virtualioserver.list(ms_found.id)
    except Exception as e:
        msg = (_("scohack: x-scg-vios-id: can't list VirtualIoServer: "
                 ">%(ex)s<") % dict(ex=e))
        raise Exception(msg)
    for vios in vioss:
        if input_partition_id == vios.partition_id:
            vios_found = vios
            break
    if vios_found is None:
        msg = (_("scohack: x-scg-vios-id: can't find VirtualIOServer for: "
                 ">%(id)s<") % dict(id=scg_vios_id))
        raise Exception(msg)
    return (ms_found, vios_found)

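# Illustrative sketch (not part of the driver): how the sample ID
# "789542X_066B1FB##1" from the comment above decomposes under the
# parsing rules in _parse_scg_vios_id(). The helper name below is
# hypothetical and exists only to show the field boundaries.
def _example_decompose_scg_vios_id(scg_vios_id="789542X_066B1FB##1"):
    pt1, pt2 = scg_vios_id.split("_", 1)
    return {
        "machine_type": pt1[:4],       # "7895"
        "machine_model": pt1[-3:],     # "42X"
        "serial_number": pt2[:-3],     # "066B1FB"
        # "#" pads the partition id, so "##1" becomes "001" -> "1"
        "partition_id": str(int(pt2[-3:].replace("#", "0"))),
    }
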
def parse_uri(self, uri):
    if not uri.startswith('scohack://'):
        reason = _("URI must start with scohack://")
        LOG.error(reason)
        msg = (_("BadStore: uri: >%(uri)s<, reason: >%(reason)s<")
               % dict(uri=uri, reason=reason))
        raise Exception(msg)
    self.scheme = 'scohack'
    self.volume_id = uri[10:]

def _get_active_zone_set(self):
    """Return the active zone configuration.

    Return the active zoneset from the fabric. When none of the
    configurations is active, an empty map is returned.

    :returns: active zone set map
    """
    zoneSet = {}
    zone = {}
    zoneMember = ZoneConstant.EMPTY
    zoneSetName = ZoneConstant.EMPTY
    zoneName = ZoneConstant.EMPTY
    switchData = ''
    try:
        switchData = self._get_switch_data(
            ZoneConstant.GET_ACTIVE_ZONE_CFG)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.error(_("Failed getting active zone set "
                         "from %s"), self.exception_desc)
    try:
        for line in switchData:
            lineSplit = re.split('\\t', line)
            if len(lineSplit) > 2:
                lineSplit = [x.replace(
                    '\n', ZoneConstant.EMPTY) for x in lineSplit]
                lineSplit = [x.replace(
                    ZoneConstant.SPACE,
                    ZoneConstant.EMPTY) for x in lineSplit]
                if ZoneConstant.CFG_ZONESET in lineSplit:
                    # ZoneSet
                    zoneSetName = lineSplit[1]
                    zoneSet[ZoneConstant.ACTIVE_ZONE_CONFIG] = zoneSetName
                    continue
                if lineSplit[1]:
                    # Zone
                    zoneName = lineSplit[1]
                    zone[zoneName] = list()
                if lineSplit[2]:
                    # ZoneMember
                    zoneMember = lineSplit[2]
                if zoneMember:
                    # Add the member to the current zone.
                    zoneMemberList = zone.get(zoneName)
                    zoneMemberList.append(zoneMember)
        zoneSet[ZoneConstant.CFG_ZONES] = zone
    except Exception:
        # A parsing error here means the CLI output was malformed.
        self.error(_("Failed getting active zone set from "
                     "fabric %s"), self.switch_ip)
        msg = _("Malformed zone configuration")
        raise exception.FCZoneDriverException(reason=msg)
    return zoneSet

def _schedule(self, context, request_spec, filter_properties=None):
    """Return a list of hosts that meet the required specs, ordered by
    their fitness.
    """
    s = super(PowerVCSchedulerDriver, self)
    hosts = s._schedule(context, request_spec,
                        filter_properties=filter_properties)
    if not hosts:
        # No hosts fit; at a minimum we could not find hosts that match
        # the capacity requirement. Log an error to the volume metadata.

        # Collect request related information.
        volume_id = request_spec['volume_id']
        vol_properties = request_spec['volume_properties']
        req_size = vol_properties['size']
        # Collect host_state information.
        elevated = context.elevated()
        all_hosts = self.host_manager.get_all_host_states(elevated)
        # For now we are only focusing on the capacity.
        req_info = (_('volume request: '
                      'requested size: %(size)s. ') % {'size': req_size})
        info = ''
        for hstate_info in all_hosts:
            ts = timeutils.isotime(at=hstate_info.updated)
            info += (_("{host: %(hostname)s, free_capacity: %(free_cap)s, "
                       "total_capacity: %(total)s, reserved_percentage:"
                       " %(reserved)s, last update: %(time_updated)s}")
                     % {'hostname': hstate_info.host,
                        'free_cap': hstate_info.free_capacity_gb,
                        'total': hstate_info.total_capacity_gb,
                        'reserved': hstate_info.reserved_percentage,
                        'time_updated': ts})
        if len(info) > 0:
            msg = (_('request exceeds capacity: ' + req_info +
                     ('available capacity: %(info)s') % {'info': info}))
        else:
            msg = (_("No storage has been registered. " + req_info))
        LOG.error(("Schedule Failure: volume_id: %s, " % volume_id) + msg)
        meta_data = {'schedule Failure description': msg[:255]}
        db.volume_update(context, volume_id, {'metadata': meta_data})
        return None
    else:
        return hosts

def _load_fabric_opts(self):
    """Programmatically parse the fabrics.conf file.

    We are not running in the volume service, so parse fabrics.conf
    directly and retrieve the options we care about, so that the fabric
    manager (a.k.a. zone manager) can be initialized.

    Return a Configuration object with the fc_fabric_names property set.
    Return None if no fabrics are registered.
    """
    parser = ConfigParser.RawConfigParser()
    parser.read('/etc/cinder/fabrics.conf')
    if not parser.has_option('DEFAULT', 'fc_fabric_names'):
        LOG.info(_("No fabrics registered."))
        return None
    fabric_names = parser.get('DEFAULT', 'fc_fabric_names')
    fabric_names_list = fabric_names.split(',')
    if not fabric_names_list:
        LOG.info(_("No fabric names registered."))
    key_prefixes = ['fc_fabric_address_', 'fc_fabric_user_',
                    'fc_fabric_password_', 'fc_fabric_display_name_']
    key_prefixes_debug = ['fc_fabric_address_', 'fc_fabric_user_',
                          'fc_fabric_display_name_']
    options = []  # list of option tuples
    options_debug = []  # list of option tuples for debug
    # Loop over the fabric names (e.g. 'a', 'b')
    for fab in fabric_names_list:
        options.extend([(k + fab, parser.get('DEFAULT', k + fab))
                        for k in key_prefixes])
        options_debug.extend([(k + fab, parser.get('DEFAULT', k + fab))
                              for k in key_prefixes_debug])
        # Instead of getting max attempts from the conf file, we use an
        # override here of 1 maximum attempt. We don't want to keep
        # trying for the default of 3 times, which would take 30 or more
        # seconds to time out for a badly behaving switch. The main use
        # case for looking up WWPNs comes from Nova's host storage
        # topology reconciliation periodic task, and the time that task
        # waits on the response here should be minimized.
        options.append(('fc_fabric_num_attempts_' + fab, 1))
        options_debug.append(('fc_fabric_num_attempts_' + fab, 1))
    LOG.debug("Fabric options: %s" % options_debug)
    # Construct the empty configuration, then set the fabric names on
    # it, for use by the zone manager.
    conf = Configuration([])
    conf.local_conf.import_opt(
        'fc_fabric_names',
        'paxes_cinder.zonemanager.paxes_fc_zone_manager')
    conf.local_conf.fc_fabric_names = fabric_names
    self.opts = options
    return conf

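# Illustrative sketch (hypothetical values): a fabrics.conf layout that
# _load_fabric_opts() above would accept, based on the key prefixes it
# reads. Only 'fc_fabric_names' and the per-fabric key names are taken
# from the code; the addresses, users, and display names are made up.
#
#   [DEFAULT]
#   fc_fabric_names = a,b
#   fc_fabric_address_a = 192.0.2.10
#   fc_fabric_user_a = admin
#   fc_fabric_password_a = ********
#   fc_fabric_display_name_a = Fabric A
#   fc_fabric_address_b = 192.0.2.11
#   fc_fabric_user_b = admin
#   fc_fabric_password_b = ********
#   fc_fabric_display_name_b = Fabric B
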
def _retry_if_problems(self, func, *args, **kwargs):
    """Execute a function with the specified arguments, retrying a
    specified number of times and pausing for a random time between
    attempts.

    In all exception cases we shut down and re-establish the SSH
    connection. We log all issues as INFO messages and indicate that we
    will retry. Exceptions that are subclasses of FabricException can
    tell us that it is not worth retrying.
    """
    attempts = 0
    while attempts < self.max_attempts:
        attempts += 1
        try:
            self._ensure_connection()
            return func(*args, **kwargs)
        except Exception as e:
            # Doing str(e) on one of our fabric exceptions blows up with
            # 'UnicodeError: Message objects do not support str()...',
            # so special-case it here for logging.
            if 'msg' in dir(e):
                err = e.msg
            else:
                err = _("%s") % e
            self.info(_("Problem: %(problem)s") % {'problem': err})
            # Ensure that the connection has been closed.
            self.close_connection()
            # We retry if we've not hit the retry limit, and the
            # exception was either something that we don't understand,
            # or a FabricException that tells us that retrying is
            # worthwhile.
            if (attempts < self.max_attempts and
                    (not isinstance(e, exception.FabricException) or
                     e.attempt_retry)):
                # We wait a random amount of time between retry_min_gap
                # and retry_max_gap, plus retry_max_gap * (attempts - 1)
                # so that the time between attempts increases.
                sleep_time = (random.randint(self.retry_min_gap,
                                             self.retry_max_gap) +
                              self.retry_max_gap * (attempts - 1))
                self.info(_("Will retry after %(seconds)s seconds...")
                          % {'seconds': sleep_time})
                greenthread.sleep(seconds=sleep_time)
                continue
            else:
                # We either ran out of retries or we got an exception
                # that was not worth retrying.
                raise

def _get_switch_data(self, cmd):
    try:
        stdin, stdout, stderr = self.client.exec_command(cmd)
        stdin.close()
        switchData = stdout.readlines()
        stdout.close()
        stderr.close()
    except Exception as e:
        with excutils.save_and_reraise_exception():
            msg = _("Failed when executing command '%(cmd)s': %(err)s") \
                % {'cmd': cmd, 'err': _("%s") % e}
            self.error(msg)
    return switchData

def get_wwpn_fabric_map(self, wwpn_list):
    """Given a list of WWPN initiators, return a dictionary mapping each
    WWPN to the fabric where it is logged in.

    :param wwpn_list: The list of WWPNs. These strings should not be
                      tokenized with colons (:).
                      Example: ["10000090FA2A5866", "10000090FA2A8923",
                                "c0507606d56e03af"]
    :returns: A dictionary mapping of input WWPNs to discovered fabric
              names.
              Example: {"10000090FA2A8923": "B",
                        "10000090FA2A5866": "A",
                        "c0507606d56e03af": None}
    """
    if not self.fabricmanager:
        msg = _("Could not obtain a fabric manager client. Fibre "
                "Channel switch fabrics may not be registered.")
        raise exc.HTTPInternalServerError(explanation=six.text_type(msg))
    # Add colons into input WWPNs and put them in a mapping dictionary.
    with_colons = dict([[fabmanager.get_formatted_wwn(w.lower()), w]
                        for w in wwpn_list])
    LOG.debug("ENTER get_wwpn_fabric_map: colon_map = %s" % with_colons)
    input_dict = {'initiators': with_colons.keys()}
    try:
        data = self.fabricmanager.get_san_context(input_dict)
    except Exception as ex:
        # The exception here may not be the root cause. If we cannot
        # get a normal response, then we raise our own error here.
        LOG.exception(ex)
        msg = (_("Unable to retrieve WWPN initiator information from "
                 "one or more fabric switches. Error: %(ex)s")
               % dict(ex=ex))
        raise exc.HTTPInternalServerError(explanation=six.text_type(msg))
    LOG.debug("Zonemanager.get_san_context() returns: %s" % data)
    # Build a dictionary mapping each original WWPN passed in to the
    # result of the fabric lookup.
    map_dict = dict([[w, self._match_fabric(c, data)]
                     for c, w in with_colons.iteritems()])
    LOG.debug("RETURNS wwpn_mapping_dict: %s" % map_dict)
    return map_dict

def add(self, image_file, image_meta, context):
    """Store an image file with the supplied identifier to the backend
    storage system and return a tuple containing information about the
    stored image.

    :param image_file: The image data to write, as a file-like object
    :param image_meta: Image metadata
    :retval: tuple of URL in backing store, bytes written, checksum and
             a dictionary with storage-system-specific information
    """
    hmc_id = image_meta.get("hmc_id")
    volume_id = image_meta.get("volume_id")
    k2aclient, managementconsole = self._getk2client(context, hmc_id)
    vios = _get_vios(k2aclient, image_meta)
    volume = self._get_volume(context, volume_id)
    msg = (_("scohack: vios: >%(vios)s<, volume: >%(vol)s<")
           % dict(vios=vios.id, vol=volume["id"]))
    LOG.info(msg)
    image_meta['vios_id'] = vios.id
    image_meta['volume_name'] = volume['display_name']
    connector = self._get_connector(vios)
    try:
        (hdisk_name, hdisk_uuid, vios) = self._discover_volume(
            context, k2aclient, vios, volume, connector)
        msg = (_("scohack: image upload: hdisk: >%(hdisk)s<, "
                 "vios: >%(vios)s<")
               % dict(hdisk=hdisk_uuid, vios=vios.id))
        LOG.info(msg)
        self._upload(context, k2aclient, managementconsole, image_meta,
                     image_file, vios, hdisk_name, hdisk_uuid)
    finally:
        self.volume_rpcapi.terminate_connection(context, volume, connector)

def discover_volumes(self, context, filters=None):
    volume_l = []
    # 1 GB = 1073741824 bytes
    bytesPerGigaByte = 1073741824
    try:
        connection = self.common._get_ecom_connection()
    except Exception:
        exception_message = _("Could not establish ecom connection")
        LOG.error(exception_message)
        raise exception.VolumeBackendAPIException(data=exception_message)
    volumes = connection.EnumerateInstances("EMC_StorageVolume")
    for v in volumes:
        volume_d = {}
        numBlocks = v["NumberOfBlocks"]
        blockSize = v["BlockSize"]
        size = int((numBlocks * blockSize) / bytesPerGigaByte)
        statuses = v["StatusDescriptions"]
        status = "available" if "ONLINE" in statuses else "error"
        volume_d["name"] = v["ElementName"]
        volume_d["size"] = size
        volume_d["restricted_metadata"] = {"vdisk_id": v["DeviceID"],
                                           "vdisk_name": v["ElementName"]}
        volume_d["status"] = status
        volume_d["support"] = {"status": "supported"}
        volume_l.append(volume_d)
    return volume_l

def get_all_host_states(self, context):
    """The host_state_map in the HostManager isn't maintained properly
    during storage host registration/deregistration. Override
    get_all_host_states and add proper serialization.

    For example: {'192.168.1.100': HostState(), ...}
    """
    topic = CONF.volume_topic
    for host, host_state in self.host_state_map.items():
        try:
            db.service_get_by_host_and_topic(context, host, topic)
        except exception.ServiceNotFound:
            # The host has been deregistered.
            LOG.debug(_("clean up host_state_map: %(host)s"
                        % {'host': host}))
            del self.host_state_map[host]
            continue
    s = super(PowerVCHostManager, self)
    hosts = s.get_all_host_states(context)
    return hosts

def _do_delete_zones(fabric_name, conn, initiators):
    # We look in the zone list for zones containing precisely one of
    # the specified initiators and one other thing, and remove them.
    # The zone name must also use our prefix.
    initiator_set = set(initiators)
    zones_to_delete = []
    # Maps zone name to list of WWPNs. WWPNs are in colon-expanded
    # format.
    zone_map = conn.get_active_zone_map()
    for zone_name, zone_wwpns in zone_map.iteritems():
        zone_set = set(zone_wwpns)
        if (zone_name.startswith(self.conf.zone_name_prefix) and
                len(zone_set & initiator_set) == 1):
            zones_to_delete.append(zone_name)
    if len(zones_to_delete) > 0:
        LOG.info(_("%(fabric_descriptor)s: deleting zones "
                   "%(zone_list)s")
                 % {'fabric_descriptor': conn.exception_desc,
                    'zone_list': ', '.join(zones_to_delete)})
        conn.delete_zones(zones_to_delete, self.conf.zone_activate)

def executecmd(self, cmd):
    """Execute CLI with no status update.

    Executes CLI commands such as addZone where a status return is not
    expected.
    """
    with Timeout(self.cmd_timeout):
        try:
            self.info(_("Executing: %s"), cmd)
            stdin, stdout, stderr = self.client.exec_command(cmd)
            output = stdout.readlines()
            stdin.close()
            stdout.close()
            stderr.close()
            if output:
                res = output[0]
                desc = self.exception_desc
                self.error(_("CLI execution returned: %s"), output)
                if ("you are not the owner of that transaction"
                        in output[0]):
                    raise exception.FabricTransactionException(desc,
                                                               cmd=cmd)
                if "Zone DB too large" in output[0]:
                    raise exception.FabricSizeExceededException(desc,
                                                                cmd=cmd,
                                                                res=res)
                raise exception.FabricUnexpectedResponseException(desc,
                                                                  cmd=cmd,
                                                                  res=res)
        except Timeout as timeout:
            # Close the connection so that nobody tries to re-use it.
            self.close_connection()
            desc = self.exception_desc
            raise exception.FabricCommandTimeoutException(
                desc, timeout=self.cmd_timeout, cmd=cmd)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                msg = _("Error executing command '%(cmd)s': %(err)s") \
                    % {'cmd': cmd, 'err': _("%s") % e}
                self.error(msg)
    return (True, ZoneConstant.SUCCESS)

def downgrade(migration_engine):
    """Downgrade the Cinder DB tables to remove those for 1.2."""
    metadata = MetaData(migration_engine)
    metadata.reflect(migration_engine)
    metadata.bind = migration_engine
    # Loop through all of the DTOs that we added new in PowerVC 1.2,
    # dropping the tables based on the definition in the DTO.
    for dto in reversed(models.POWERVC_V1R2_DTOS):
        try:
            dto.__table__.metadata = metadata
            dto.__table__.drop(checkfirst=True)
        except Exception as exc:
            LOG.info(_(repr(dto.__table__)))
            tbl = dto.__table__.name
            LOG.exception(_('Exception dropping table %(table)s: %(ex)s')
                          % dict(table=tbl, ex=exc))
            raise exc

def update_volume_status(self):
    """Retrieve status info."""
    LOG.debug(_("Updating volume status"))
    self.conn = self._get_ecom_connection()
    storage_type = self._get_storage_type()
    pool, storagesystem = self._find_pool(storage_type, True)
    self.stats["total_capacity_gb"] = (pool["TotalManagedSpace"] /
                                       int(units.GiB))
    self.stats["free_capacity_gb"] = (pool["RemainingManagedSpace"] /
                                      int(units.GiB))
    return self.stats

def upgrade(migration_engine):
    """Upgrade the Cinder DB tables to include those for 1.2."""
    metadata = MetaData(migration_engine)
    metadata.reflect(migration_engine)
    metadata.bind = migration_engine
    # Loop through all of the DTOs that we added new in PowerVC 1.2,
    # creating the tables based on the definition in the DTO.
    for dto in models.POWERVC_V1R2_DTOS:
        try:
            table = dto.__table__.tometadata(metadata, None)
            table.create(checkfirst=True)
        except Exception as exc:
            LOG.info(_(repr(dto.__table__)))
            tbl = dto.__table__.name
            LOG.exception(_('Exception creating table %(table)s: %(ex)s')
                          % dict(table=tbl, ex=exc))
            raise exc

def _query(self, location, verb, depth=0):
    if depth > MAX_REDIRECTS:
        x = (_("MaxRedirectsExceeded: redirects: >%(max)d<")
             % dict(max=MAX_REDIRECTS))
        e = Exception(x)
        e.scohack_httpcode = 400
        raise e
    loc = location.store_location
    conn_class = self._get_conn_class(loc)
    conn = conn_class(loc.netloc)
    conn.request(verb, loc.path, "", {})
    resp = conn.getresponse()
    # Check for bad status codes
    if resp.status >= 400:
        reason = (_("HTTP URL returned a %(status)s status code.")
                  % dict(status=resp.status))
        x = (_("BadStoreUri: loc.path: >%(path)s<, reason: >%(reason)s<")
             % dict(path=loc.path, reason=reason))
        e = Exception(x)
        e.scohack_httpcode = resp.status
        raise e
    location_header = resp.getheader("location")
    if location_header:
        if resp.status not in (301, 302):
            reason = ("The HTTP URL attempted to redirect with an "
                      "invalid status code.")
            x = (_("BadStoreUri: loc.path: >%(path)s<, "
                   "reason: >%(reason)s<")
                 % dict(path=loc.path, reason=reason))
            e = Exception(x)
            e.scohack_httpcode = resp.status
            raise e
        location_class = paxes_cinder.scohack.scohack_location.Location
        new_loc = location_class(location.store_name,
                                 location.store_location.__class__,
                                 uri=location_header,
                                 image_id=location.image_id,
                                 store_specs=location.store_specs)
        return self._query(new_loc, verb, depth + 1)
    content_length = int(resp.getheader('content-length', 0))
    return (conn, resp, content_length)

def parse_uri(self, uri):
    """Parse URLs.

    This method fixes an issue where credentials specified in the URL
    are interpreted differently in Python 2.6.1+ than in prior versions
    of Python.
    """
    pieces = urlparse.urlparse(uri)
    assert pieces.scheme in ('https', 'http')
    self.scheme = pieces.scheme
    netloc = pieces.netloc
    path = pieces.path
    try:
        if '@' in netloc:
            creds, netloc = netloc.split('@')
        else:
            creds = None
    except ValueError:
        # Python 2.6.1 compat
        # see lp659445 and Python issue7904
        if '@' in path:
            creds, path = path.split('@')
        else:
            creds = None
    if creds:
        try:
            self.user, self.password = creds.split(':')
        except ValueError:
            reason = (_("BadStoreUri: credentials '%(creds)s' "
                        "not well-formatted.")
                      % dict(creds="".join(creds)))
            LOG.debug(reason)
            e = Exception(reason)
            e.scohack_httpcode = 400
            raise e
    else:
        self.user = None
    if netloc == '':
        reason = _("BadStoreUri: no address specified in HTTP URL")
        LOG.debug(reason)
        e = Exception(reason)
        e.scohack_httpcode = 400
        raise e
    self.netloc = netloc
    self.path = path

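# Illustrative sketch (hypothetical URL): the fields parse_uri() above
# is expected to populate for a credentialed HTTP URL, following the
# parsing steps in the code.
#
#   uri = "http://user:secret@images.example.com/path/to/image"
#   after parse_uri(uri):
#       scheme   -> "http"
#       user     -> "user"
#       password -> "secret"
#       netloc   -> "images.example.com"
#       path     -> "/path/to/image"
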
def wrapped_func(*args, **kwarg):
    if fn.__name__ == 'create':
        # cinder.volume.volume_types.create() decorator
        r = fn(*args, **kwarg)
        LOG = logging.getLogger(fn.__module__)
        try:
            idx_specs = inspect.getargspec(fn).args.index('extra_specs')
            idx_name = inspect.getargspec(fn).args.index('name')
            idx_ctxt = inspect.getargspec(fn).args.index('context')
        except Exception as e:
            LOG.warn(_("Failed to get the parameters from function "
                       "cinder.volume.volume_types.create(). The default "
                       "quota was not set for the storage template. "
                       "Error: %(err)s") % dict(err=e))
            # Just return. Don't set the storage template default quota.
            return r
        volume_type = args[idx_name]
        extra_specs = args[idx_specs]
        ctxt = args[idx_ctxt]
        volume_host = None
        if extra_specs and isinstance(extra_specs, dict):
            volume_host = extra_specs.get(
                "capabilities:volume_backend_name", None)
        if volume_host and volume_type and ctxt:
            volume_rpcapi = volume_rpc.VolumeAPIProduct()
            try:
                volume_rpcapi.set_volume_type_quota(
                    ctxt, volume_host, volume_type)
                LOG.info(_("Successfully set default quota for storage "
                           "template %(vol_type)s")
                         % dict(vol_type=volume_type))
            except Exception as e:
                LOG.warn(_("Failed to set default quota for storage "
                           "template %(vol_type)s, error: %(err)s")
                         % dict(vol_type=volume_type, err=e))
        else:
            LOG.warn(_("Cannot set default quota for storage template "
                       "%(vol_type)s due to invalid parameters from "
                       "volume type create.")
                     % dict(vol_type=volume_type))
        return r
    else:
        return fn(*args, **kwarg)

def _get_volume_params(self, type_id):
    # Get the default values.
    opts = self._build_default_opts()
    if type_id:
        ctxt = context.get_admin_context()
        volume_type = volume_types.get_volume_type(ctxt, type_id)
        # Get the extra-specs.
        specs = volume_type.get("extra_specs")
        for k, value in specs.iteritems():
            # Get the scope, if using scope format.
            key_split = k.split(":")
            if len(key_split) == 1:
                scope = None
                key = key_split[0]
            else:
                scope = key_split[0]
                key = key_split[1]
            # We generally do not look at capabilities in the driver,
            # but protocol is a special case where the user asks for a
            # given protocol and we want both the scheduler and the
            # driver to act on the value.
            if scope == "capabilities" and key == "storage_protocol":
                scope = None
                key = "protocol"
                words = value.split()
                self._driver_assert(
                    words and len(words) == 2 and words[0] == "<in>",
                    _("protocol must be specified as "
                      "'<in> iSCSI' or '<in> FC'"))
                del words[0]
                value = words[0]
            # Any keys that the driver should look at should have the
            # 'drivers' scope.
            if scope and scope != "drivers":
                continue
            # Check for the element_type extra-spec.
            if key == "element_type":
                # Check if the user specified by name.
                if value in supported_element_types.keys():
                    # Convert the name to the matching value.
                    # Otherwise the expected data-type won't match.
                    value = supported_element_types[value]
            if key in opts:
                this_type = type(opts[key]).__name__
                if this_type == "int":
                    value = int(value)
                elif this_type == "bool":
                    value = strutils.bool_from_string(value)
                opts[key] = value
    return opts

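# Illustrative sketch (hypothetical extra-specs values): how the scope
# parsing in _get_volume_params() above treats typical keys. Only the
# key-handling rules are taken from the code; the concrete values and
# the element_type name are made up.
#
#   extra_specs = {
#       # Rewritten to key "protocol"; validated as "<in> <value>",
#       # so opts["protocol"] becomes "FC" (assuming "protocol" is a
#       # default option).
#       "capabilities:storage_protocol": "<in> FC",
#       # "drivers" scope is applied to opts; if the name appears in
#       # supported_element_types it is converted to its value first.
#       "drivers:element_type": "some_element_type_name",
#       # Any other non-"drivers" scope is skipped by the driver.
#       "capabilities:volume_backend_name": "backend-1",
#   }
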
def _get_volume(self, context, volume_id):
    try:
        volume = self.volume_api.get(context, volume_id)
    except exception.NotFound:
        msg = (_("scohack: volume not found: >%(volume)s<")
               % dict(volume=volume_id))
        LOG.error(msg)
        e = Exception(msg)
        e.scohack_httpcode = 400
        raise e
    try:
        self.volume_api.check_attach(context, volume)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_("scohack: volume: invalid status: >%(status)s<")
                      % dict(status=volume['status']))
    return volume

def _getk2client(self, ctxt, hmc_uuid):
    """Create a k2aclient for this volume driver instance."""
    # Check for first time.
    if self._hmc_uuid is None:
        self._hmc_uuid = hmc_uuid
    # This may be relaxed in future versions.
    if hmc_uuid != self._hmc_uuid:
        msg = (_("scohack: only single hmc is supported: "
                 "prev: >%(prev_id)s<, new: >%(new_id)s<")
               % dict(prev_id=self._hmc_uuid, new_id=hmc_uuid))
        raise Exception(msg)
    if self._k2aclient is not None:
        # Return the cached client together with the management console
        # so callers can always unpack a (client, console) pair.
        return self._k2aclient, self._managementconsole
    hmc = paxes_db_api.ibm_hmc_get_by_uuid(ctxt, hmc_uuid)
    if hmc is None:
        msg = (_("scohack: no HMC for id: >%(id)s<") % dict(id=hmc_uuid))
        e = Exception(msg)
        e.scohack_httpcode = 400
        raise e
    self._k2aclient = self._initk2client(hmc)
    self._managementconsole = None
    try:
        self._managementconsole = \
            self._k2aclient.managementconsole.get(hmc_uuid)
    except Exception as e:
        msg = (_("scohack:"
                 " managementconsole: >%(mc)s<"
                 " could not be retrieved,"
                 " hdisks will not be cleaned up,"
                 " msg: >%(msg)s<") % dict(mc=hmc_uuid, msg=e))
        LOG.warning(msg)
    return self._k2aclient, self._managementconsole

def delete_zones(self, zoneNames, isActivate):
    """Delete zones from the fabric.

    Method to delete zones from the active zone config.

    :param zoneNames: list of zone names to delete
    :param isActivate: True/False
    """
    activeZoneSetName = None
    try:
        activeZoneSetName = self.get_active_zoneset_name()
    except Exception:
        with excutils.save_and_reraise_exception():
            self.error(_("Failed getting active zones from "
                         "fabric %s"), self.switch_ip)
    zoneString = ';'.join(zoneNames)
    cmd = ''
    try:
        cmd = '%s%s%s%s%s%s' % (ZoneConstant.CFG_REMOVE, "\"",
                                activeZoneSetName, "\", \"",
                                zoneString, "\"")
        status, msg = self.executecmd(cmd)
        if status:
            for zone in zoneNames:
                status, msg = self._zone_delete(zone)
                if not status:
                    self._cfg_trans_abort()
                    return (False, msg)
        else:
            return (False, msg)
        if isActivate:
            self.activate_zoneset(activeZoneSetName)
        self._cfg_save()
    except exception.FabricTransactionException:
        with excutils.save_and_reraise_exception():
            self.error(_("Deleting zones failed, cmd %s"), cmd)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.error(_("Deleting zones failed, cmd %s"), cmd)
            self._cfg_trans_abort()
    return (True, ZoneConstant.SUCCESS)

def _do_get_san_context(fabric_name, conn, wwn_sets):
    nsinfo = conn.get_nameserver_info()
    LOG.info(_("Returned data from get_nameserver_info() is %(info)r")
             % dict(info=nsinfo))
    nsset = set(nsinfo)
    found_wwns = {}
    for name, wwn_set in wwn_sets.iteritems():
        found_wwns[name] = list(nsset & wwn_set)
    return found_wwns

def _parse_ns_output(self, switchData):
    """Parse name server data.

    Parses the raw nameserver data and adds the device port WWNs to the
    returned list.

    :return: list of device port WWNs from the nameserver info
    """
    returnlist = []
    for line in switchData:
        if not (" NL " in line or " N " in line):
            continue
        linesplit = line.split(';')
        if len(linesplit) > 2:
            nodePortWwn = linesplit[2]
            returnlist.append(nodePortWwn)
        else:
            self.error(
                _("Malformed nameserver output is: %s"), switchData)
            raise exception.InvalidParameterValue(
                err=_("Malformed nameserver info"))
    return returnlist

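# Illustrative sketch (hypothetical switch output): the parser above
# keeps only lines for N/NL ports and takes the third ';'-separated
# field as the device port WWN. The sample line below is made up to
# match that shape, not captured from a real switch.
#
#   line = " N    011a00;      2,3;20:00:00:05:1e:e8:e3:29;...;"
#   line.split(';')[2]  ->  "20:00:00:05:1e:e8:e3:29"
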
def wrapped(self, *args, **kwargs):
    try:
        return self._retry_if_problems(func, self, *args, **kwargs)
    except exception.FabricException:
        # Exception is already friendly, don't do anything.
        raise
    except Exception as e:
        # Wrap the exception up with details of the failing fabric and
        # an indication of what we were doing.
        problem = _(message)
        LOG.exception(e)
        raise exception.FabricUnknownException(self.exception_desc,
                                               problem, e)

def close_connection(self):
    """Close the client connection."""
    # Do nothing if there is no connection in existence.
    if self.client is None:
        return
    try:
        self.client.close()
    except Exception as e:
        # Just log and move on.
        msg = _("Failed closing SSH connection %s") % e
        self.warn(msg)
    finally:
        self.client = None