def send_migration_failure_notification(context, instance, tgt_host,
                                        exception):
    """
    Sends a notification of live migration failure to the GUI

    :param context: security context
    :param instance: The instance that was migrating
    :param tgt_host: The target host name
    :param exception: The exception that was thrown
    """
    if hasattr(exception, 'message'):
        err_msg = _('%s') % exception.message
    else:
        err_msg = _('%s') % exception
    # Send error notification
    info = {'msg': _('Migration of virtual machine {instance_name} '
                     'to host {host_name} failed. '
                     '{error}'),
            'instance_id': instance['uuid'],
            'instance_name': instance['display_name'],
            'host_name': tgt_host,
            'error': err_msg}
    notifier = rpc.get_notifier(service='compute', host=CONF.host)
    notifier.error(context, 'compute.instance.log', info)

def set_instance_error_state_and_notify(instance):
    """
    Set an instance to ERROR state and send out a notification
    """
    # Set instance to error state. Instance could be a dictionary when
    # this method is called during the virtual machine delete process.
    instance['vm_state'] = vm_states.ERROR
    conductor.API().instance_update(context.get_admin_context(),
                                    instance['uuid'],
                                    vm_state=vm_states.ERROR,
                                    task_state=None)
    instance_name = instance['name']
    host_name = instance['host']
    LOG.warn(_('Unable to find virtual machine %(inst_name)s '
               'on host %(host)s. Set state to ERROR')
             % {'inst_name': instance_name, 'host': host_name})
    # Send event notification
    note = {'event_type': 'compute.instance.log',
            'msg': _('Unable to find virtual machine {instance_name} on '
                     'host {host_name}. An operation might have been '
                     'performed on the virtual machine outside of PowerVC '
                     'or the deploy of the virtual machine failed. '
                     'The virtual machine is now set to Error state in '
                     'the database.'),
            'instance_name': instance_name,
            'host_name': host_name}
    notifier = rpc.get_notifier(service='compute', host=host_name)
    notifier.warn(context.get_admin_context(),
                  'compute.instance.log', note)

def _hostname_prefix(self, hostname_str):
    """Translate connector info to storage system host name.

    Create a storage compatible host name based on the host shortname,
    replacing any invalid characters (at most 55 characters) and adding
    a random 8-character suffix to avoid collisions. The total length
    should be at most 63 characters.

    :param hostname_str: host shortname in string format
    :return: generated hostname for storage
    """
    if not hostname_str or len(hostname_str) == 0:
        msg = _("Invalid Hostname: %(hostname_str)s for storage") % \
            locals()
        LOG.exception(msg)
        ex_args = {'hostname': hostname_str}
        raise SVCInvalidHostnameError(**ex_args)
    if isinstance(hostname_str, unicode):
        hostname_str = hostname_str.translate(
            self._unicode_host_name_filter)
    elif isinstance(hostname_str, str):
        hostname_str = hostname_str.translate(
            self._string_host_name_filter)
    else:
        msg = _("Cannot clean host name: %(hostname_str)s for storage") % \
            locals()
        LOG.exception(msg)
        ex_args = {'hostname': hostname_str}
        raise SVCInvalidHostnameError(**ex_args)
    hostname_str = str(hostname_str)
    return hostname_str[:55]

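# A minimal sketch of how the 55-character prefix pairs with the random
# 8-character suffix in create_host() below; the host name values here are
# hypothetical:
#
#   >>> self._hostname_prefix('db2-prod-node1.example.com'.split('.')[0])
#   'db2-prod-node1'
#   >>> '%s-%s' % ('db2-prod-node1', '04217339')  # 55 + 1 + 8 <= 63 chars
#   'db2-prod-node1-04217339'
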
def _is_instance_on_source_host(context, source_host, instance_uuid,
                                instance_name):
    """
    While handling a possible out of band migration, we send a message to
    the source host, asking if the instance is still on the source host.
    """
    # Cooperative yield
    greenthread.sleep(0)
    answer = api.PowerVCComputeRPCAPI().is_instance_on_host(
        context, instance_uuid, instance_name, source_host)
    if answer:
        LOG.info(_('Virtual machine %(inst)s is being managed by remote '
                   'host %(host)s. This could indicate the virtual machine '
                   'is on two hosts simultaneously after migration '
                   'outside of PowerVC')
                 % {'inst': instance_name, 'host': source_host})
    else:
        LOG.info(_('Instance %(inst)s is not being managed by remote '
                   'host %(host)s.')
                 % {'inst': instance_name, 'host': source_host})
    return answer

def _svc_command(self, command):
    """
    Execute a command on the remote system over ssh. Supports both
    password and key authentication.

    :param command: Command text to execute
    :returns: Tuple of lists of lines returned on stdout and stderr
    """
    self._set_svc_connection()
    self._check_connection()
    try:
        LOG.debug("Running cmd: %s" % command)
        stdin, stdout, stderr = self._connection.exec_command(command)
        output = stdout.read().splitlines()
        err_output = stderr.read().splitlines()
        LOG.debug("SVC command [%s] returned stdout: %s stderr: %s"
                  % (command, output, err_output))
        if err_output:
            LOG.warn(_("Command %(cmd)s returned with stderr: %(err)s")
                     % dict(cmd=command, err=err_output))
        return (output, err_output)
    except Exception as e:
        ex_args = {'cmd': command, 'e': e}
        LOG.exception(_("Error while running command %(cmd)s: %(e)s")
                      % ex_args)
        raise SVCCommandException(**ex_args)

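# A minimal usage sketch (the command and output values are hypothetical):
#
#   >>> output, err_output = self._svc_command('svcinfo lssystem -delim :')
#   >>> output[0]
#   'id:0000020060C14FFE'
#
# Callers in this module split each colon-delimited line on ':', as
# get_vdisk_map_by_uid() and get_volume_info() do below.
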
def create_config_drive_iso(instance, injected_files, admin_password,
                            network_info):
    """
    If a config drive is required by the instance, create a config drive
    ISO file and return the path to the file. Otherwise return None.

    @param instance: the VM instance
    @param injected_files: files specified to be injected on the VM
        spawn method
    @param admin_password: Admin password specified on the VM spawn call
    @param network_info: network_info passed to the VM spawn call
    """
    if configdrive.required_by(instance):
        LOG.info(_("Using config drive"), instance=instance)
        extra_md = {}
        if admin_password:
            extra_md["admin_pass"] = admin_password

        inst_md = instance_metadata.InstanceMetadata(
            instance, content=injected_files, extra_md=extra_md,
            network_info=network_info)

        local_img_dir = CONF.powervm_img_local_path
        base_name = "%s_config.iso" % instance["name"]
        configdrive_path = os.path.join(local_img_dir, base_name)
        with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
            LOG.info(_("Creating config drive at %(path)s"),
                     {"path": configdrive_path}, instance=instance)
            try:
                cdb.make_drive(configdrive_path)
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Creating config drive failed "
                                "with error: %s"),
                              e, instance=instance)
        return configdrive_path

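# A small sketch of the resulting ISO path; the image directory and
# instance name are hypothetical, not taken from this module's config:
#
#   >>> os.path.join('/var/lib/nova/images',
#   ...              '%s_config.iso' % 'instance-0001')
#   '/var/lib/nova/images/instance-0001_config.iso'
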
def _svc_connect(self, connectionDic, skip_connect=False):
    """
    Connect to the remote system over ssh. Supports both password and
    key authentication.

    :param connectionDic: A dictionary with connection info
    :returns: Active ssh connection
    """
    max_retries = CONF.svc_ssh_connect_retries
    for retry_count in range(max_retries):
        try:
            ssh = paramiko.SSHClient()
            ssh.load_host_keys(CONF.svc_known_hosts_path)
            ssh.set_missing_host_key_policy(paramiko.RejectPolicy())
            port = connectionDic[SSH_PORT]
            if not skip_connect:
                if SSH_KEY_PRIVATE_KEY in connectionDic:
                    keyPath = connectionDic[SSH_KEY_PRIVATE_KEY]
                    key_file = os.path.expanduser(keyPath)
                    # Paramiko doesn't support DSA keys
                    private_key = paramiko.RSAKey.from_private_key_file(
                        key_file)
                    ssh.connect(connectionDic[SSH_KEY_HOST],
                                username=connectionDic[SSH_KEY_USERNAME],
                                port=port,
                                pkey=private_key)
                else:
                    ssh.connect(connectionDic[SSH_KEY_HOST],
                                username=connectionDic[SSH_KEY_USERNAME],
                                password=connectionDic[SSH_KEY_PASSWORD],
                                port=port)
                # send TCP keepalive packets every 20 seconds
                transport = ssh.get_transport()
                transport.set_keepalive(int(connectionDic[SSH_KEEP_ALIVE]))
                if not transport.is_active():
                    LOG.debug("svc connection failed with %s"
                              % connectionDic[SSH_KEY_HOST])
                else:
                    LOG.debug("svc connection established with %s"
                              % connectionDic[SSH_KEY_HOST])
            # add connection info for re-establishing if broken
            ssh.connDict = connectionDic
            return ssh
        except SSHException as e:
            # Use a distinct name for the exception so it doesn't shadow
            # the ssh client variable above.
            if ((retry_count < max_retries - 1) and
                    ('Error reading SSH protocol banner' in e.message)):
                LOG.debug("SSH connection to SVC failed. Retrying.")
            else:
                LOG.exception(_("SSH Exception while trying to connect"))
                raise
        except Exception:
            LOG.exception(_("Unknown exception while trying to connect"))
            raise

def _neutron_unused_port_cleanup(context):
    """
    This task periodically runs to check if there are any 'stale' neutron
    ports that need cleanup and eventually cleans them up. 'stale' ports
    are those which are not in use by any of the instances and hence
    should be freed up for deploys if they exist.

    :param context: The context object.
    """
    LOG.debug('pvc_nova.compute.manager.PowerVCComputeManager '
              '_neutron_unused_port_cleanup: Cleaning up unused neutron '
              'ports...')
    # Get all the running instances from the DB which aren't deleted.
    ports_ids_to_delete = []
    try:
        # Expensive!
        db_instances = nova_db.instance_get_all(context)
        # Get all the neutron ports. Expensive!
        network_data = neutronv2.get_client(context).list_ports()
        ports = network_data.get('ports', [])
        # We run through the list of ports and see if they have a
        # device_id. If the device_id exists, we see if they are in use
        # with an instance or not; if not, then we delete them.
        for port in ports:
            found_server = False
            for instance in db_instances:
                if port.get('device_id', None) is not None and \
                        port['device_id'] == instance['uuid']:
                    found_server = True
                    break
            # Only delete ports that are owned by Compute. Do NOT
            # delete ports owned by, say, SCE.
            dev_owner = port.get('device_owner', None)
            owned_by_compute = False
            if dev_owner is not None and dev_owner == 'compute:None':
                owned_by_compute = True
            if not found_server and owned_by_compute:
                ports_ids_to_delete.append(port['id'])
                LOG.info(_('Deleting neutron port with id %(id)s and data '
                           '%(data)s') % {'id': port['id'],
                                          'data': str(port)})
    except Exception as exc:
        LOG.exception(exc)
    # Now iterate the legit candidates for deletion and delete them.
    for port_id in ports_ids_to_delete:
        try:
            neutronv2.get_client(context).delete_port(port_id)
            LOG.warning(_('Cleaning up the unused neutron port with id '
                          '%(port)s') % {'port': port_id})
        except Exception as exc:
            LOG.exception(exc)
    LOG.debug('Exiting neutron port clean up')

def get_vdisk_map_by_uid(self, vdisk_uid):
    """
    Return the vdisk's hostmap information.

    :param vdisk_uid: vdisk_UID to look up
    :return: host/scsi_id dictionary, or None if no mapping is found
    """
    if not vdisk_uid:
        msg = _("Invalid parameter: vdisk_uid is None ")
        LOG.exception(msg)
        raise SVCInvalidVdiskUID

    map_info = {}
    cmd = 'svcinfo lshostvdiskmap -delim :'
    output, err_output = self._svc_command(cmd)
    if err_output:
        msg = (_("get_vdisk_map_by_uid() failure: svc cmd: %(cmd)s"
                 " error: %(error)s")
               % {'cmd': cmd, 'error': err_output})
        LOG.exception(msg)
        ex_args = {'cmd': cmd, 'e': err_output}
        raise SVCCommandException(**ex_args)

    if len(output) < 2:
        msg = _("get_vdisk_map_by_uid() found no vdisk host mapping "
                "on storage")
        LOG.warn(msg)
        return map_info

    header = output.pop(0).split(':')
    scsi_id_idx = header.index(SVC_KEY_MAP_SCSI_ID)
    vdisk_uid_idx = header.index(SVC_KEY_VDISK_UID)
    hostname_idx = header.index(SVC_KEY_HOST_NAME)

    for line in output:
        info = line.split(':')
        host = info[hostname_idx]
        vdisk_UID = info[vdisk_uid_idx]
        scsi_id = info[scsi_id_idx]
        # we may have one LUN mapped to multiple hosts during LPM
        if vdisk_UID == vdisk_uid:
            map_info[host] = scsi_id

    if len(map_info) == 0:
        map_info = None
    LOG.debug("vdisk hostmap: %(map_info)s" % {'map_info': str(map_info)})
    return map_info

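# A hypothetical sketch of the colon-delimited lshostvdiskmap output this
# method parses (column layout abbreviated, values invented):
#
#   name:SCSI_id:vdisk_name:vdisk_UID
#   vios1-00012345:0:vdisk12:60050768019181D348000000000001A2
#
# For vdisk_uid '60050768019181D348000000000001A2' the returned map
# would be {'vios1-00012345': '0'}.
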
def map_ports_to_fabrics(self, context, wwpn_list, give_message=False):
    """
    Given a list of FC Port WWPNs, this method calls to cinder to return
    a mapping of those wwpns to a registered fabric designation that each
    port is logged into. If a port is not found logged into a registered
    switch, its mapping will be None.

    Example:
        Input: ["10000090FA2A5866", "10000090FA2A8923", "c0507606d56e03af"]

        Returns:
        {
            "10000090FA2A8923": "B",
            "10000090FA2A5866": "A",
            "c0507606d56e03af": None
        }
    """
    cinder_c = cinder.cinderclient(context)
    # Adjust the retries down to 0 so as not to waste time on nova client
    # side retries. The cinder brocade client has its own set of
    # configurable retry logic. In addition, the host storage topology
    # reconciliation compute task will be retrying this on its periodic
    # interval.
    cinder_c.client.retries = 0
    try:
        urlpath = "/san-fabrics/map_ports"
        args = {'body': {'wwpn_list': wwpn_list}}
        info = cinder_c.client.get(urlpath, **args)
    except Exception as e:
        LOG.exception(e)
        msg = _("Unable to get port-to-fabric map from cinder service: "
                "%s") % (e.__class__.__name__ + " - " + _('%s') % e)
        LOG.warn(msg)
        if give_message:
            return msg
        return None

    # info will be returned as a tuple and we expect the second element
    # to contain the wwpn_mapping dictionary.
    if (len(info) != 2) or (not "wwpn_mapping" in info[1]):
        LOG.warn(str(info))
        msg = _("Unable to get port-to-fabric map from cinder service. "
                "Unexpected data: %s") % str(info)
        LOG.warn(msg)
        if give_message:
            return msg
        return None

    LOG.debug("GET cinder '%s' returns: %s" % (urlpath, str(info)))
    return info[1]['wwpn_mapping']

def _check_connection(self):
    for attempt in range(5):
        try:
            self._validate_transport()
            transport = self._connection.get_transport()
            if transport and transport.is_active():
                return
            else:
                LOG.warn(_('Transport missing or not active: %s')
                         % attempt)
        except Exception:
            # Sleep and re-attempt
            time.sleep(5)
    LOG.warn(_('Max conn loop exceeded to SVC.'))
    return

def get_volume_info(self, uid):
    """
    Function that returns dictionary with volume info.

    :param self: The object pointer.
    :param uid: The unique id of the volume.
    :return: Dictionary with volume info.
    """
    LOG.debug("Entering")
    cmd = "svcinfo lsvdisk -bytes -filtervalue vdisk_UID=%s -delim :" % uid
    output = self._svc_command(cmd)[0]

    if len(output) != 2:
        raise SVCVolumeNotFound(
            _("Couldn't find volume information for UID %s") % uid)

    header = output[0].split(':')
    values = output[1].split(':')
    index = header.index(SVC_KEY_VDISK_ID)
    diskId = values[index]
    index = header.index(SVC_KEY_VDISK_NAME)
    name = values[index]
    index = header.index(SVC_KEY_VOLUME_GROUP)
    volumeGroup = values[index]
    index = header.index(SVC_KEY_VDISK_CAPACITY)
    capacity = values[index]

    info = {SVC_KEY_VDISK_ID: diskId,
            SVC_KEY_VDISK_NAME: name,
            SVC_KEY_VOLUME_GROUP: volumeGroup,
            SVC_KEY_VDISK_CAPACITY: capacity}

    LOG.debug("Exiting")
    return info

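# A hypothetical two-line output for the lsvdisk command above (header
# plus the single matching vdisk; most columns omitted for brevity):
#
#   id:name:...:mdisk_grp_name:capacity:...:vdisk_UID
#   12:volume-boot-1:...:Pool0:21474836480:...:60050768019181D3480000A2
#
# get_volume_info() then returns a dict with the vdisk id, name, storage
# pool, and capacity (in bytes) pulled from those columns.
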
def get_active_target_wwpns(self):
    """
    Get storage target WWPNs that are connected to the SAN fabric.

    :return: A list of active target WWPNs in the form of uppercase hex
             strings, or None
    """
    # lsportfc is supported since SVC f/w version 6.4.0.0
    cmd = 'svcinfo lsportfc -filtervalue status=active:type=fc -delim :'
    output, error = self._svc_command(cmd)

    # output looks like this:
    # id:fc_io_port_id:port_id:type:port_speed:node_id:node_name:WWPN:
    # nportid:status:attachment
    # 0:1:1:fc:8Gb:1:node1:500507680304104A:010200:active:switch
    # 1:2:2:fc:8Gb:1:node1:500507680308104A:010200:active:switch
    if len(output) < 2:
        return None

    if error:
        msg = _("get_active_target_wwpns() failure, cmd=%(cmd)s"
                " error=%(error)s") % locals()
        LOG.error(msg)
        return None

    target_wwpns = []
    header = output.pop(0).split(':')
    wwpn_idx = header.index(SVC_KEY_PORT_WWPN)
    for line in output:
        info = line.split(':')
        target_wwpns.append(info[wwpn_idx])

    return target_wwpns

def change_sea_virt_adapters_cmd(seaname, pveaname, virt_list):
    """
    Build the VIOS command to change the SEA device with the updated
    virt_list.

    :param seaname: SEA devname for chdev
    :param pveaname: pvid_adapter devname
    :param virt_list: virt_adapters list
    :returns: A VIOS command to change the attributes of a given SEA
    """
    virt_list = [] if not virt_list else virt_list
    if len(virt_list) > 0 and not isinstance(virt_list, list):
        raise TypeError(_('change_sea_virt_adapters_cmd(): virt_list'
                          ' is not a list.'))
    additional_adapters = ""
    if len(virt_list) > 0:
        additional_adapters = ',' + ','.join(virt_list)
    return ('ioscli chdev -dev %(sea)s -attr '
            'virt_adapters=%(pvea)s%(virtlist)s '
            'pvid_adapter=%(pvea)s' %
            {'sea': seaname,
             'pvea': pveaname,
             'virtlist': additional_adapters})

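# A quick sketch of the command string this helper produces (the device
# names are hypothetical):
#
#   >>> change_sea_virt_adapters_cmd('ent8', 'ent4', ['ent5', 'ent6'])
#   'ioscli chdev -dev ent8 -attr virt_adapters=ent4,ent5,ent6 pvid_adapter=ent4'
#   >>> change_sea_virt_adapters_cmd('ent8', 'ent4', None)
#   'ioscli chdev -dev ent8 -attr virt_adapters=ent4 pvid_adapter=ent4'
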
def _find_default_with_no_vlan(self, primary_seas):
    """
    This method finds the default adapter when there's no vlanid
    specified. The default SEA is the one that has the lowest pvid of
    all the primary seas.

    :param primary_seas: This is a list of all primary_seas for a given
                         host
    :return: A default adapter. Note there can be only one in this case?
    """
    lowest_sea = None  # A None should never be returned, however
    low = 4096
    for sea in primary_seas:
        # if the sea is not available, we need to find the next available
        if sea.pvid < low and sea.is_available():
            low = sea.pvid
            lowest_sea = sea
            msg = (ras.vif_get_msg('info', 'LOWEST_PVID')
                   % {'lowest_sea': lowest_sea.name})
            ras.function_tracepoint(LOG, __name__, ras.TRACE_INFO, msg)
    # Let's say that none of the seas are available; in this case we pick
    # any one and return it
    if lowest_sea is None and len(primary_seas) >= 1:
        lowest_sea = primary_seas[0]
        LOG.info(_('None of the seas are in available state, picking %s '
                   'as default') % lowest_sea.name)
    return lowest_sea

def __init__(self, message=None, **kwargs):
    if not message:
        message = _("Resize operation failed: Disk size cannot be "
                    "decreased. Disk size requested (%(req_size)i GB) is"
                    " below the current disk size "
                    "(%(disk_size)i GB).") % kwargs
    super(IBMPowerVMDiskResizeBelowExisting, self).__init__(message)

def _discover_vios_config(self):
    """
    This function will discover the SEA configuration on the managed
    VIOSes. If it detects any faulty configuration, an exception will be
    thrown. The exception should include data on what the issue was.
    """
    ras.function_tracepoint(LOG, __name__, ras.TRACE_DEBUG, "Enter")
    try:
        # Get all the VIOS under this host, and verify we have at
        # least one
        vio_servers = self._get_all_vios()
        if not vio_servers:
            ras.function_tracepoint(LOG, __name__, ras.TRACE_ERROR,
                                    ras.vif_get_msg('error', 'VIOS_NONE')
                                    % self.host_name)
            raise excp.IBMPowerVMInvalidHostConfig(attr='vios')

        # Loop through every VIOS on the host.
        for vios in vio_servers:
            # See if we find some adapters
            if not self._populate_adapters_into_vios(vios):
                # Found no adapters... this could be fine, but log it.
                ras.function_tracepoint(LOG, __name__, ras.TRACE_WARNING,
                                        vios.lpar_name + ': ' +
                                        ras.vif_get_msg('error',
                                                        'VIOS_NOADAPTERS'))

        # If we get here, we've found all adapters, added them to their
        # respective VioServer, and verified every VioServer has at least
        # one SharedEthernetAdapter. Create the Host object with those
        # VioServers and we're done!
        self.host = dom.Host(self.host_name, vio_servers)

        # Update the available pool of VLAN IDs
        self.host.maintain_available_vid_pool()

#    except K2Error as e:
#        # Bug0002104, NameError: global name 'K2Error' is not defined
#        # If this was a K2Error, we want to reraise it so we don't put
#        # out a message about an invalid configuration, which is
#        # misleading
#        if e.k2response is not None:
#            LOG.error(_('Request headers:'))
#            LOG.error(e.k2response.reqheaders)
#            LOG.error(_('Request body:'))
#            LOG.error(e.k2response.reqbody)
#            LOG.error(_('Response headers:'))
#            LOG.error(e.k2response.headers)
#            LOG.error(_('Response body:'))
#            LOG.error(e.k2response.body)
#        raise
    except Exception as e:
        msg = (ras.vif_get_msg('error', 'VIOS_UNKNOWN') + " (" +
               (_('%s') % e) + ")")
        ras.function_tracepoint(LOG, __name__, ras.TRACE_EXCEPTION, msg)

def __init__(self, context, inst_uuid, progress_val=0):
    """
    Initiate instance progress stored in the dictionary.

    :param context: context to use
    :param inst_uuid: instance uuid
    :param progress_val: initial progress value
    """
    # Serialize initialization on a per-instance lock named after the
    # uuid. Note: string concatenation, not str.join(), which would
    # interleave 'progress_' between every character of the uuid.
    @synchronized('progress_' + inst_uuid, 'nova-prog-')
    def _init_inst_progress():
        global __INST_PROGRESS__
        if inst_uuid in __INST_PROGRESS__:
            # raise an exception since the uuid has been used
            LOG.error(_("The progress updater has already started for "
                        "virtual machine %s.") % inst_uuid)
            raise pvcex.IBMPowerVMProgressUpdateError(uuid=inst_uuid)
        self._inst_uuid = inst_uuid
        self._context = context
        progress = progress_val
        if not isinstance(progress, numbers.Number):
            progress = round(0)
        else:
            progress = round(progress_val)
        if progress > 100:
            progress = round(100)
        __INST_PROGRESS__[self._inst_uuid] = progress
        conductor.API().instance_update(self._context,
                                        self._inst_uuid,
                                        progress=progress)

    if not inst_uuid:
        # raise an exception since the uuid is blank
        LOG.error(_("The progress updater requires virtual machine uuid"))
        raise pvcex.IBMPowerVMProgressUpdateError(uuid='')
    _init_inst_progress()

def load_from_conf_data(conf_data):
    """LPAR configuration data parser.

    The configuration data is a string representation of the attributes
    of a Logical Partition. The attributes consist of name/value pairs,
    which are in comma separated value format.

    Example format:
        name=lpar_name,lpar_id=1,lpar_env=aixlinux

    :param conf_data: string containing the LPAR configuration data.
    :returns: LPAR -- LPAR object.
    """
    # conf_data can contain comma separated values within double quotes,
    # for example the virtual_serial_adapters and virtual_scsi_adapters
    # attributes, so we can't simply split on ','.
    cf_splitter = shlex.shlex(conf_data, posix=True)
    cf_splitter.whitespace = ','
    cf_splitter.whitespace_split = True
    attribs = dict(item.split("=") for item in list(cf_splitter))
    lpar = LPAR()
    for (key, value) in attribs.items():
        try:
            lpar[key] = value
        except exception.IBMPowerVMLPARAttributeNotFound:
            LOG.info(_('Encountered unknown LPAR attribute: %s '
                       'Continuing without storing.') % key)
    return lpar

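# A small sketch of the shlex-based splitting (the attribute values are
# hypothetical):
#
#   >>> data = 'name=lp1,lpar_id=1,"virtual_scsi_adapters=3/client,4/client"'
#   >>> s = shlex.shlex(data, posix=True)
#   >>> s.whitespace = ','
#   >>> s.whitespace_split = True
#   >>> list(s)
#   ['name=lp1', 'lpar_id=1', 'virtual_scsi_adapters=3/client,4/client']
#
# The quoted comma survives the split, which a plain str.split(',')
# would break.
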
def _notify_out_of_band_change(context, instance):
    """Helper method to notify when out-of-band changes occur"""
    # First write out a statement to the log file saying it changed
    LOG.info(_('Resource allocation of instance %(instance_name)s '
               'on host %(host_name)s has been modified.')
             % {'instance_name': instance['display_name'],
                'host_name': instance['host']})
    # Construct a translatable message to log and send in the notification
    msg = _('Resource allocation of instance {instance_name} '
            'on host {host_name} has been modified.')
    # Construct the full payload for the message that is being sent
    info = {'msg': msg, 'instance_id': instance['uuid']}
    info['instance_name'] = instance['display_name']
    info['host_name'] = instance['host']
    # Send the notification that the allocations changed out-of-band
    notifier = rpc.get_notifier(service='compute', host=instance['host'])
    notifier.info(context, 'compute.instance.log', info)

def __init__(self, instance, volume_id, error):
    message = _("Could not attach volume %(volume_id)s to virtual server "
                "%(instance_name)s (%(instance_id)s); %(error)s") \
        % {"volume_id": volume_id,
           "instance_name": instance['name'],
           "instance_id": instance['id'],
           "error": error}
    super(IBMPowerVMVolumeAttachFailed, self).__init__(message)

def _wait_for_clone(self, context, image_volume, count):
    count['count'] += 1
    tries = count['count']
    vol = self.volume_api.get(context, image_volume['id'])
    if vol['status'] == 'available' or vol['status'] == 'in-use':
        msg = (_("The allocation of the image volume was successful: "
                 "image volume %(image_volume)s after tries=%(tries)s")
               % locals())
        LOG.info(msg)
        raise loopingcall.LoopingCallDone()
    elif (vol['status'] == 'error' or
            tries > CONF.volume_status_check_max_retries):
        msg = (_("The allocation of the image volume either failed or "
                 "timed out: "
                 "image volume %(image_volume)s tries=%(tries)s")
               % locals())
        LOG.warn(msg)
        raise loopingcall.LoopingCallDone()

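# A minimal sketch of how a poller like _wait_for_clone() is typically
# driven; the interval value and call site are assumptions, not taken
# from this module:
#
#   count = {'count': 0}
#   timer = loopingcall.FixedIntervalLoopingCall(
#       self._wait_for_clone, context, image_volume, count)
#   timer.start(interval=5).wait()
#
# Raising LoopingCallDone() inside the polled function is what stops the
# timer.
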
def reset_task_state():
    try:
        conductor.API().instance_update(context,
                                        instance['uuid'],
                                        task_state=None,
                                        expected_task_state=ACTIVATING)
    except Exception:
        LOG.warning(_('Unable to reset task state to None for '
                      'instance %s') % instance['name'])

def notify_unmanage(self, context, host, mgmt_ip):
    """Notifies this Management System to remove Host Management"""
    info = dict(hostname=host, ip=mgmt_ip)
    try:
        # Log a message for debug purposes that we are removing the Host
        LOG.info(_("Removing Host %(hostname)s, switching "
                   "to Management System %(ip)s...") % info)
        # Generate a Context with a Token to use for the Requests
        context = self._generate_admin_context()
        # If the Compute Node doesn't exist, we don't need to notify
        comp_node = self._get_compute_node(context, host)
        if comp_node is None:
            return
        # Notify that the old Management System is no longer managing
        # the host
        text = _("The PowerVC management system at %(ip)s is taking over "
                 "management of host %(hostname)s. The host will be "
                 "removed from this management system.") % info
        anotifier = rpc_api.get_notifier('compute', host)
        anotifier.info(context, 'compute.instance.log', {'msg': text})
        try:
            __import__(HOST_REG)
            # Call the Host Registrar to do the full clean-up of the Host
            get_registrar = getattr(sys.modules[HOST_REG],
                                    'get_registrar')
            registrar = get_registrar(context, host_name=host)
            registrar.skip_remote_commands = True
            registrar.deregister(force=True)
        except Exception as ex:
            LOG.warn(_("Exception trying to fully remove the Host."))
            LOG.exception(ex)
        # Send a notification that we are removing the Compute Node
        anotifier.info(context, 'compute.node.delete.start', comp_node)
        # Fall back to just cleaning the DB, if the main flow failed
        hostfact = compute_dom.ManagedHostFactory.get_factory()
        hostfact.delete_host(context, host)
        # Send a notification that we removed the Compute Node
        anotifier.info(context, 'compute.node.delete.end', comp_node)
        # Log a message for debug purposes that we removed the Host
        LOG.info(_("Removed Host %(hostname)s, switching "
                   "to Management System %(ip)s.") % info)
    except Exception as exc:
        # Log the Exception that occurred while trying to Remove the Host
        LOG.warn(_("Failed to remove Host %(hostname)s while "
                   "switching to Management System %(ip)s") % info)
        LOG.exception(exc)

def get_fabrics(self, context):
    """
    Retrieve the list of fabric details from cinder. Return a dictionary
    keyed by fabric name.

    Example return:
    {
        'A': {
            "fabric_name": "A",
            "fabric_display_name": "fab184",
            "access_ip": "9.114.181.184",
            "user_id": "admin"
        },
        'B': {
            "fabric_name": "B",
            "fabric_display_name": "fab185",
            "access_ip": "9.114.181.185",
            "user_id": "admin"
        }
    }
    """
    fabrics = []
    cinder_c = cinder.cinderclient(context)
    try:
        urlpath = "/san-fabrics/detail"
        info = cinder_c.client.get(urlpath)
    except Exception as e:
        LOG.exception(e)
        msg = _("Unable to get fabrics from cinder service: "
                "%s") % (e.__class__.__name__ + " - " + _('%s') % e)
        LOG.warn(msg)
        return fabrics

    # info will be returned as a tuple and we expect the second element
    # to contain the fabrics dictionary.
    if (len(info) != 2) or (not "fabrics" in info[1]):
        LOG.warn(str(info))
        msg = _("Unable to get fabrics from cinder service. Unexpected "
                "data: %s") % str(info[0])
        LOG.warn(msg)
        return fabrics

    LOG.debug("GET cinder '%s' returns: %s" % (urlpath, str(info)))

    name_to_fab = {}
    for fabric in info[1]['fabrics']:
        name_to_fab[fabric['fabric_name']] = fabric

    return name_to_fab

def _validate_transport(self):
    # Get the transport so we can see if it's active
    transport = self._connection.get_transport()
    try:
        # If we have a dead connection, build a new one and return
        if not transport or (not transport.is_active()):
            self._connection = self._svc_connect(self._svc_connect_data)
            transport = self._connection.get_transport()
            LOG.debug("No SVC SSH transport or transport is not active, "
                      "reconnected.")
    except Exception:
        LOG.exception(_('Connection error connecting to SVC.'))
        raise Exception(_("Error connecting to SVC"))
    return

def create_new_volume(self, volumeInfo, change_name=True):
    """
    Function that creates a new volume.

    :param self: The object pointer.
    :param volumeInfo: Characteristics of the new volume.
    :param change_name: Whether to generate a new volume name.
    :return: Tuple of the new volume name and the volume uid.
    """
    size = volumeInfo.get(SVC_KEY_VDISK_CAPACITY)
    if change_name:
        new_volume_name = self._get_new_volume_name(
            volumeInfo.get(SVC_KEY_VDISK_NAME))
    else:
        new_volume_name = volumeInfo.get(SVC_KEY_VDISK_NAME)

    if SVC_KEY_VOLUME_GROUP in volumeInfo:
        volumeGroup = volumeInfo.get(SVC_KEY_VOLUME_GROUP)
    elif self.dft_stg_pool:
        volumeGroup = self.dft_stg_pool
    else:
        volumeGroup = self.get_mdisk_grp_by_size(size)

    if volumeGroup is None:
        raise SVCNoSANStoragePoolException

    # The iogrp parameter should not use a name since it could be
    # customized. It is always safe to use iogrp 0.
    cmd = "svctask mkvdisk -name %s -iogrp 0 -mdiskgrp %s " \
          "-size %s -unit b" % (new_volume_name, volumeGroup, size)

    output, err_output = self._svc_command(cmd)
    volume_uid = self.get_uid(new_volume_name)

    # Check if it got created
    if not volume_uid:
        # The SVC message for out of space is not really user friendly,
        # so we manually check whether the pool ran out of space.
        free_capacity = self.get_mdisk_grp_size(volumeGroup)
        if float(size) > float(free_capacity):
            ex_args = {'pool_name': volumeGroup,
                       'size': size,
                       'free_capacity': free_capacity}
            raise SVCVolumeGroupOutOfSpace(**ex_args)
        if err_output:
            ex_args = {'new_volume_name': new_volume_name,
                       'err_output': err_output}
            raise SVCVolumeCreationFailed(**ex_args)
        else:
            # Failed to create the volume but got no error message;
            # we really shouldn't hit this condition.
            ex_args = {'cmd': cmd,
                       'e': _("No error available")}
            raise SVCCommandException(**ex_args)
    return new_volume_name, volume_uid

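# A hypothetical example of the mkvdisk command built above, for a 20 GiB
# volume in pool 'Pool0' (the name, pool, and size are invented):
#
#   svctask mkvdisk -name volume-boot-1 -iogrp 0 -mdiskgrp Pool0 \
#       -size 21474836480 -unit b
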
def _wait_for_extend(self, context, volume, count):
    count['count'] += 1
    tries = count['count']
    vol = self.volume_api.get(context, volume['id'])
    if vol['status'] == 'in-use':
        raise loopingcall.LoopingCallDone()
    elif (vol['status'] == 'error_extending' or
            vol['status'] == 'error'):
        msg = (_("Error increasing size of volume: "
                 "volume %(volume)s tries=%(tries)s") % locals())
        LOG.warn(msg)
        raise loopingcall.LoopingCallDone()

def verify_dlpar_enabled(dlpar, instance_name):
    """
    DLPAR must be enabled on the instance.

    :param dlpar: Boolean value indicating dlpar is enabled
    :param instance_name: The name of the instance (for logging)
    """
    if not dlpar:
        error = (_("Cannot live migrate %s because DLPAR "
                   "is not enabled.") % instance_name)
        LOG.exception(error)
        raise exception.IBMPowerVMMigrationFailed(error)

def create_host(self, wwpns, hostname):
    """
    Create a new host on the storage system.

    :param wwpns: The wwpns for the initiator as hex strings
    :param hostname: initiator's hostname
    :return: host name defined on SVC storage if successfully created,
             or None if the host is already defined but not yet visible
    """
    if not wwpns or len(wwpns) == 0 or not hostname or len(hostname) == 0:
        ex_args = {'wwpns': wwpns,
                   'hostname': hostname}
        raise SVCCreateHostParameterError(**ex_args)

    ports = ':'.join(wwpns)
    # get the host shortname.
    hostname_str = hostname.split('.')[0]
    LOG.debug("enter: create_host(): wwpns=%(wwpns)s"
              " hostname=%(hostname)s"
              % {'wwpns': ports, 'hostname': hostname_str})

    rand_id = str(random.randint(0, 99999999)).zfill(8)
    host_name = '%s-%s' % (self._hostname_prefix(hostname_str), rand_id)
    cmd = 'mkhost -name %(host_name)s -hbawwpn %(ports)s -force' % locals()
    output, err_output = self._svc_command(cmd)
    if err_output:
        # err_output should be a list type
        if isinstance(err_output, types.ListType):
            err_msg = err_output[0]
        else:
            err_msg = err_output
        err_code = err_msg.split()[0]
        if err_code and err_code == 'CMMVC6035E':
            # The host has been defined on the storage, but we don't see
            # it. Return None and ask the caller to run cfgdev to log in
            # to the SAN again and retry get_host_from_wwpns().
            return None
        msg = (_("create_host() failure cmd=%(cmd)s, error:%(err_output)s."
                 " Make sure host and storage are zoned properly and check"
                 " SAN fabric connectivity") % locals())
        LOG.exception(msg)
        ex_args = {'host_name': hostname_str,
                   'err_output': err_output}
        raise SVCCreateHostFailed(**ex_args)
    return host_name

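# A hypothetical example of the generated host name and mkhost command
# (the WWPNs and random suffix are invented):
#
#   host_name = 'aixhost1-04217339'
#   mkhost -name aixhost1-04217339 \
#       -hbawwpn 10000090FA2A5866:10000090FA2A8923 -force
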