def delete_volume(self, volume): """Destroy a zvol on appliance. :param volume: volume reference """ volume_name = self._get_zvol_name(volume['name']) try: props = self.nms.zvol.get_child_props(volume_name, 'origin') or {} self.nms.zvol.destroy(volume_name, '') except exception.NexentaException as exc: if 'does not exist' in exc.args[0]: LOG.info(_LI('Volume %s does not exist, it ' 'seems it was already deleted.'), volume_name) return if 'zvol has children' in exc.args[0]: LOG.info(_LI('Volume %s will be deleted later.'), volume_name) return raise origin = props.get('origin') if origin and self._is_clone_snapshot_name(origin): volume, snapshot = origin.split('@') volume = volume.lstrip('%s/' % self.configuration.nexenta_volume) try: self.delete_snapshot({'volume_name': volume, 'name': snapshot}) except exception.NexentaException as exc: LOG.warning(_LW('Cannot delete snapshot %(origin)s: %(exc)s'), {'origin': origin, 'exc': exc})
def do_setup(self, context): """Setup and verify HDS HNAS storage connection.""" self.context = context (self.arid, self.hnas_name, self.lumax) = self._array_info_get() self._check_hdp_list() service_list = self.config['services'].keys() for svc in service_list: svc = self.config['services'][svc] pool = {} pool['pool_name'] = svc['volume_type'] pool['service_label'] = svc['volume_type'] pool['hdp'] = svc['hdp'] self.pools.append(pool) LOG.info(_LI("Configured pools: %s"), self.pools) iscsi_info = self._get_iscsi_info() LOG.info(_LI("do_setup: %s"), iscsi_info) for svc in self.config['services'].keys(): svc_ip = self.config['services'][svc]['iscsi_ip'] if svc_ip in iscsi_info.keys(): LOG.info(_LI("iSCSI portal found for service: %s"), svc_ip) self.config['services'][svc]['port'] = \ iscsi_info[svc_ip]['port'] self.config['services'][svc]['ctl'] = iscsi_info[svc_ip]['ctl'] self.config['services'][svc]['iscsi_port'] = \ iscsi_info[svc_ip]['iscsi_port'] else: # config iscsi address not found on device! LOG.error(_LE("iSCSI portal not found " "for service: %s"), svc_ip) raise exception.ParameterNotFound(param=svc_ip)
def delete_snapshot(self, snapshot): """Delete volume's snapshot on appliance. :param snapshot: snapshot reference """ volume_name = self._get_zvol_name(snapshot['volume_name']) snapshot_name = '%s@%s' % (volume_name, snapshot['name']) try: self.nms.snapshot.destroy(snapshot_name, '') except exception.NexentaException as exc: if "does not exist" in exc.args[0]: LOG.info(_LI('Snapshot %s does not exist, it seems it was ' 'already deleted.'), snapshot_name) elif "snapshot has dependent clones" in exc.args[0]: LOG.info(_LI('Snapshot %s has dependent clones, will be ' 'deleted later.'), snapshot_name) else: raise ctxt = context.get_admin_context() try: self.db.volume_get(ctxt, snapshot['volume_name']) except exception.VolumeNotFound: LOG.info(_LI('Origin volume %s appears to be removed, try to ' 'remove it from backend if it is there.')) if self.nms.volume.object_exists(volume_name): self.nms.zvol.destroy(volume_name, '')
def initialize_connection(self, volume, connector): """Map the created volume to connector['initiator']. :param volume: dictionary volume reference :param connector: dictionary connector reference """ LOG.info(_LI("initialize volume %(vol)s connector %(conn)s"), {'vol': volume, 'conn': connector}) # connector[ip, host, wwnns, unititator, wwp/ service_info = self._get_service_target(volume) (ip, ipp, ctl, port, _hdp, tgtalias, secret) = service_info info = _loc_info(volume['provider_location']) if 'tgt' in info.keys(): # spurious repeat connection # print info.keys() LOG.debug("initiate_conn: tgt already set %s", info['tgt']) (arid, lun_name) = info['id_lu'] loc = arid + '.' + lun_name # sps, use target if provided try: out = self.bend.add_iscsi_conn(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], lun_name, _hdp, port, tgtalias, connector['initiator']) except processutils.ProcessExecutionError: msg = _("Error attaching volume %s. " "Target limit might be reached!") % volume['id'] raise exception.ISCSITargetAttachFailed(message=msg) hnas_portal = ip + ':' + ipp # sps need hlun, fulliqn hlun = out.split()[1] fulliqn = out.split()[13] tgt = hnas_portal + ',' + tgtalias + ',' + loc + ',' + ctl + ',' tgt += port + ',' + hlun LOG.info(_LI("initiate: connection %s"), tgt) properties = {} properties['provider_location'] = tgt self._update_vol_location(volume['id'], tgt) properties['target_discovered'] = False properties['target_portal'] = hnas_portal properties['target_iqn'] = fulliqn properties['target_lun'] = hlun properties['volume_id'] = volume['id'] properties['auth_username'] = connector['initiator'] if self.config['chap_enabled'] == 'True': properties['auth_method'] = 'CHAP' properties['auth_password'] = secret conn_info = {'driver_volume_type': 'iscsi', 'data': properties} LOG.debug("initialize_connection: conn_info: %s.", conn_info) return conn_info
def initialize_connection(self, volume, connector): """Driver entry point to attach a volume to an instance.""" LOG.info(_LI('Entering initialize_connection volume=%(vol)s' ' connector=%(conn)s location=%(loc)s') % {'vol': volume, 'conn': connector, 'loc': volume['provider_location']}) initiator_name = connector['initiator'] initiator_group_name = self._get_igroupname_for_initiator( initiator_name) if not initiator_group_name: initiator_group_name = self._create_igroup_for_initiator( initiator_name) LOG.info(_LI('Initiator group name is %(grp)s for initiator %(iname)s') % {'grp': initiator_group_name, 'iname': initiator_name}) self.APIExecutor.add_acl(volume, initiator_group_name) (iscsi_portal, iqn, lun_num) = volume['provider_location'].split() properties = {} properties['target_discovered'] = False # whether discovery was used properties['target_portal'] = iscsi_portal properties['target_iqn'] = iqn properties['target_lun'] = lun_num properties['volume_id'] = volume['id'] # used by xen currently return { 'driver_volume_type': 'iscsi', 'data': properties, }
def output_param_to_log(self, storage_protocol): essential_inherited_param = ['volume_backend_name', 'volume_driver'] conf = self.configuration LOG.info(basic_lib.set_msg(1, config_group=conf.config_group)) version = self.command.get_comm_version() if conf.hitachi_unit_name: prefix = 'HSNM2 version' else: prefix = 'RAID Manager version' LOG.info(_LI('\t%(prefix)-35s : %(version)s'), {'prefix': prefix, 'version': version}) for param in essential_inherited_param: value = conf.safe_get(param) LOG.info(_LI('\t%(param)-35s : %(value)s'), {'param': param, 'value': value}) for opt in volume_opts: if not opt.secret: value = getattr(conf, opt.name) LOG.info(_LI('\t%(name)-35s : %(value)s'), {'name': opt.name, 'value': value}) if storage_protocol == 'iSCSI': value = getattr(conf, 'hitachi_group_request') LOG.info(_LI('\t%(request)-35s : %(value)s'), {'request': 'hitachi_group_request', 'value': value})
def _check_response(self, response, request, is_get_request=True, params=None): if response.status_code == 401 or response.status_code == 403: LOG.info(_LI("Token is invalid, going to re-login and get " "a new one.")) login_request = ( "https://" + self.server_ip + ":" + self.server_port + "/api/login") verify_cert = self._get_verify_cert() r = requests.get( login_request, auth=( self.server_username, self.server_password), verify=verify_cert) token = r.json() self.server_token = token # Repeat request with valid token. LOG.info(_LI( "Going to perform request again %s with valid token."), request) if is_get_request: res = requests.get(request, auth=(self.server_username, self.server_token), verify=verify_cert) else: res = requests.post(request, data=json.dumps(params), headers=self._get_headers(), auth=(self.server_username, self.server_token), verify=verify_cert) return res return response
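# Hedged sketch (not ScaleIO driver code): the re-login-and-retry pattern used
# in _check_response above, shown in isolation.  The function name and the
# do_request(token) callable protocol are illustrative assumptions; only the
# generic `requests` calls are real library API.
import requests


def call_with_relogin(do_request, token, login_url, username, password,
                      verify=True):
    """Call do_request(token); on 401/403 fetch a fresh token and retry once."""
    response = do_request(token)
    if response.status_code in (401, 403):
        # Token expired or was rejected: authenticate again with basic auth.
        token = requests.get(login_url, auth=(username, password),
                             verify=verify).json()
        response = do_request(token)
    return response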
def smis_do_iscsi_discovery(self, volume): """Calls iscsiadm with each iscsi ip address in the list""" LOG.info(_LI("ISCSI provider_location not stored, using discovery.")) targets = [] if len(self.iscsi_ip_addresses) == 0: LOG.error(_LE("The list of iscsi_ip_addresses is empty")) return targets for iscsi_ip_address in self.iscsi_ip_addresses: out, _err, go_again, ex = self._call_iscsiadm(iscsi_ip_address) if not go_again: break if not out: if ex: exception_message = (_("Unsuccessful iscsiadm. " "Exception is %(ex)s. ") % {'ex': ex}) else: exception_message = (_("iscsiadm execution failed. ")) raise exception.VolumeBackendAPIException(data=exception_message) LOG.info(_LI( "smis_do_iscsi_discovery is: %(out)s."), {'out': out}) for target in out.splitlines(): targets.append(target) return targets
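# Hedged illustration: each line appended to `targets` above is a raw
# iscsiadm sendtargets record.  The layout assumed here is
# "<portal ip>:<port>,<tpgt> <target iqn>"; the values below are made up.
sample = "192.168.1.10:3260,1 iqn.1992-04.com.emc:cx.apm00123456789.a0"
portal, iqn = sample.split()
ip_port, tpgt = portal.split(',')
assert ip_port == "192.168.1.10:3260"
assert iqn.startswith("iqn.")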
def do_setup(self, context): """Sets up and verify Hitachi HNAS storage connection.""" self.context = context self._check_fs_list() service_list = self.config['services'].keys() for svc in service_list: svc = self.config['services'][svc] pool = {} pool['pool_name'] = svc['volume_type'] pool['service_label'] = svc['volume_type'] pool['fs'] = svc['hdp'] self.pools.append(pool) LOG.info(_LI("Configured pools: %(pool)s"), {'pool': self.pools}) evs_info = self.backend.get_evs_info() LOG.info(_LI("Configured EVSs: %(evs)s"), {'evs': evs_info}) for svc in self.config['services'].keys(): svc_ip = self.config['services'][svc]['iscsi_ip'] if svc_ip in evs_info.keys(): LOG.info(_LI("iSCSI portal found for service: %s"), svc_ip) self.config['services'][svc]['evs'] = ( evs_info[svc_ip]['evs_number']) self.config['services'][svc]['iscsi_port'] = '3260' self.config['services'][svc]['port'] = '0' else: LOG.error(_LE("iSCSI portal not found " "for service: %(svc)s"), {'svc': svc_ip}) raise exception.InvalidParameterValue(err=svc_ip)
def _snapshot_volume(self, vol_id, snapname):
    LOG.info(_LI("Snapshot volume %(vol)s into snapshot %(id)s."),
             {'vol': vol_id, 'id': snapname})
    params = {
        'snapshotDefs': [{"volumeId": vol_id, "snapshotName": snapname}]}
    req_vars = {'server_ip': self.server_ip,
                'server_port': self.server_port}
    request = ("https://%(server_ip)s:%(server_port)s"
               "/api/instances/System/action/snapshotVolumes") % req_vars
    r = requests.post(
        request,
        data=json.dumps(params),
        headers=self._get_headers(),
        auth=(self.server_username,
              self.server_token),
        verify=self._get_verify_cert())
    r = self._check_response(r, request, False, params)
    response = r.json()
    LOG.info(_LI("Snapshot volume response: %s."), response)
    if r.status_code != OK_STATUS_CODE and "errorCode" in response:
        msg = (_("Failed creating snapshot for volume %(volname)s: "
                 "%(response)s.") %
               {'volname': vol_id,
                'response': response['message']})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    return {'provider_id': response['volumeIdList'][0]}
def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
    """Copies src volume to dest volume."""
    LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s."),
             {'src': src_vol['label'], 'dst': dst_vol['label']})
    try:
        job = None
        job = self._client.create_volume_copy_job(src_vol['id'],
                                                  dst_vol['volumeRef'])
        while True:
            j_st = self._client.list_vol_copy_job(job['volcopyRef'])
            if (j_st['status'] == 'inProgress'
                    or j_st['status'] == 'pending'
                    or j_st['status'] == 'unknown'):
                time.sleep(self.SLEEP_SECS)
                continue
            if (j_st['status'] == 'failed' or j_st['status'] == 'halted'):
                LOG.error(_LE("Vol copy job status %s."), j_st['status'])
                msg = _("Vol copy job for dest %s failed.") % \
                    dst_vol['label']
                raise exception.NetAppDriverException(msg)
            LOG.info(_LI("Vol copy job completed for dest %s."),
                     dst_vol['label'])
            break
    finally:
        if job:
            try:
                self._client.delete_vol_copy_job(job['volcopyRef'])
            except exception.NetAppDriverException:
                LOG.warning(_LW("Failure deleting "
                                "job %s."), job['volcopyRef'])
        else:
            LOG.warning(_LW('Volume copy job for src vol %s not found.'),
                        src_vol['id'])
    LOG.info(_LI('Copy job to dest vol %s completed.'), dst_vol['label'])
def _get_bandwidth_limit(self, size, storage_type): try: max_bandwidth = self._find_limit(storage_type, scaleio.QOS_BANDWIDTH_LIMIT, scaleio.BANDWIDTH_LIMIT) if max_bandwidth is not None: max_bandwidth = (self._round_to_num_gran(int(max_bandwidth), units.Ki)) max_bandwidth = six.text_type(max_bandwidth) LOG.info(_LI("max bandwidth is: %s"), max_bandwidth) bw_per_gb = self._find_limit(storage_type, QOS_BANDWIDTH_PER_GB, BANDWIDTH_PER_GB) LOG.info(_LI("bandwidth per gb is: %s"), bw_per_gb) if bw_per_gb is None: return max_bandwidth # Since ScaleIO volumes size is in 8GB granularity # and BWS limitation is in 1024 KBs granularity, we need to make # sure that scaled_bw_limit is in 128 granularity. scaled_bw_limit = (size * self._round_to_num_gran(int(bw_per_gb), MIN_BWS_SCALING_SIZE)) if max_bandwidth is None or scaled_bw_limit < int(max_bandwidth): return six.text_type(scaled_bw_limit) else: return max_bandwidth except ValueError: msg = _("None numeric BWS QoS limitation") raise exception.InvalidInput(reason=msg)
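# Hedged worked example of the scaling math above.  It assumes
# _round_to_num_gran() rounds up to the nearest multiple of its granularity
# argument and that MIN_BWS_SCALING_SIZE is 8; both are assumptions about the
# driver's helpers, and the numbers are only illustrative.
def _round_up(value, gran):
    return value if value % gran == 0 else value + gran - value % gran


assert _round_up(2500, 1024) == 3072     # max_bandwidth snapped to 1024 KB/s
assert 16 * _round_up(200, 8) == 3200    # 16 GB volume at 200 KB/s per GB
# 3200 is not below 3072, so max_bandwidth (3072) would be returned here.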
def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" copy_success = False try: major, minor = self.zapi_client.get_ontapi_version() col_path = self.configuration.netapp_copyoffload_tool_path # Search the local image cache before attempting copy offload cache_result = self._find_image_in_cache(image_id) if cache_result: copy_success = self._copy_from_cache(volume, image_id, cache_result) if copy_success: LOG.info(_LI('Copied image %(img)s to volume %(vol)s ' 'using local image cache.'), {'img': image_id, 'vol': volume['id']}) # Image cache was not present, attempt copy offload workflow if (not copy_success and col_path and major == 1 and minor >= 20): LOG.debug('No result found in image cache') self._copy_from_img_service(context, volume, image_service, image_id) LOG.info(_LI('Copied image %(img)s to volume %(vol)s using' ' copy offload workflow.'), {'img': image_id, 'vol': volume['id']}) copy_success = True except Exception as e: LOG.exception(_LE('Copy offload workflow unsuccessful. %s'), e) finally: if not copy_success: super(NetAppCmodeNfsDriver, self).copy_image_to_volume( context, volume, image_service, image_id)
def _check_mode_get_or_register_storage_system(self): """Does validity checks for storage system registry and health.""" def _resolve_host(host): try: ip = na_utils.resolve_hostname(host) return ip except socket.gaierror as e: LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.') % {'host': host, 'e': e}) raise exception.NoValidHost( _("Controller IP '%(host)s' could not be resolved: %(e)s.") % {'host': host, 'e': e}) ips = self.configuration.netapp_controller_ips ips = [i.strip() for i in ips.split(",")] ips = [x for x in ips if _resolve_host(x)] host = na_utils.resolve_hostname( self.configuration.netapp_server_hostname) if host in ips: LOG.info(_LI('Embedded mode detected.')) system = self._client.list_storage_systems()[0] else: LOG.info(_LI('Proxy mode detected.')) system = self._client.register_storage_system( ips, password=self.configuration.netapp_sa_password) self._client.set_system_id(system.get('id'))
def extend_volume(self, volume, new_size):
    """Extend an existing volume.

    :param volume: dictionary volume reference
    :param new_size: int size in GB to extend
    """
    nfs_mount = self._get_provider_location(volume['id'])
    path = self._get_volume_path(nfs_mount, volume['name'])

    # Resize the image file on share to new size.
    LOG.debug("Checking file for resize")

    if self._is_file_size_equal(path, new_size):
        return
    else:
        LOG.info(_LI("Resizing file to %sG"), new_size)
        image_utils.resize_image(path, new_size)
        if self._is_file_size_equal(path, new_size):
            LOG.info(_LI("LUN %(id)s extended to %(size)s GB."),
                     {'id': volume['id'], 'size': new_size})
            return
        else:
            raise exception.InvalidResults(
                _("Resizing image file failed."))
def manage_existing(self, volume, existing_vol_ref): """Manages an existing volume. The specified Cinder volume is to be taken into Cinder management. The driver will verify its existence and then rename it to the new Cinder volume name. It is expected that the existing volume reference is a File System and some volume_name; e.g., openstack/vol_to_manage :param volume: cinder volume to manage :param existing_vol_ref: driver specific information used to identify a volume :returns: the provider location of the volume managed """ LOG.info(_LI("Asked to manage ISCSI volume %(vol)s, with vol " "ref %(ref)s."), {'vol': volume.id, 'ref': existing_vol_ref['source-name']}) fs_label, vol_name = ( self._get_info_from_vol_ref(existing_vol_ref['source-name'])) if volume.volume_type is not None: self._check_pool_and_fs(volume, fs_label) self.backend.rename_existing_lu(fs_label, vol_name, volume.name) LOG.info(_LI("Set newly managed Cinder volume name to %(name)s."), {'name': volume.name}) return {'provider_location': self._get_provider_location(volume)}
def delete_snapshot(self, snapshot): """Deletes an rbd snapshot.""" # NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are # utf-8 otherwise librbd will barf. volume_name = utils.convert_str(snapshot.volume_name) snap_name = utils.convert_str(snapshot.name) with RBDVolumeProxy(self, volume_name) as volume: try: volume.unprotect_snap(snap_name) except self.rbd.InvalidArgument: LOG.info( _LI("InvalidArgument: Unable to unprotect snapshot %s."), snap_name) except self.rbd.ImageNotFound: LOG.info( _LI("ImageNotFound: Unable to unprotect snapshot %s."), snap_name) except self.rbd.ImageBusy: children_list = self._get_children_info(volume, snap_name) if children_list: for (pool, image) in children_list: LOG.info(_LI('Image %(pool)s/%(image)s is dependent ' 'on the snapshot %(snap)s.'), {'pool': pool, 'image': image, 'snap': snap_name}) raise exception.SnapshotIsBusy(snapshot_name=snap_name) try: volume.remove_snap(snap_name) except self.rbd.ImageNotFound: LOG.info(_LI("Snapshot %s does not exist in backend."), snap_name)
def _get_service(self, volume):
    """Get service parameters.

    Get the available service parameters for a given volume using
    its type.

    :param volume: dictionary volume reference
    """
    LOG.debug("_get_service: volume: %s", volume)
    label = utils.extract_host(volume['host'], level='pool')

    if label in self.config['services'].keys():
        svc = self.config['services'][label]
        LOG.info(_LI("Get service: %(lbl)s->%(svc)s"),
                 {'lbl': label, 'svc': svc['fslabel']})
        service = (svc['hdp'], svc['path'], svc['fslabel'])
    else:
        LOG.info(_LI("Available services: %s"),
                 self.config['services'].keys())
        LOG.error(_LE("No configuration found for service: %s"), label)
        raise exception.ParameterNotFound(param=label)

    return service
def _get_service(self, volume): """Get service parameters. Get the available service parameters for a given volume using its type. :param volume: dictionary volume reference :returns: Tuple containing the service parameters (label, export path and export file system) or error if no configuration is found. :raises: ParameterNotFound """ LOG.debug("_get_service: volume: %(vol)s", {'vol': volume}) label = utils.extract_host(volume.host, level='pool') if label in self.config['services'].keys(): svc = self.config['services'][label] LOG.info(_LI("_get_service: %(lbl)s->%(svc)s"), {'lbl': label, 'svc': svc['export']['fs']}) service = (svc['hdp'], svc['export']['path'], svc['export']['fs']) else: LOG.info(_LI("Available services: %(svc)s"), {'svc': self.config['services'].keys()}) LOG.error(_LE("No configuration found for service: %(lbl)s"), {'lbl': label}) raise exception.ParameterNotFound(param=label) return service
def __exit__(self, ex_type, ex_value, ex_traceback): if not ex_value: return True if isinstance(ex_value, exception.NotAuthorized): msg = six.text_type(ex_value) raise Fault(webob.exc.HTTPForbidden(explanation=msg)) elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod): raise elif isinstance(ex_value, (exception.Invalid, exception.NotFound)): raise Fault(exception.ConvertedException( code=ex_value.code, explanation=six.text_type(ex_value))) elif isinstance(ex_value, TypeError): exc_info = (ex_type, ex_value, ex_traceback) LOG.error(_LE( 'Exception handling resource: %s'), ex_value, exc_info=exc_info) raise Fault(webob.exc.HTTPBadRequest()) elif isinstance(ex_value, Fault): LOG.info(_LI("Fault thrown: %s"), six.text_type(ex_value)) raise ex_value elif isinstance(ex_value, webob.exc.HTTPException): LOG.info(_LI("HTTP exception thrown: %s"), six.text_type(ex_value)) raise Fault(ex_value) # We didn't handle the exception return False
def _unexport_lun(self, volume, target, connector): """Removes the export configuration for the given volume. The equivalent CLI command is "no lun export container <container_name> name <lun_name>" Arguments: volume -- volume object provided by the Manager """ v = self.common.vmem_mg LOG.info(_LI("Unexporting lun %(vol)s host is %(host)s"), {'vol': volume['id'], 'host': connector['host']}) try: self.common._send_cmd(v.lun.unassign_lun_from_iscsi_target, "Unassign device successfully", volume['id'], target, True) except exception.ViolinBackendErrNotFound: LOG.info(_LI("Lun %s already unexported, continuing"), volume['id']) except Exception: LOG.exception(_LE("LUN unexport failed!")) msg = _("LUN unexport failed") raise exception.ViolinBackendErr(message=msg)
def create_volume(self, volume): """Create a LU on HNAS. :param volume: dictionary volume reference """ service = self._get_service(volume) (_ip, _ipp, _ctl, _port, hdp, target, secret) = service out = self.bend.create_lu(self.config['hnas_cmd'], self.config['mgmt_ip0'], self.config['username'], self.config['password'], hdp, '%s' % (int(volume['size']) * units.Ki), volume['name']) LOG.info(_LI("create_volume: create_lu returns %s"), out) lun = self.arid + '.' + out.split()[1] sz = int(out.split()[5]) # Example: 92210013.volume-44d7e29b-2aa4-4606-8bc4-9601528149fd LOG.info(_LI("LUN %(lun)s of size %(sz)s MB is created."), {'lun': lun, 'sz': sz}) return {'provider_location': lun}
def create_volume_from_snapshot(self, volume, snapshot, method='COPY'):
    LOG.info(_LI('Creating volume from snapshot. volume: %s'),
             volume['name'])
    LOG.info(_LI('Source Snapshot: %s'), snapshot['name'])

    self._ensure_shares_mounted()
    self.zfssa.create_volume_from_snapshot_file(src_file=snapshot['name'],
                                                dst_file=volume['name'],
                                                method=method)
    volume['provider_location'] = self.mount_path
    if volume['size'] != snapshot['volume_size']:
        try:
            self.extend_volume(volume, volume['size'])
        except Exception:
            vol_path = self.local_path(volume)
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Error in extending volume size: Volume: '
                              '%(volume)s Vol_Size: %(vol_size)d with '
                              'Snapshot: %(snapshot)s Snap_Size: '
                              '%(snap_size)d'),
                          {'volume': volume['name'],
                           'vol_size': volume['size'],
                           'snapshot': snapshot['name'],
                           'snap_size': snapshot['volume_size']})
                self._execute('rm', '-f', vol_path, run_as_root=True)

    volume_origin = {'origin': snapshot['volume_name']}
    self.zfssa.set_file_props(volume['name'], volume_origin)

    return {'provider_location': volume['provider_location']}
def request(self, url, method='GET', body=None, headers=None,
            ssl_verify=True, stream=False):
    _headers = {'Content-Type': 'application/json'}
    _headers.update(headers or {})

    parsed_url = urlparse.urlparse(url)
    port = parsed_url.port
    hostname = parsed_url.hostname
    scheme = parsed_url.scheme

    if netutils.is_valid_ipv6(hostname):
        hostname = "[%s]" % hostname

    relative_url = parsed_url.path
    if parsed_url.query:
        relative_url = relative_url + "?" + parsed_url.query

    LOG.info(_LI("Doing %(method)s on %(relative_url)s"),
             {'method': method, 'relative_url': relative_url})
    if body:
        LOG.info(_LI("Body: %s"), body)

    if port:
        _url = "%s://%s:%d%s" % (scheme, hostname, int(port), relative_url)
    else:
        _url = "%s://%s%s" % (scheme, hostname, relative_url)

    response = requests.request(method, _url, data=body, headers=_headers,
                                verify=ssl_verify, stream=stream)
    return response
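# Hedged standalone illustration of the URL re-assembly above for an IPv6
# endpoint: urlparse() reports the hostname without brackets, so they are put
# back before the "%s://%s:%d%s" format.  Uses the stdlib parser; the endpoint
# and path are made-up values.
from urllib.parse import urlparse

parsed = urlparse("https://[fd00::1]:8776/v2/volumes?limit=5")
assert parsed.hostname == "fd00::1"          # brackets stripped by the parser
host = "[%s]" % parsed.hostname              # restored, as in request() above
relative = parsed.path + "?" + parsed.query
assert ("%s://%s:%d%s" % (parsed.scheme, host, parsed.port, relative)
        == "https://[fd00::1]:8776/v2/volumes?limit=5")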
def delete_volume(self, volume): """Deletes a logical volume. :param volume: volume reference """ nfs_share = volume.get('provider_location') if nfs_share: nms = self.share2nms[nfs_share] vol, parent_folder = self._get_share_datasets(nfs_share) folder = '%s/%s/%s' % (vol, parent_folder, volume['name']) mount_path = self.remote_path(volume).strip( '/%s' % self.VOLUME_FILE_NAME) if mount_path in self._remotefsclient._read_mounts(): self._execute('umount', mount_path, run_as_root=True) try: props = nms.folder.get_child_props(folder, 'origin') or {} nms.folder.destroy(folder, '-r') except exception.NexentaException as exc: if 'does not exist' in exc.args[0]: LOG.info(_LI('Folder %s does not exist, it was ' 'already deleted.'), folder) return raise origin = props.get('origin') if origin and self._is_clone_snapshot_name(origin): try: nms.snapshot.destroy(origin, '') except exception.NexentaException as exc: if 'does not exist' in exc.args[0]: LOG.info(_LI('Snapshot %s does not exist, it was ' 'already deleted.'), origin) return raise
def delete_snapshot(self, snapshot): """Deletes a snapshot. :param snapshot: snapshot reference """ volume = self._get_snapshot_volume(snapshot) nfs_share = volume['provider_location'] nms = self.share2nms[nfs_share] vol, dataset = self._get_share_datasets(nfs_share) folder = '%s/%s/%s' % (vol, dataset, volume['name']) try: nms.snapshot.destroy('%s@%s' % (folder, snapshot['name']), '') except exception.NexentaException as exc: if 'does not exist' in exc.args[0]: LOG.info(_LI('Snapshot %(folder)s@%(snapshot)s does not ' 'exist, it was already deleted.'), { 'folder': folder, 'snapshot': snapshot, }) return elif 'has dependent clones' in exc.args[0]: LOG.info(_LI('Snapshot %(folder)s@%(snapshot)s has dependent ' 'clones, it will be deleted later.'), { 'folder': folder, 'snapshot': snapshot, }) return
def _detach_file(self, volume): name = self._get_volname(volume) devname = self._device_name(volume) vg = self._get_lvm_vg(volume) LOG.debug('Detaching device %s', devname) count = self._get_attached_count(volume) if count > 1: LOG.info(_LI('Reference count of %(volume)s is %(count)d, ' 'not detaching.'), {'volume': volume['name'], 'count': count}) return message = (_('Could not detach volume %(vol)s from device %(dev)s.') % {'vol': name, 'dev': devname}) with handle_process_execution_error( message=message, info_message=_LI('Error detaching Volume'), reraise=exception.VolumeBackendAPIException(data=message)): try: if vg is not None: self._do_deactivate(volume, vg) except putils.ProcessExecutionError: LOG.error(_LE('Could not deactivate volume group %s'), self._get_volname(volume)) raise try: self._do_detach(volume, vg=vg) except putils.ProcessExecutionError: LOG.error(_LE('Could not detach volume %(vol)s from device ' '%(dev)s.'), {'vol': name, 'dev': devname}) raise self._decrement_attached_count(volume)
def _update_info_from_dpkg(self): LOG.debug('Trying dpkg-query command.') try: _vendor = None out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'", self.PACKAGE_NAME) if not out: LOG.info(_LI('No dpkg-query info found for %(pkg)s package.'), {'pkg': self.PACKAGE_NAME}) return False # debian format: [epoch:]upstream_version[-debian_revision] deb_version = out # in case epoch or revision is missing, copy entire string _release = deb_version if ':' in deb_version: deb_epoch, upstream_version = deb_version.split(':') _release = upstream_version if '-' in deb_version: deb_revision = deb_version.split('-')[1] _vendor = deb_revision self._release = _release if _vendor: self._vendor = _vendor return True except Exception as e: LOG.info(_LI('Could not run dpkg-query command: %(msg)s.'), { 'msg': e}) return False
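# Hedged worked example of the debian version split above, mirroring what the
# method stores (note the "release" keeps the revision, since only ':' is
# split off); the version string is a made-up dpkg-query value.
deb_version = "1:2.7.0-0ubuntu1"
release = deb_version
if ':' in deb_version:
    _epoch, release = deb_version.split(':')
vendor = deb_version.split('-')[1] if '-' in deb_version else None
assert release == "2.7.0-0ubuntu1"
assert vendor == "0ubuntu1"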
def manage_existing(self, volume, existing_ref): """Manage an existing LeftHand volume. existing_ref is a dictionary of the form: {'source-name': <name of the virtual volume>} """ # Check API Version self._check_api_version() target_vol_name = self._get_existing_volume_ref_name(existing_ref) # Check for the existence of the virtual volume. client = self._login() try: volume_info = client.getVolumeByName(target_vol_name) except hpexceptions.HTTPNotFound: err = (_("Virtual volume '%s' doesn't exist on array.") % target_vol_name) LOG.error(err) raise exception.InvalidInput(reason=err) finally: self._logout(client) # Generate the new volume information based on the new ID. new_vol_name = 'volume-' + volume['id'] volume_type = None if volume['volume_type_id']: try: volume_type = self._get_volume_type(volume['volume_type_id']) except Exception: reason = (_("Volume type ID '%s' is invalid.") % volume['volume_type_id']) raise exception.ManageExistingVolumeTypeMismatch(reason=reason) new_vals = {"name": new_vol_name} client = self._login() try: # Update the existing volume with the new name. client.modifyVolume(volume_info['id'], new_vals) finally: self._logout(client) LOG.info(_LI("Virtual volume '%(ref)s' renamed to '%(new)s'."), { 'ref': existing_ref['source-name'], 'new': new_vol_name }) display_name = None if volume['display_name']: display_name = volume['display_name'] if volume_type: LOG.info( _LI("Virtual volume %(disp)s '%(new)s' is " "being retyped."), { 'disp': display_name, 'new': new_vol_name }) try: self.retype(None, volume, volume_type, volume_type['extra_specs'], volume['host']) LOG.info( _LI("Virtual volume %(disp)s successfully retyped to " "%(new_type)s."), { 'disp': display_name, 'new_type': volume_type.get('name') }) except Exception: with excutils.save_and_reraise_exception(): LOG.warning( _LW("Failed to manage virtual volume %(disp)s " "due to error during retype."), {'disp': display_name}) # Try to undo the rename and clear the new comment. client = self._login() try: client.modifyVolume(volume_info['id'], {'name': target_vol_name}) finally: self._logout(client) updates = {'display_name': display_name} LOG.info( _LI("Virtual volume %(disp)s '%(new)s' is " "now being managed."), { 'disp': display_name, 'new': new_vol_name }) # Return display name to update the name displayed in the GUI and # any model updates from retype. return updates
def _copy_volume_with_path(prefix, srcstr, deststr, size_in_m, blocksize, sync=False, execute=utils.execute, ionice=None, sparse=False): cmd = prefix[:] if ionice: cmd.extend(('ionice', ionice)) blocksize = _check_blocksize(blocksize) size_in_bytes = size_in_m * units.Mi cmd.extend(('dd', 'if=%s' % srcstr, 'of=%s' % deststr, 'count=%d' % size_in_bytes, 'bs=%s' % blocksize)) # Use O_DIRECT to avoid thrashing the system buffer cache odirect = check_for_odirect_support(srcstr, deststr, 'iflag=direct') cmd.append('iflag=count_bytes,direct' if odirect else 'iflag=count_bytes') if check_for_odirect_support(srcstr, deststr, 'oflag=direct'): cmd.append('oflag=direct') odirect = True # If the volume is being unprovisioned then # request the data is persisted before returning, # so that it's not discarded from the cache. conv = [] if sync and not odirect: conv.append('fdatasync') if sparse: conv.append('sparse') if conv: conv_options = 'conv=' + ",".join(conv) cmd.append(conv_options) # Perform the copy start_time = timeutils.utcnow() execute(*cmd, run_as_root=True) duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) # NOTE(jdg): use a default of 1, mostly for unit test, but in # some incredible event this is 0 (cirros image?) don't barf if duration < 1: duration = 1 mbps = (size_in_m / duration) LOG.debug( "Volume copy details: src %(src)s, dest %(dest)s, " "size %(sz).2f MB, duration %(duration).2f sec", { "src": srcstr, "dest": deststr, "sz": size_in_m, "duration": duration }) LOG.info(_LI("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s"), { 'size_in_m': size_in_m, 'mbps': mbps })
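# Hedged illustration: the dd invocation the function above assembles for a
# 1 GiB copy when O_DIRECT is usable on both ends (paths are placeholders,
# no ionice prefix, sync and sparse off).
size_in_m, blocksize = 1024, '1M'
expected = ['dd', 'if=/dev/stack/src', 'of=/dev/stack/dst',
            'count=%d' % (size_in_m * 1024 * 1024), 'bs=%s' % blocksize,
            'iflag=count_bytes,direct', 'oflag=direct']
assert expected[3] == 'count=1073741824'     # count is expressed in bytes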
def create(self, req, body): """Creates a new volume.""" self.assert_valid_body(body, 'volume') LOG.debug('Create volume request body: %s', body) context = req.environ['cinder.context'] volume = body['volume'] kwargs = {} self.validate_name_and_description(volume) # NOTE(thingee): v2 API allows name instead of display_name if 'name' in volume: volume['display_name'] = volume.pop('name') # NOTE(thingee): v2 API allows description instead of # display_description if 'description' in volume: volume['display_description'] = volume.pop('description') if 'image_id' in volume: volume['imageRef'] = volume.pop('image_id') req_volume_type = volume.get('volume_type', None) if req_volume_type: # Not found exception will be handled at the wsgi level if not uuidutils.is_uuid_like(req_volume_type): kwargs['volume_type'] = \ volume_types.get_volume_type_by_name( context, req_volume_type) else: kwargs['volume_type'] = volume_types.get_volume_type( context, req_volume_type) kwargs['metadata'] = volume.get('metadata', None) snapshot_id = volume.get('snapshot_id') if snapshot_id is not None: # Not found exception will be handled at the wsgi level kwargs['snapshot'] = self.volume_api.get_snapshot( context, snapshot_id) else: kwargs['snapshot'] = None source_volid = volume.get('source_volid') if source_volid is not None: # Not found exception will be handled at the wsgi level kwargs['source_volume'] = \ self.volume_api.get_volume(context, source_volid) else: kwargs['source_volume'] = None source_replica = volume.get('source_replica') if source_replica is not None: # Not found exception will be handled at the wsgi level src_vol = self.volume_api.get_volume(context, source_replica) if src_vol['replication_status'] == 'disabled': explanation = _('source volume id:%s is not' ' replicated') % source_replica raise exc.HTTPBadRequest(explanation=explanation) kwargs['source_replica'] = src_vol else: kwargs['source_replica'] = None consistencygroup_id = volume.get('consistencygroup_id') if consistencygroup_id is not None: # Not found exception will be handled at the wsgi level kwargs['consistencygroup'] = \ self.consistencygroup_api.get(context, consistencygroup_id) else: kwargs['consistencygroup'] = None size = volume.get('size', None) if size is None and kwargs['snapshot'] is not None: size = kwargs['snapshot']['volume_size'] elif size is None and kwargs['source_volume'] is not None: size = kwargs['source_volume']['size'] elif size is None and kwargs['source_replica'] is not None: size = kwargs['source_replica']['size'] LOG.info(_LI("Create volume of %s GB"), size) if self.ext_mgr.is_loaded('os-image-create'): image_ref = volume.get('imageRef') if image_ref is not None: image_uuid = self._image_uuid_from_ref(image_ref, context) kwargs['image_id'] = image_uuid kwargs['availability_zone'] = volume.get('availability_zone', None) kwargs['scheduler_hints'] = volume.get('scheduler_hints', None) kwargs['multiattach'] = utils.get_bool_param('multiattach', volume) new_volume = self.volume_api.create(context, size, volume.get('display_name'), volume.get('display_description'), **kwargs) retval = self._view_builder.detail(req, new_volume) return retval
def _do_export(self, _ctx, volume, ensure=False): """Do all steps to get zvol exported as LUN 0 at separate target. :param volume: reference of volume to be exported :param ensure: if True, ignore errors caused by already existing resources """ zvol_name = self._get_zvol_name(volume['name']) target_name = self._get_target_name(volume['name']) target_group_name = self._get_target_group_name(volume['name']) if not self._target_exists(target_name): try: self.nms.iscsitarget.create_target( {'target_name': target_name}) except nexenta.NexentaException as exc: if ensure and 'already configured' in exc.args[0]: LOG.info( _LI('Ignored target creation error "%s" while ' 'ensuring export'), exc) else: raise if not self._target_group_exists(target_group_name): try: self.nms.stmf.create_targetgroup(target_group_name) except nexenta.NexentaException as exc: if ((ensure and 'already exists' in exc.args[0]) or 'target must be offline' in exc.args[0]): LOG.info( _LI('Ignored target group creation error "%s" ' 'while ensuring export'), exc) else: raise if not self._target_member_in_target_group(target_group_name, target_name): try: self.nms.stmf.add_targetgroup_member(target_group_name, target_name) except nexenta.NexentaException as exc: if ((ensure and 'already exists' in exc.args[0]) or 'target must be offline' in exc.args[0]): LOG.info( _LI('Ignored target group member addition error ' '"%s" while ensuring export'), exc) else: raise if not self._lu_exists(zvol_name): try: self.nms.scsidisk.create_lu(zvol_name, {}) except nexenta.NexentaException as exc: if not ensure or 'in use' not in exc.args[0]: raise LOG.info( _LI('Ignored LU creation error "%s" while ensuring ' 'export'), exc) if not self._is_lu_shared(zvol_name): try: self.nms.scsidisk.add_lun_mapping_entry( zvol_name, {'target_group': target_group_name}) except nexenta.NexentaException as exc: if not ensure or 'view entry exists' not in exc.args[0]: raise LOG.info( _LI('Ignored LUN mapping entry addition error "%s" ' 'while ensuring export'), exc)
def get_san_context(self, target_wwn_list): """Lookup SAN context for visible end devices. Look up each SAN configured and return a map of SAN (fabric IP) to list of target WWNs visible to the fabric. """ formatted_target_list = [] fabric_map = {} fc_fabric_names = self.configuration.fc_fabric_names fabrics = [x.strip() for x in fc_fabric_names.split(',')] LOG.debug("Fabric List: %(fabrics)s", {'fabrics': fabrics}) LOG.debug("Target WWN list: %(targetwwns)s", {'targetwwns': target_wwn_list}) if len(fabrics) > 0: for t in target_wwn_list: formatted_target_list.append(utils.get_formatted_wwn(t)) LOG.debug("Formatted target WWN list: %(targetlist)s", {'targetlist': formatted_target_list}) for fabric_name in fabrics: conn = self._get_southbound_client(fabric_name) # Get name server data from fabric and get the targets # logged in. nsinfo = None try: nsinfo = conn.get_nameserver_info() LOG.debug("Name server info from fabric: %(nsinfo)s", {'nsinfo': nsinfo}) except (exception.BrocadeZoningCliException, exception.BrocadeZoningHttpException): if not conn.is_supported_firmware(): msg = _("Unsupported firmware on switch %s. Make sure " "switch is running firmware v6.4 or higher" ) % conn.switch_ip LOG.exception(msg) raise exception.FCZoneDriverException(msg) with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error getting name server info.")) except Exception: msg = _("Failed to get name server info.") LOG.exception(msg) raise exception.FCZoneDriverException(msg) finally: conn.cleanup() visible_targets = filter( lambda x: x in formatted_target_list, nsinfo) if visible_targets: LOG.info(_LI("Filtered targets for SAN is: %(targets)s"), {'targets': visible_targets}) # getting rid of the ':' before returning for idx, elem in enumerate(visible_targets): visible_targets[idx] = str( visible_targets[idx]).replace(':', '') fabric_map[fabric_name] = visible_targets else: LOG.debug("No targets found in the nameserver " "for fabric: %(fabric)s", {'fabric': fabric_name}) LOG.debug("Return SAN context output: %(fabricmap)s", {'fabricmap': fabric_map}) return fabric_map
def delete_connection(self, fabric, initiator_target_map, host_name=None, storage_system=None): """Concrete implementation of delete_connection. Based on zoning policy and state of each I-T pair, list of zones are created for deletion. The zones are either updated deleted based on the policy and attach/detach state of each I-T pair. :param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ LOG.info(_LI("BrcdFCZoneDriver - Delete connection for fabric " "%(fabric)s for I-T map: %(i_t_map)s"), {'fabric': fabric, 'i_t_map': initiator_target_map}) zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( 'zoning_policy') zone_name_prefix = self.fabric_configs[fabric].safe_get( 'zone_name_prefix') zone_activate = self.fabric_configs[fabric].safe_get( 'zone_activate') if zoning_policy_fab: zoning_policy = zoning_policy_fab LOG.info(_LI("Zoning policy for fabric %(policy)s"), {'policy': zoning_policy}) conn = self._get_southbound_client(fabric) cfgmap_from_fabric = self._get_active_zone_set(conn) zone_names = [] if cfgmap_from_fabric.get('zones'): zone_names = cfgmap_from_fabric['zones'].keys() # Based on zoning policy, get zone member list and push changes to # fabric. This operation could result in an update for zone config # with new member list or deleting zones from active cfg. LOG.debug("zone config from Fabric: %(cfgmap)s", {'cfgmap': cfgmap_from_fabric}) for initiator_key in initiator_target_map.keys(): initiator = initiator_key.lower() formatted_initiator = utils.get_formatted_wwn(initiator) zone_map = {} zones_to_delete = [] t_list = initiator_target_map[initiator_key] if zoning_policy == 'initiator-target': # In this case, zone needs to be deleted. for t in t_list: target = t.lower() zone_name = driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS) LOG.debug("Zone name to delete: %(zonename)s", {'zonename': zone_name}) if len(zone_names) > 0 and (zone_name in zone_names): # delete zone. LOG.debug("Added zone to delete to list: %(zonename)s", {'zonename': zone_name}) zones_to_delete.append(zone_name) elif zoning_policy == 'initiator': zone_members = [formatted_initiator] for t in t_list: target = t.lower() zone_members.append(utils.get_formatted_wwn(target)) zone_name = driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS) if (zone_names and (zone_name in zone_names)): # Check to see if there are other zone members # in the zone besides the initiator and # the targets being removed. filtered_members = filter( lambda x: x not in zone_members, cfgmap_from_fabric['zones'][zone_name]) # If there are other zone members, proceed with # zone update to remove the targets. Otherwise, # delete the zone. if filtered_members: zone_members.remove(formatted_initiator) # Verify that the zone members in target list # are listed in zone definition. If not, remove # the zone members from the list of members # to remove, otherwise switch will return error. 
zm_list = cfgmap_from_fabric['zones'][zone_name] for t in t_list: formatted_target = utils.get_formatted_wwn(t) if formatted_target not in zm_list: zone_members.remove(formatted_target) if zone_members: LOG.debug("Zone members to remove: " "%(members)s", {'members': zone_members}) zone_map[zone_name] = zone_members else: zones_to_delete.append(zone_name) else: LOG.warning(_LW("Zoning policy not recognized: %(policy)s"), {'policy': zoning_policy}) LOG.debug("Zone map to update: %(zonemap)s", {'zonemap': zone_map}) LOG.debug("Zone list to delete: %(zones)s", {'zones': zones_to_delete}) try: # Update zone membership. if zone_map: conn.update_zones(zone_map, zone_activate, fc_zone_constants.ZONE_REMOVE, cfgmap_from_fabric) # Delete zones if zones_to_delete: zone_name_string = '' num_zones = len(zones_to_delete) for i in range(0, num_zones): if i == 0: zone_name_string = ( '%s%s' % ( zone_name_string, zones_to_delete[i])) else: zone_name_string = '%s;%s' % ( zone_name_string, zones_to_delete[i]) conn.delete_zones( zone_name_string, zone_activate, cfgmap_from_fabric) except (exception.BrocadeZoningCliException, exception.BrocadeZoningHttpException) as brocade_ex: raise exception.FCZoneDriverException(brocade_ex) except Exception: msg = _("Failed to update or delete zoning " "configuration.") LOG.exception(msg) raise exception.FCZoneDriverException(msg) finally: conn.cleanup()
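# Hedged note: the zone_name_string loop above is equivalent to a ';' join;
# a standalone check with illustrative zone names.
zones_to_delete = ['openstack_zone_a', 'openstack_zone_b', 'openstack_zone_c']
zone_name_string = ''
for i in range(0, len(zones_to_delete)):
    if i == 0:
        zone_name_string = '%s%s' % (zone_name_string, zones_to_delete[i])
    else:
        zone_name_string = '%s;%s' % (zone_name_string, zones_to_delete[i])
assert zone_name_string == ';'.join(zones_to_delete)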
def add_connection(self, fabric, initiator_target_map, host_name=None, storage_system=None): """Concrete implementation of add_connection. Based on zoning policy and state of each I-T pair, list of zone members are created and pushed to the fabric to add zones. The new zones created or zones updated are activated based on isActivate flag set in cinder.conf returned by volume driver after attach operation. :param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ LOG.info(_LI("BrcdFCZoneDriver - Add connection for fabric " "%(fabric)s for I-T map: %(i_t_map)s"), {'fabric': fabric, 'i_t_map': initiator_target_map}) zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( 'zoning_policy') zone_name_prefix = self.fabric_configs[fabric].safe_get( 'zone_name_prefix') zone_activate = self.fabric_configs[fabric].safe_get( 'zone_activate') if zoning_policy_fab: zoning_policy = zoning_policy_fab LOG.info(_LI("Zoning policy for Fabric %(policy)s"), {'policy': zoning_policy}) if (zoning_policy != 'initiator' and zoning_policy != 'initiator-target'): LOG.info(_LI("Zoning policy is not valid, " "no zoning will be performed.")) return client = self._get_southbound_client(fabric) cfgmap_from_fabric = self._get_active_zone_set(client) zone_names = [] if cfgmap_from_fabric.get('zones'): zone_names = cfgmap_from_fabric['zones'].keys() # based on zoning policy, create zone member list and # push changes to fabric. for initiator_key in initiator_target_map.keys(): zone_map = {} zone_update_map = {} initiator = initiator_key.lower() target_list = initiator_target_map[initiator_key] if zoning_policy == 'initiator-target': for target in target_list: zone_members = [utils.get_formatted_wwn(initiator), utils.get_formatted_wwn(target)] zone_name = driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS) if (len(cfgmap_from_fabric) == 0 or ( zone_name not in zone_names)): zone_map[zone_name] = zone_members else: # This is I-T zoning, skip if zone already exists. LOG.info(_LI("Zone exists in I-T mode. Skipping " "zone creation for %(zonename)s"), {'zonename': zone_name}) elif zoning_policy == 'initiator': zone_members = [utils.get_formatted_wwn(initiator)] for target in target_list: zone_members.append(utils.get_formatted_wwn(target)) zone_name = driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS) # If zone exists, then do a zoneadd to update # the zone members in the existing zone. Otherwise, # do a zonecreate to create a new zone. if len(zone_names) > 0 and (zone_name in zone_names): # Verify that the target WWNs are not already members # of the existing zone. If so, remove them from the # list of members to add, otherwise error will be # returned from the switch. 
for t in target_list: if t in cfgmap_from_fabric['zones'][zone_name]: zone_members.remove(utils.get_formatted_wwn(t)) if zone_members: zone_update_map[zone_name] = zone_members else: zone_map[zone_name] = zone_members LOG.info(_LI("Zone map to create: %(zonemap)s"), {'zonemap': zone_map}) LOG.info(_LI("Zone map to update: %(zone_update_map)s"), {'zone_update_map': zone_update_map}) try: if zone_map: client.add_zones(zone_map, zone_activate, cfgmap_from_fabric) LOG.debug("Zones created successfully: %(zonemap)s", {'zonemap': zone_map}) if zone_update_map: client.update_zones(zone_update_map, zone_activate, fc_zone_constants.ZONE_ADD, cfgmap_from_fabric) LOG.debug("Zones updated successfully: %(updatemap)s", {'updatemap': zone_update_map}) except (exception.BrocadeZoningCliException, exception.BrocadeZoningHttpException) as brocade_ex: raise exception.FCZoneDriverException(brocade_ex) except Exception: msg = _("Failed to add or update zoning configuration.") LOG.exception(msg) raise exception.FCZoneDriverException(msg) finally: client.cleanup()
def delete_snapshot(self, snapshot): """Deletes a snapshot.""" try: self.client.req('volumes', 'DELETE', name=snapshot.id) except exception.NotFound: LOG.info(_LI("snapshot %s doesn't exist"), snapshot.id)
def do_setup(self, context):
    if not self.configuration.nfs_oversub_ratio > 0:
        msg = _("NFS config 'nfs_oversub_ratio' invalid. Must be > 0: "
                "%s") % self.configuration.nfs_oversub_ratio
        LOG.error(msg)
        raise exception.NfsException(msg)

    if not ((self.configuration.nfs_used_ratio > 0) and
            (self.configuration.nfs_used_ratio <= 1)):
        msg = _("NFS config 'nfs_used_ratio' invalid. Must be > 0 "
                "and <= 1.0: %s") % self.configuration.nfs_used_ratio
        LOG.error(msg)
        raise exception.NfsException(msg)

    package = 'mount.nfs'
    try:
        self._execute(package, check_exit_code=False, run_as_root=True)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            msg = _('%s is not installed') % package
            raise exception.NfsException(msg)
        else:
            raise

    lcfg = self.configuration
    LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip)

    host = lcfg.san_ip
    user = lcfg.san_login
    password = lcfg.san_password
    https_port = lcfg.zfssa_https_port

    credentials = ['san_ip', 'san_login', 'san_password', 'zfssa_data_ip']

    for cred in credentials:
        if not getattr(lcfg, cred, None):
            exception_msg = _('%s not set in cinder.conf') % cred
            LOG.error(exception_msg)
            raise exception.CinderException(exception_msg)

    self.zfssa = factory_zfssa()
    self.zfssa.set_host(host, timeout=lcfg.zfssa_rest_timeout)
    auth_str = base64.encodestring('%s:%s' % (user, password))[:-1]
    self.zfssa.login(auth_str)

    self.zfssa.create_project(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
                              compression=lcfg.zfssa_nfs_share_compression,
                              logbias=lcfg.zfssa_nfs_share_logbias)

    share_args = {
        'sharedav': 'rw',
        'sharenfs': 'rw',
        'root_permissions': '777',
        'compression': lcfg.zfssa_nfs_share_compression,
        'logbias': lcfg.zfssa_nfs_share_logbias
    }

    self.zfssa.create_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
                            lcfg.zfssa_nfs_share, share_args)

    share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool,
                                         lcfg.zfssa_nfs_project,
                                         lcfg.zfssa_nfs_share)

    mountpoint = share_details['mountpoint']

    self.mount_path = lcfg.zfssa_data_ip + ':' + mountpoint
    https_path = 'https://' + lcfg.zfssa_data_ip + ':' + https_port + \
        '/shares' + mountpoint

    LOG.debug('NFS mount path: %s', self.mount_path)
    LOG.debug('WebDAV path to the share: %s', https_path)

    self.shares = {}
    mnt_opts = self.configuration.zfssa_nfs_mount_options
    self.shares[self.mount_path] = mnt_opts if len(mnt_opts) > 1 else None

    # Initialize the WebDAV client
    self.zfssa.set_webdav(https_path, auth_str)

    # Edit http service so that WebDAV requests are always authenticated
    args = {'https_port': https_port,
            'require_login': True}
    self.zfssa.modify_service('http', args)
    self.zfssa.enable_service('http')

    if lcfg.zfssa_enable_local_cache:
        LOG.debug('Creating local cache directory %s.',
                  lcfg.zfssa_cache_directory)
        self.zfssa.create_directory(lcfg.zfssa_cache_directory)
def migrate_volume(self, ctxt, volume, host): """Migrate the volume to the specified host. Backend assisted volume migration will occur if and only if; 1. Same LeftHand backend 2. Volume cannot be attached 3. Volumes with snapshots cannot be migrated 4. Source and Destination clusters must be in the same management group Volume re-type is not supported. Returns a boolean indicating whether the migration occurred, as well as model_update. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ LOG.debug( 'enter: migrate_volume: id=%(id)s, host=%(host)s, ' 'cluster=%(cluster)s', { 'id': volume['id'], 'host': host, 'cluster': self.configuration.hplefthand_clustername }) false_ret = (False, None) if 'location_info' not in host['capabilities']: return false_ret host_location = host['capabilities']['location_info'] (driver, cluster, vip) = host_location.split(' ') client = self._login() try: # get the cluster info, if it exists and compare cluster_info = client.getClusterByName(cluster) LOG.debug('Cluster info: %s', cluster_info) virtual_ips = cluster_info['virtualIPAddresses'] if driver != self.__class__.__name__: LOG.info( _LI("Cannot provide backend assisted migration for " "volume: %s because volume is from a different " "backend."), volume['name']) return false_ret if vip != virtual_ips[0]['ipV4Address']: LOG.info( _LI("Cannot provide backend assisted migration for " "volume: %s because cluster exists in different " "management group."), volume['name']) return false_ret except hpexceptions.HTTPNotFound: LOG.info( _LI("Cannot provide backend assisted migration for " "volume: %s because cluster exists in different " "management group."), volume['name']) return false_ret finally: self._logout(client) client = self._login() try: volume_info = client.getVolumeByName(volume['name']) LOG.debug('Volume info: %s', volume_info) # can't migrate if server is attached if volume_info['iscsiSessions'] is not None: LOG.info( _LI("Cannot provide backend assisted migration " "for volume: %s because the volume has been " "exported."), volume['name']) return false_ret # can't migrate if volume has snapshots snap_info = client.getVolume( volume_info['id'], 'fields=snapshots,snapshots[resource[members[name]]]') LOG.debug('Snapshot info: %s', snap_info) if snap_info['snapshots']['resource'] is not None: LOG.info( _LI("Cannot provide backend assisted migration " "for volume: %s because the volume has " "snapshots."), volume['name']) return false_ret options = {'clusterName': cluster} client.modifyVolume(volume_info['id'], options) except hpexceptions.HTTPNotFound: LOG.info( _LI("Cannot provide backend assisted migration for " "volume: %s because volume does not exist in this " "management group."), volume['name']) return false_ret except hpexceptions.HTTPServerError as ex: LOG.error(_LE("Exception: %s"), ex) return false_ret finally: self._logout(client) return (True, None)
def _restore_v1(self, backup, volume_id, metadata, volume_file): """Restore a v1 swift volume backup from swift.""" backup_id = backup['id'] LOG.debug('v1 swift volume backup restore of %s started', backup_id) container = backup['container'] metadata_objects = metadata['objects'] metadata_object_names = sum((obj.keys() for obj in metadata_objects), []) LOG.debug('metadata_object_names = %s' % metadata_object_names) prune_list = [self._metadata_filename(backup)] swift_object_names = [ swift_object_name for swift_object_name in self._generate_object_names(backup) if swift_object_name not in prune_list ] if sorted(swift_object_names) != sorted(metadata_object_names): err = _('restore_backup aborted, actual swift object list in ' 'swift does not match object list stored in metadata') raise exception.InvalidBackup(reason=err) for metadata_object in metadata_objects: object_name = metadata_object.keys()[0] LOG.debug( 'restoring object from swift. backup: %(backup_id)s, ' 'container: %(container)s, swift object name: ' '%(object_name)s, volume: %(volume_id)s' % { 'backup_id': backup_id, 'container': container, 'object_name': object_name, 'volume_id': volume_id, }) try: (_resp, body) = self.conn.get_object(container, object_name) except socket.error as err: raise exception.SwiftConnectionFailed(reason=err) compression_algorithm = metadata_object[object_name]['compression'] decompressor = self._get_compressor(compression_algorithm) if decompressor is not None: LOG.debug('decompressing data using %s algorithm' % compression_algorithm) decompressed = decompressor.decompress(body) volume_file.write(decompressed) else: volume_file.write(body) # force flush every write to avoid long blocking write on close volume_file.flush() # Be tolerant to IO implementations that do not support fileno() try: fileno = volume_file.fileno() except IOError: LOG.info( _LI("volume_file does not support " "fileno() so skipping" "fsync()")) else: os.fsync(fileno) # Restoring a backup to a volume can take some time. Yield so other # threads can run, allowing for among other things the service # status to be updated eventlet.sleep(0) LOG.debug('v1 swift volume backup restore of %s finished', backup_id)
def delete_volume(self, volume): """Deletes a volume.""" try: self.client.req('volumes', 'DELETE', name=volume['id']) except exception.NotFound: LOG.info(_LI("volume %s doesn't exist"), volume['id'])
def __init__(self): LOG.info(_LI('Initializing extension manager.')) self.cls_list = CONF.osapi_volume_extension self.extensions = {} self._load_extensions()
def delete_snapshot(self, snapshot): """Deletes a snapshot.""" LOG.info(_LI('Deleting snapshot: %s'), snapshot['name']) self.zfssa.delete_snapshot_of_volume_file(src_file=snapshot['name'])
def do_setup(self, context): """Setup - create multiple elements. Project, initiators, initiatorgroup, target and targetgroup. """ lcfg = self.configuration LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip) self.zfssa = factory_zfssa() self.tgt_zfssa = factory_zfssa() self.zfssa.set_host(lcfg.san_ip, timeout=lcfg.zfssa_rest_timeout) auth_str = '%s:%s' % (lcfg.san_login, lcfg.san_password) auth_str = base64.encode_as_text(auth_str)[:-1] self.zfssa.login(auth_str) self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_project, compression=lcfg.zfssa_lun_compression, logbias=lcfg.zfssa_lun_logbias) if lcfg.zfssa_enable_local_cache: self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_cache_project, compression=lcfg.zfssa_lun_compression, logbias=lcfg.zfssa_lun_logbias) schemas = [ {'property': 'image_id', 'description': 'OpenStack image ID', 'type': 'String'}, {'property': 'updated_at', 'description': 'Most recent updated time of image', 'type': 'String'}] self.zfssa.create_schemas(schemas) if (lcfg.zfssa_initiator_config != ''): initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config) for initiator_group in initiator_config: zfssa_initiator_group = initiator_group for zfssa_initiator in initiator_config[zfssa_initiator_group]: self.zfssa.create_initiator(zfssa_initiator['iqn'], zfssa_initiator_group + '-' + zfssa_initiator['iqn'], chapuser=zfssa_initiator['user'], chapsecret=zfssa_initiator['password']) if (zfssa_initiator_group != 'default'): self.zfssa.add_to_initiatorgroup( zfssa_initiator['iqn'], zfssa_initiator_group) else: LOG.warning(_LW('zfssa_initiator_config not found. ' 'Using deprecated configuration options.')) if (lcfg.zfssa_initiator != '' and (lcfg.zfssa_initiator_group == '' or lcfg.zfssa_initiator_group == 'default')): LOG.warning(_LW('zfssa_initiator: %(ini)s' " won't be used on " 'zfssa_initiator_group= %(inigrp)s.'), {'ini': lcfg.zfssa_initiator, 'inigrp': lcfg.zfssa_initiator_group}) # Setup initiator and initiator group if (lcfg.zfssa_initiator != '' and lcfg.zfssa_initiator_group != '' and lcfg.zfssa_initiator_group != 'default'): for initiator in lcfg.zfssa_initiator.split(','): self.zfssa.create_initiator( initiator, lcfg.zfssa_initiator_group + '-' + initiator, chapuser=lcfg.zfssa_initiator_user, chapsecret=lcfg.zfssa_initiator_password) self.zfssa.add_to_initiatorgroup( initiator, lcfg.zfssa_initiator_group) # Parse interfaces interfaces = [] for interface in lcfg.zfssa_target_interfaces.split(','): if interface == '': continue interfaces.append(interface) # Setup target and target group iqn = self.zfssa.create_target( self._get_target_alias(), interfaces, tchapuser=lcfg.zfssa_target_user, tchapsecret=lcfg.zfssa_target_password) self.zfssa.add_to_targetgroup(iqn, lcfg.zfssa_target_group)
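Illustrative only: the general shape of zfssa_initiator_config that the ast.literal_eval() call above appears to expect, namely a dict mapping an initiator group name to a list of dicts with 'iqn', 'user' and 'password' keys. The group name, IQN and CHAP values below are made up.

import ast

example_cfg = ("{'test-group': ["
               "{'iqn': 'iqn.1993-08.org.debian:01:abc123', "
               "'user': '', 'password': ''}]}")

for group, initiators in ast.literal_eval(example_cfg).items():
    for ini in initiators:
        # Mirrors the fields accessed in do_setup() above.
        print(group, ini['iqn'], ini['user'], ini['password'])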
def do_setup(self, context): """Perform internal driver setup.""" self.context = context self._load_shares_config( getattr(self.configuration, self.driver_prefix + '_shares_config')) LOG.info(_LI("Review shares: %s"), self.shares) nfs_info = self._get_nfs_info() LOG.debug("nfs_info: %s", nfs_info) for share in self.shares: if share in nfs_info.keys(): LOG.info(_LI("share: %(share)s -> %(info)s"), { 'share': share, 'info': nfs_info[share]['path'] }) for svc in self.config['services'].keys(): if share == self.config['services'][svc]['hdp']: self.config['services'][svc]['path'] = \ nfs_info[share]['path'] # don't overwrite HDP value self.config['services'][svc]['fsid'] = \ nfs_info[share]['hdp'] self.config['services'][svc]['fslabel'] = \ nfs_info[share]['fslabel'] LOG.info( _LI("Save service info for" " %(svc)s -> %(hdp)s, %(path)s"), { 'svc': svc, 'hdp': nfs_info[share]['hdp'], 'path': nfs_info[share]['path'] }) break if share != self.config['services'][svc]['hdp']: LOG.error( _LE("NFS share %(share)s has no service entry:" " %(svc)s -> %(hdp)s"), { 'share': share, 'svc': svc, 'hdp': self.config['services'][svc]['hdp'] }) raise exception.ParameterNotFound(param=svc) else: LOG.info(_LI("share: %s incorrect entry"), share) LOG.debug("self.config['services'] = %s", self.config['services']) service_list = self.config['services'].keys() for svc in service_list: svc = self.config['services'][svc] pool = {} pool['pool_name'] = svc['volume_type'] pool['service_label'] = svc['volume_type'] pool['hdp'] = svc['hdp'] self.pools.append(pool) LOG.info(_LI("Configured pools: %s"), self.pools)
def create(self, group, replica_model): group_id = group.get('id') LOG.info(_LI("Create Consistency Group: %(group)s."), {'group': group_id}) group_name = huawei_utils.encode_name(group_id) self.local_cgop.create(group_name, group_id, replica_model)
def migrate_volume(self, ctxt, volume, host): LOG.debug('Attempting ZFSSA enabled volume migration. volume: %(id)s, ' 'host: %(host)s, status=%(status)s.', {'id': volume['id'], 'host': host, 'status': volume['status']}) lcfg = self.configuration default_ret = (False, None) if volume['status'] != "available": LOG.debug('Only available volumes can be migrated using backend ' 'assisted migration. Defaulting to generic migration.') return default_ret if (host['capabilities']['vendor_name'] != 'Oracle' or host['capabilities']['storage_protocol'] != self.protocol): LOG.debug('Source and destination drivers need to be Oracle iSCSI ' 'to use backend assisted migration. Defaulting to ' 'generic migration.') return default_ret if 'location_info' not in host['capabilities']: LOG.debug('Could not find location_info in capabilities reported ' 'by the destination driver. Defaulting to generic ' 'migration.') return default_ret loc_info = host['capabilities']['location_info'] try: (tgt_host, auth_str, tgt_pool, tgt_project, tgt_tgtgroup, tgt_repl_ip) = loc_info.split(':') except ValueError: LOG.error(_LE("Location info needed for backend enabled volume " "migration not in correct format: %s. Continuing " "with generic volume migration."), loc_info) return default_ret if tgt_repl_ip == '': msg = _LE("zfssa_replication_ip not set in cinder.conf. " "zfssa_replication_ip is needed for backend enabled " "volume migration. Continuing with generic volume " "migration.") LOG.error(msg) return default_ret src_pool = lcfg.zfssa_pool src_project = lcfg.zfssa_project try: LOG.info(_LI('Connecting to target host: %s for backend enabled ' 'migration.'), tgt_host) self.tgt_zfssa.set_host(tgt_host) self.tgt_zfssa.login(auth_str) # Verify that the replication service is online try: self.zfssa.verify_service('replication') self.tgt_zfssa.verify_service('replication') except exception.VolumeBackendAPIException: return default_ret # ensure that a target group by the same name exists on the target # system also, if not, use default migration. lun = self.zfssa.get_lun(src_pool, src_project, volume['name']) if lun['targetgroup'] != tgt_tgtgroup: return default_ret tgt_asn = self.tgt_zfssa.get_asn() src_asn = self.zfssa.get_asn() # verify on the source system that the destination has been # registered as a replication target tgts = self.zfssa.get_replication_targets() targets = [] for target in tgts['targets']: if target['asn'] == tgt_asn: targets.append(target) if targets == []: LOG.debug('Target host: %(host)s for volume migration ' 'not configured as a replication target ' 'for volume: %(vol)s.', {'host': tgt_repl_ip, 'vol': volume['name']}) return default_ret # Multiple ips from the same appliance may be configured # as different targets for target in targets: if target['address'] == tgt_repl_ip + ':216': break if target['address'] != tgt_repl_ip + ':216': LOG.debug('Target with replication ip: %s not configured on ' 'the source appliance for backend enabled volume ' 'migration. Proceeding with default migration.', tgt_repl_ip) return default_ret flow = lf.Flow('zfssa_volume_migration').add( MigrateVolumeInit(), MigrateVolumeCreateAction(provides='action_id'), MigrateVolumeSendReplUpdate(), MigrateVolumeSeverRepl(), MigrateVolumeMoveVol(), MigrateVolumeCleanUp() ) taskflow.engines.run(flow, store={'driver': self, 'tgt_zfssa': self.tgt_zfssa, 'tgt_pool': tgt_pool, 'tgt_project': tgt_project, 'volume': volume, 'tgt_asn': tgt_asn, 'src_zfssa': self.zfssa, 'src_asn': src_asn, 'src_pool': src_pool, 'src_project': src_project, 'target': target}) return (True, None) except Exception: LOG.error(_LE("Error migrating volume: %s"), volume['name']) raise
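For reference, a minimal sketch of the colon-separated location_info layout unpacked at the top of the ZFSSA migrate_volume() above; the field order matches the driver's split(':'), while the helper name and the example string are hypothetical.

def parse_zfssa_location_info(loc_info):
    # Same six fields, in the same order, as the tuple unpacking above.
    (tgt_host, auth_str, tgt_pool, tgt_project,
     tgt_tgtgroup, tgt_repl_ip) = loc_info.split(':')
    return {'host': tgt_host, 'auth': auth_str, 'pool': tgt_pool,
            'project': tgt_project, 'targetgroup': tgt_tgtgroup,
            'replication_ip': tgt_repl_ip}

# Example with made-up values:
# parse_zfssa_location_info('zfs1:YWRtaW46cGFzcw==:pool1:proj1:tgtgrp1:10.0.0.9')
# -> {'host': 'zfs1', 'auth': 'YWRtaW46cGFzcw==', 'pool': 'pool1', ...}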
def get_initiator_grp_list(self): """Execute getInitiatorGrpList API.""" response = self._execute_get_initiator_grp_list() LOG.info(_LI('Successfully retrieved InitiatorGrpList')) return (response['initiatorgrp-list'] if 'initiatorgrp-list' in response else [])
def create_volume(self, volume): tsm_name = self.configuration.cb_tsm_name account_name = self.configuration.cb_account_name # Get account id of this account account_id = self._get_account_id_from_name(account_name) # Set backend storage volume name using OpenStack volume id cb_volume_name = volume['id'].replace("-", "") LOG.debug( "Will create a volume [%(cb_vol)s] in TSM [%(tsm)s] " "at CloudByte storage w.r.t " "OpenStack volume [%(stack_vol)s].", { 'cb_vol': cb_volume_name, 'stack_vol': volume.get('id'), 'tsm': tsm_name }) tsm_data = self._request_tsm_details(account_id) tsm_details = self._get_tsm_details(tsm_data, tsm_name) # Send request to create a qos group before creating a volume LOG.debug("Creating qos group for CloudByte volume [%s].", cb_volume_name) qos_data = self._add_qos_group_request(volume, tsm_details.get('tsmid'), cb_volume_name) # Extract the qos group id from response qosgroupid = qos_data['addqosgroupresponse']['qosgroup']['id'] LOG.debug("Successfully created qos group for CloudByte volume [%s].", cb_volume_name) # Send a create volume request to CloudByte API vol_data = self._create_volume_request(volume, tsm_details.get('datasetid'), qosgroupid, tsm_details.get('tsmid'), cb_volume_name) # Since create volume is an async call; # need to confirm the creation before proceeding further self._wait_for_volume_creation(vol_data, cb_volume_name) # Fetch iscsi id cb_volumes = self._api_request_for_cloudbyte('listFileSystem', params={}) volume_id = self._get_volume_id_from_response(cb_volumes, cb_volume_name) params = {"storageid": volume_id} iscsi_service_data = self._api_request_for_cloudbyte( 'listVolumeiSCSIService', params) iscsi_id = self._get_iscsi_service_id_from_response( volume_id, iscsi_service_data) # Fetch the initiator group ID params = {"accountid": account_id} iscsi_initiator_data = self._api_request_for_cloudbyte( 'listiSCSIInitiator', params) ig_id = self._get_initiator_group_id_from_response( iscsi_initiator_data) LOG.debug("Updating iscsi service for CloudByte volume [%s].", cb_volume_name) # Update the iscsi service with above fetched iscsi_id & ig_id self._request_update_iscsi_service(iscsi_id, ig_id) LOG.debug( "CloudByte volume [%(vol)s] updated with " "iscsi id [%(iscsi)s] and ig id [%(ig)s].", { 'vol': cb_volume_name, 'iscsi': iscsi_id, 'ig': ig_id }) # Provide the model after successful completion of above steps provider = self._build_provider_details_from_response( cb_volumes, cb_volume_name) LOG.info( _LI("Successfully created a CloudByte volume [%(cb_vol)s] " "w.r.t OpenStack volume [%(stack_vol)s]."), { 'cb_vol': cb_volume_name, 'stack_vol': volume.get('id') }) return provider
def delete_snap(self, vol_name, snap_name, *args, **kwargs): """Execute deleteSnap API.""" LOG.info(_LI('Deleting snapshot %s '), snap_name) return self.client.service.deleteSnap(request={'sid': self.sid, 'vol': vol_name, 'name': snap_name})
def delete_initiator_group(self, initiator_group_name, *args, **kwargs): """Execute deleteInitiatorGrp API.""" LOG.info(_LI('Deleting initiator group %s '), initiator_group_name) return self.client.service.deleteInitiatorGrp( request={'sid': self.sid, 'name': initiator_group_name})
def dissociate_volcoll(self, vol_name, *args, **kwargs): """Execute dissocProtPol API.""" LOG.info(_LI('Dissociating volume %s '), vol_name) return self.client.service.dissocProtPol( request={'sid': self.sid, 'vol-name': vol_name})
def _execute_get_initiator_grp_list(self): LOG.info(_LI('Executing getInitiatorGrpList API')) return (self.client.service.getInitiatorGrpList( request={'sid': self.sid}))
def _execute_get_vol_info(self, vol_name): LOG.info(_LI('Getting volume information ' 'for vol_name=%s'), vol_name) return self.client.service.getVolInfo(request={'sid': self.sid, 'name': vol_name})
def delete_vol(self, vol_name, *args, **kwargs): """Execute deleteVol API.""" LOG.info(_LI('Deleting volume %s '), vol_name) return self.client.service.deleteVol(request={'sid': self.sid, 'name': vol_name})
def login(self): """Execute Https Login API.""" response = self._execute_login() LOG.info(_LI('Successful login by user %s'), self.username) self.sid = response['authInfo']['sid']
def get_vol_info(self, vol_name): """Execute getVolInfo API.""" response = self._execute_get_vol_info(vol_name) LOG.info(_LI('Successfully got volume information for volume %s'), vol_name) return response['vol']
def smis_get_iscsi_properties(self, volume, connector, ip_and_iqn, is_multipath): """Gets iscsi configuration. We ideally get saved information in the volume entity, but fall back to discovery if need be. Discovery may be completely removed in future The properties are: :target_discovered: boolean indicating whether discovery was used :target_iqn: the IQN of the iSCSI target :target_portal: the portal of the iSCSI target :target_lun: the lun of the iSCSI target :volume_id: the UUID of the volume :auth_method:, :auth_username:, :auth_password: the authentication details. Right now, either auth_method is not present meaning no authentication, or auth_method == `CHAP` meaning use CHAP with the specified credentials. """ device_info, __, __ = self.common.find_device_number( volume, connector['host']) isError = False if device_info: try: lun_id = device_info['hostlunid'] except KeyError: isError = True else: isError = True if isError: LOG.error(_LE("Unable to get the lun id")) exception_message = (_("Cannot find device number for volume " "%(volumeName)s.") % { 'volumeName': volume['name'] }) raise exception.VolumeBackendAPIException(data=exception_message) properties = {} if len(ip_and_iqn) > 1 and is_multipath: properties['target_portals'] = ([ t['ip'] + ":3260" for t in ip_and_iqn ]) properties['target_iqns'] = ([ t['iqn'].split(",")[0] for t in ip_and_iqn ]) properties['target_luns'] = [lun_id] * len(ip_and_iqn) properties['target_discovered'] = True properties['target_iqn'] = ip_and_iqn[0]['iqn'].split(",")[0] properties['target_portal'] = ip_and_iqn[0]['ip'] + ":3260" properties['target_lun'] = lun_id properties['volume_id'] = volume['id'] LOG.info(_LI("ISCSI properties: %(properties)s."), {'properties': properties}) LOG.info(_LI("ISCSI volume is: %(volume)s."), {'volume': volume}) if 'provider_auth' in volume: auth = volume['provider_auth'] LOG.info(_LI("AUTH properties: %(authProps)s."), {'authProps': auth}) if auth is not None: (auth_method, auth_username, auth_secret) = auth.split() properties['auth_method'] = auth_method properties['auth_username'] = auth_username properties['auth_password'] = auth_secret LOG.info(_LI("AUTH properties: %s."), properties) return properties
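A minimal sketch, assuming only what the code above does with volume['provider_auth']: a whitespace-separated 'CHAP <username> <secret>' string copied into the connection properties. The helper name and example credentials are hypothetical.

def apply_chap_auth(properties, provider_auth):
    # Mirrors the provider_auth handling in smis_get_iscsi_properties() above.
    if provider_auth:
        auth_method, auth_username, auth_secret = provider_auth.split()
        properties['auth_method'] = auth_method
        properties['auth_username'] = auth_username
        properties['auth_password'] = auth_secret
    return properties

# apply_chap_auth({}, 'CHAP chapuser chapsecret')
# -> {'auth_method': 'CHAP', 'auth_username': 'chapuser',
#     'auth_password': 'chapsecret'}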
def create_vol(self, volume, pool_name, reserve): """Execute createVol API.""" response = self._execute_create_vol(volume, pool_name, reserve) LOG.info(_LI('Successfully created volume %s'), response['name']) return response['name']